-
Notifications
You must be signed in to change notification settings - Fork 4
/
Copy pathdoc.go
478 lines (364 loc) · 15.4 KB
/
doc.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
// Copyright 2020 FishGoddess. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package cachego provides an easy-to-use foundation for your caching operations.
1. basic:
// Use NewCache function to create a cache.
// By default, it creates a standard cache which evicts entries randomly.
// Use WithShardings to shard cache to several parts for higher performance.
// Use WithGC to clean expired entries every 10 minutes.
cache := cachego.NewCache(cachego.WithGC(10*time.Minute), cachego.WithShardings(64))
// Set an entry to cache with ttl.
cache.Set("key", 123, time.Second)
// Get an entry from cache.
value, ok := cache.Get("key")
fmt.Println(value, ok) // 123 true
// Check how many entries are stored in the cache.
size := cache.Size()
fmt.Println(size) // 1
// Clean expired entries.
cleans := cache.GC()
fmt.Println(cleans) // 1
// Set an entry which doesn't have ttl.
cache.Set("key", 123, cachego.NoTTL)
// Remove an entry.
removedValue := cache.Remove("key")
fmt.Println(removedValue) // 123
// Reset resets cache to initial status.
cache.Reset()
// Get value from cache and load it to cache if not found.
value, ok = cache.Get("key")
if !ok {
// Loaded entry will be set to cache and returned.
// By default, it will use singleflight.
value, _ = cache.Load("key", time.Second, func() (value interface{}, err error) {
return 666, nil
})
}
fmt.Println(value) // 666
// You can use WithLRU to specify the type of cache to lru.
// Also, try WithLFU if you want to use lfu to evict data.
cache = cachego.NewCache(cachego.WithLRU(100))
cache = cachego.NewCache(cachego.WithLFU(100))
// Use NewCacheWithReport to create a cache with report.
cache, reporter := cachego.NewCacheWithReport(cachego.WithCacheName("test"))
fmt.Println(reporter.CacheName())
fmt.Println(reporter.CacheType())
2. ttl:
cache := cachego.NewCache()
// We think most of the entries in the cache should have their own ttl.
// So set an entry to cache should specify a ttl.
cache.Set("key", 666, time.Second)
value, ok := cache.Get("key")
fmt.Println(value, ok) // 666 true
time.Sleep(2 * time.Second)
// The entry is expired after ttl.
value, ok = cache.Get("key")
fmt.Println(value, ok) // <nil> false
// Notice that the entry is still stored in the cache even if it has expired.
// This is because we think you will reset the entry to the cache after a cache miss in most situations.
// So we can reuse this entry and just reset its value and ttl.
size := cache.Size()
fmt.Println(size) // 1
// What should I do if I want an expired entry never storing in cache? Try GC:
cleans := cache.GC()
fmt.Println(cleans) // 1
size = cache.Size()
fmt.Println(size) // 0
// However, not all entries have ttl, and you can specify a NoTTL constant to do so.
// In fact, the entry won't expire as long as its ttl is <= 0.
// So you may have known NoTTL is a "readable" value of "<= 0".
cache.Set("key", 666, cachego.NoTTL)
3. lru:
// By default, NewCache() returns a standard cache which evicts entries randomly.
cache := cachego.NewCache(cachego.WithMaxEntries(10))
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
cache.Set(key, i, cachego.NoTTL)
}
// Since we set 20 entries to cache, the size won't be 20 because we limit the max entries to 10.
size := cache.Size()
fmt.Println(size) // 10
// We don't know which entries will be evicted and stayed.
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
value, ok := cache.Get(key)
fmt.Println(key, value, ok)
}
fmt.Println()
// Sometimes we want it evicts entries by lru, try WithLRU.
// You need to specify the max entries storing in lru cache.
// More details see https://en.wikipedia.org/wiki/Cache_replacement_policies#Least_recently_used_(LRU).
cache = cachego.NewCache(cachego.WithLRU(10))
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
cache.Set(key, i, cachego.NoTTL)
}
// Only the least recently used entries can be got in a lru cache.
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
value, ok := cache.Get(key)
fmt.Println(key, value, ok)
}
// By default, lru will share one lock to do all operations.
// You can sharding cache to several parts for higher performance.
// Notice that max entries only affects one part in sharding mode.
// For example, the total max entries will be 2*10 if shardings is 2 and max entries is 10 in WithLRU or WithMaxEntries.
// Some cache libraries calculate the max entries of each sharding part instead, like 10/2.
// However, dividing max entries by shardings may not yield an integer, which would make the total max entries incorrect.
// So we let users decide the exact max entries in each parts of shardings.
cache = cachego.NewCache(cachego.WithShardings(2), cachego.WithLRU(10))
4. lfu:
// By default, NewCache() returns a standard cache which evicts entries randomly.
cache := cachego.NewCache(cachego.WithMaxEntries(10))
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
cache.Set(key, i, cachego.NoTTL)
}
// Since we set 20 entries to cache, the size won't be 20 because we limit the max entries to 10.
size := cache.Size()
fmt.Println(size) // 10
// We don't know which entries will be evicted and stayed.
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
value, ok := cache.Get(key)
fmt.Println(key, value, ok)
}
fmt.Println()
// Sometimes we want it evicts entries by lfu, try WithLFU.
// You need to specify the max entries storing in lfu cache.
// More details see https://en.wikipedia.org/wiki/Cache_replacement_policies#Least-frequently_used_(LFU).
cache = cachego.NewCache(cachego.WithLFU(10))
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
// Let entries have some frequently used operations.
for j := 0; j < i; j++ {
cache.Set(key, i, cachego.NoTTL)
}
}
for i := 0; i < 20; i++ {
key := strconv.Itoa(i)
value, ok := cache.Get(key)
fmt.Println(key, value, ok)
}
// By default, lfu will share one lock to do all operations.
// You can sharding cache to several parts for higher performance.
// Notice that max entries only affects one part in sharding mode.
// For example, the total max entries will be 2*10 if shardings is 2 and max entries is 10 in WithLFU or WithMaxEntries.
// Some cache libraries calculate the max entries of each sharding part instead, like 10/2.
// However, dividing max entries by shardings may not yield an integer, which would make the total max entries incorrect.
// So we let users decide the exact max entries in each parts of shardings.
cache = cachego.NewCache(cachego.WithShardings(2), cachego.WithLFU(10))
5. sharding:
// All operations in cache share one lock for concurrency.
// Whether a read lock or a write lock is used depends on the cache implementation.
// Get uses a read lock in the standard cache, but lru and lfu don't.
// This may be a serious performance problem in high qps.
cache := cachego.NewCache()
// We provide a sharding cache wrapper to shard one cache to several parts with hash.
// Every part stores its own entries, and all operations on one entry work on one part.
// This means there are more than one lock when you operate entries.
// The performance will be better in high qps.
cache = cachego.NewCache(cachego.WithShardings(64))
cache.Set("key", 666, cachego.NoTTL)
value, ok := cache.Get("key")
fmt.Println(value, ok) // 666 true
// Notice that max entries will be the sum of shards.
// For example, we set WithShardings(4) and WithMaxEntries(100), and the max entries in whole cache will be 4 * 100.
cache = cachego.NewCache(cachego.WithShardings(4), cachego.WithMaxEntries(100))
for i := 0; i < 1000; i++ {
key := strconv.Itoa(i)
cache.Set(key, i, cachego.NoTTL)
}
size := cache.Size()
fmt.Println(size) // 400
6. gc:
cache := cachego.NewCache()
cache.Set("key", 666, time.Second)
time.Sleep(2 * time.Second)
// The entry is expired after ttl.
value, ok := cache.Get("key")
fmt.Println(value, ok) // <nil> false
// As you know, the entry is still stored in the cache even if it has expired.
// This is because we think you will reset the entry to the cache after a cache miss in most situations.
// So we can reuse this entry and just reset its value and ttl.
size := cache.Size()
fmt.Println(size) // 1
// What should I do if I want an expired entry never storing in cache? Try GC:
cleans := cache.GC()
fmt.Println(cleans) // 1
// Is there a smart way to do that? Try WithGC:
// For testing, we set a small duration of gc.
// You should set at least 3 minutes in production for performance.
cache = cachego.NewCache(cachego.WithGC(2 * time.Second))
cache.Set("key", 666, time.Second)
size = cache.Size()
fmt.Println(size) // 1
time.Sleep(3 * time.Second)
size = cache.Size()
fmt.Println(size) // 0
// Or do you want a cancelable gc task? Try RunGCTask:
cache = cachego.NewCache()
cancel := cachego.RunGCTask(cache, 2*time.Second)
cache.Set("key", 666, time.Second)
size = cache.Size()
fmt.Println(size) // 1
time.Sleep(3 * time.Second)
size = cache.Size()
fmt.Println(size) // 0
cancel()
cache.Set("key", 666, time.Second)
size = cache.Size()
fmt.Println(size) // 1
time.Sleep(3 * time.Second)
size = cache.Size()
fmt.Println(size) // 1
// By default, gc only scans at most maxScans entries one time to remove expired entries.
// This is because scanning all entries may cost much time if there are many entries in the cache, and a "stw" will happen.
// This can be a serious problem in some situations.
// Use WithMaxScans to set this value, remember, a value <= 0 means no scan limit.
cache = cachego.NewCache(cachego.WithGC(10*time.Minute), cachego.WithMaxScans(0))
7. load:
// By default, singleflight is enabled in cache.
// Use WithDisableSingleflight to disable if you want.
cache := cachego.NewCache(cachego.WithDisableSingleflight())
// We recommend you to use singleflight.
cache = cachego.NewCache()
value, ok := cache.Get("key")
fmt.Println(value, ok) // <nil> false
if !ok {
// Load loads a value of key to cache with ttl.
// Use cachego.NoTTL if you want this value is no ttl.
// After loading value to cache, it returns the loaded value and error if failed.
value, _ = cache.Load("key", time.Second, func() (value interface{}, err error) {
return 666, nil
})
}
fmt.Println(value) // 666
value, ok = cache.Get("key")
fmt.Println(value, ok) // 666, true
time.Sleep(2 * time.Second)
value, ok = cache.Get("key")
fmt.Println(value, ok) // <nil>, false
8. report:
func reportMissed(reporter *cachego.Reporter, key string) {
fmt.Printf("report: missed key %s, missed rate %.3f\n", key, reporter.MissedRate())
}
func reportHit(reporter *cachego.Reporter, key string, value interface{}) {
fmt.Printf("report: hit key %s value %+v, hit rate %.3f\n", key, value, reporter.HitRate())
}
func reportGC(reporter *cachego.Reporter, cost time.Duration, cleans int) {
fmt.Printf("report: gc cost %s cleans %d, gc count %d, cache size %d\n", cost, cleans, reporter.CountGC(), reporter.CacheSize())
}
func reportLoad(reporter *cachego.Reporter, key string, value interface{}, ttl time.Duration, err error) {
fmt.Printf("report: load key %s value %+v ttl %s, err %+v, load count %d\n", key, value, ttl, err, reporter.CountLoad())
}
// We provide some ways to report the status of cache.
// Use NewCacheWithReport to create a cache with reporting features.
cache, reporter := cachego.NewCacheWithReport(
// Sometimes you may have several caches in one service.
// You can set each name by WithCacheName and get the name from reporter.
cachego.WithCacheName("test"),
// For testing...
cachego.WithMaxEntries(3),
cachego.WithGC(100*time.Millisecond),
// ReportMissed reports the missed key getting from cache.
// ReportHit reports the hit entry getting from cache.
// ReportGC reports the status of cache gc.
// ReportLoad reports the result of loading.
cachego.WithReportMissed(reportMissed),
cachego.WithReportHit(reportHit),
cachego.WithReportGC(reportGC),
cachego.WithReportLoad(reportLoad),
)
for i := 0; i < 5; i++ {
key := strconv.Itoa(i)
evictedValue := cache.Set(key, key, 10*time.Millisecond)
fmt.Println(evictedValue)
}
for i := 0; i < 5; i++ {
key := strconv.Itoa(i)
value, ok := cache.Get(key)
fmt.Println(value, ok)
}
time.Sleep(200 * time.Millisecond)
value, err := cache.Load("key", time.Second, func() (value interface{}, err error) {
return 666, io.EOF
})
fmt.Println(value, err)
// These are some useful methods of reporter.
fmt.Println("CacheName:", reporter.CacheName())
fmt.Println("CacheType:", reporter.CacheType())
fmt.Println("CountMissed:", reporter.CountMissed())
fmt.Println("CountHit:", reporter.CountHit())
fmt.Println("CountGC:", reporter.CountGC())
fmt.Println("CountLoad:", reporter.CountLoad())
fmt.Println("CacheSize:", reporter.CacheSize())
fmt.Println("MissedRate:", reporter.MissedRate())
fmt.Println("HitRate:", reporter.HitRate())
9. task:
var (
contextKey = struct{}{}
)
func beforePrint(ctx context.Context) {
fmt.Println("before:", ctx.Value(contextKey))
}
func afterPrint(ctx context.Context) {
fmt.Println("after:", ctx.Value(contextKey))
}
func printContextValue(ctx context.Context) {
fmt.Println("context value:", ctx.Value(contextKey))
}
// Create a context to stop the task.
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
// Wrap context with key and value
ctx = context.WithValue(ctx, contextKey, "hello")
// Use New to create a task and run it.
// You can use it to load some hot data to cache at fixed duration.
// Before is called before the task loop, optional.
// After is called after the task loop, optional.
// Context is passed to fn include fn/before/after which can stop the task by Done(), optional.
// Duration is the duration between two loop of fn, optional.
// Run will start a new goroutine and run the task loop.
// The task will stop if context is done.
task.New(printContextValue).Before(beforePrint).After(afterPrint).Context(ctx).Duration(time.Second).Run()
10. clock:
// Create a fast clock and get current time in nanosecond by Now.
c := clock.New()
c.Now()
// Fast clock may return an "incorrect" time compared with time.Now.
// The gap will be smaller than about 100 ms.
for i := 0; i < 10; i++ {
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
timeNow := time.Now().UnixNano()
clockNow := c.Now()
fmt.Println(timeNow)
fmt.Println(clockNow)
fmt.Println("gap:", time.Duration(timeNow-clockNow))
fmt.Println()
}
// You can specify the fast clock to cache by WithNow.
// All getting current time operations in this cache will use fast clock.
cache := cachego.NewCache(cachego.WithNow(clock.New().Now))
cache.Set("key", 666, 100*time.Millisecond)
value, ok := cache.Get("key")
fmt.Println(value, ok) // 666, true
time.Sleep(200 * time.Millisecond)
value, ok = cache.Get("key")
fmt.Println(value, ok) // <nil>, false
*/
package cachego // import "github.com/FishGoddess/cachego"
// Version is the current release version string of cachego.
const (
	Version = "v0.6.1"
)