Skip to content

Commit

Permalink
improvement: Use LRU instead of maps on defaultStore
Browse files Browse the repository at this point in the history
Changing the actual cache implementation to use an LRU cache improves
resource usage and avoids exhaustion.

Signed-off-by: Antonio Navarro Perez <antnavper@gmail.com>
  • Loading branch information
ajnavarro committed Apr 26, 2023
1 parent 71f0bd5 commit d56846a
Show file tree
Hide file tree
Showing 3 changed files with 65 additions and 35 deletions.
97 changes: 62 additions & 35 deletions gnovm/pkg/gnolang/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,17 @@ import (
"strconv"
"strings"

lru "github.com/hashicorp/golang-lru/v2"

"github.com/gnolang/gno/tm2/pkg/amino"
"github.com/gnolang/gno/tm2/pkg/std"
"github.com/gnolang/gno/tm2/pkg/store"
)

const iavlCacheSize = 1024 * 1024 // TODO increase and parameterize.
const objectCacheSize = 100 // TODO parameterize.
const typeCacheSize = 1000 // TODO parameterize.
const nodeCacheSize = 10000 // TODO parameterize.

// return nil if package doesn't exist.
type PackageGetter func(pkgPath string) (*PackageNode, *PackageValue)
Expand Down Expand Up @@ -65,10 +70,10 @@ type Store interface {
type defaultStore struct {
alloc *Allocator // for accounting for cached items
pkgGetter PackageGetter // non-realm packages
cacheObjects map[ObjectID]Object
cacheTypes map[TypeID]Type
cacheNodes map[Location]BlockNode
cacheNativeTypes map[reflect.Type]Type // go spec: reflect.Type are comparable
cacheObjects *lru.Cache[ObjectID, Object]
cacheTypes *lru.Cache[TypeID, Type]
cacheNodes *lru.Cache[Location, BlockNode]
cacheNativeTypes map[reflect.Type]Type // go spec: reflect.Type are comparable // TODO: reflect.Type is not comparable until we update to Go 1.20
baseStore store.Store // for objects, types, nodes
iavlStore store.Store // for escaped object hashes
pkgInjector PackageInjector // for injecting natives
Expand All @@ -81,12 +86,24 @@ type defaultStore struct {
}

func NewStore(alloc *Allocator, baseStore, iavlStore store.Store) *defaultStore {
oc, err := lru.New[ObjectID, Object](objectCacheSize)
if err != nil {
panic(err)
}
tc, err := lru.New[TypeID, Type](typeCacheSize)
if err != nil {
panic(err)
}
nc, err := lru.New[Location, BlockNode](nodeCacheSize)
if err != nil {
panic(err)
}
ds := &defaultStore{
alloc: alloc,
pkgGetter: nil,
cacheObjects: make(map[ObjectID]Object),
cacheTypes: make(map[TypeID]Type),
cacheNodes: make(map[Location]BlockNode),
cacheObjects: oc,
cacheTypes: tc,
cacheNodes: nc,
cacheNativeTypes: make(map[reflect.Type]Type),
baseStore: baseStore,
iavlStore: iavlStore,
Expand Down Expand Up @@ -118,7 +135,8 @@ func (ds *defaultStore) GetPackage(pkgPath string, isImport bool) *PackageValue
}
// first, check cache.
oid := ObjectIDFromPkgPath(pkgPath)
if oo, exists := ds.cacheObjects[oid]; exists {

if oo, exists := ds.cacheObjects.Get(oid); exists {
pv := oo.(*PackageValue)
return pv
}
Expand Down Expand Up @@ -171,7 +189,7 @@ func (ds *defaultStore) GetPackage(pkgPath string, isImport bool) *PackageValue
// Realm values obtained this way
// will get written elsewhere
// later.
ds.cacheObjects[oid] = pv
ds.cacheObjects.Add(oid, pv)
// inject natives after init.
if ds.pkgInjector != nil {
if pn.HasAttribute(ATTR_INJECTED) {
Expand Down Expand Up @@ -203,10 +221,11 @@ func (ds *defaultStore) GetPackage(pkgPath string, isImport bool) *PackageValue
// Used to set throwaway packages.
func (ds *defaultStore) SetCachePackage(pv *PackageValue) {
oid := ObjectIDFromPkgPath(pv.PkgPath)
if _, exists := ds.cacheObjects[oid]; exists {
if ds.cacheObjects.Contains(oid) {
panic(fmt.Sprintf("package %s already exists in cache", pv.PkgPath))
}
ds.cacheObjects[oid] = pv

ds.cacheObjects.Add(oid, pv)
}

// Some atomic operation.
Expand Down Expand Up @@ -250,7 +269,7 @@ func (ds *defaultStore) GetObject(oid ObjectID) Object {

func (ds *defaultStore) GetObjectSafe(oid ObjectID) Object {
// check cache.
if oo, exists := ds.cacheObjects[oid]; exists {
if oo, exists := ds.cacheObjects.Get(oid); exists {
return oo
}
// check baseStore.
Expand Down Expand Up @@ -285,7 +304,7 @@ func (ds *defaultStore) loadObjectSafe(oid ObjectID) Object {
}
}
oo.SetHash(ValueHash{NewHashlet(hash)})
ds.cacheObjects[oid] = oo
ds.cacheObjects.Add(oid, oo)
_ = fillTypesOfValue(ds, oo)
return oo
}
Expand Down Expand Up @@ -319,15 +338,15 @@ func (ds *defaultStore) SetObject(oo Object) {
if oid.IsZero() {
panic("object id cannot be zero")
}
if oo2, exists := ds.cacheObjects[oid]; exists {
if oo2, exists := ds.cacheObjects.Get(oid); exists {
if oo != oo2 {
panic(fmt.Sprintf(
"duplicate object: set %s (oid: %s) but %s (oid %s) already exists",
oo.String(), oid.String(), oo2.String(), oo2.GetObjectID().String()))
}
}
}
ds.cacheObjects[oid] = oo
ds.cacheObjects.Add(oid, oo)
// make store op log entry
if ds.opslog != nil {
var op StoreOpType
Expand All @@ -351,7 +370,7 @@ func (ds *defaultStore) SetObject(oo Object) {
func (ds *defaultStore) DelObject(oo Object) {
oid := oo.GetObjectID()
// delete from cache.
delete(ds.cacheObjects, oid)
ds.cacheObjects.Remove(oid)
// delete from backend.
if ds.baseStore != nil {
key := backendObjectKey(oid)
Expand All @@ -378,7 +397,7 @@ func (ds *defaultStore) GetType(tid TypeID) Type {

func (ds *defaultStore) GetTypeSafe(tid TypeID) Type {
// check cache.
if tt, exists := ds.cacheTypes[tid]; exists {
if tt, exists := ds.cacheTypes.Get(tid); exists {
return tt
}
// check backend.
Expand All @@ -395,7 +414,7 @@ func (ds *defaultStore) GetTypeSafe(tid TypeID) Type {
}
}
// set in cache.
ds.cacheTypes[tid] = tt
ds.cacheTypes.Add(tid, tt)
// after setting in cache, fill tt.
fillType(ds, tt)
return tt
Expand All @@ -406,22 +425,22 @@ func (ds *defaultStore) GetTypeSafe(tid TypeID) Type {

func (ds *defaultStore) SetCacheType(tt Type) {
tid := tt.TypeID()
if tt2, exists := ds.cacheTypes[tid]; exists {
if tt2, exists := ds.cacheTypes.Get(tid); exists {
if tt != tt2 {
// NOTE: not sure why this would happen.
panic("should not happen")
} else {
// already set.
}
} else {
ds.cacheTypes[tid] = tt
ds.cacheTypes.Add(tid, tt)
}
}

func (ds *defaultStore) SetType(tt Type) {
tid := tt.TypeID()
// return if tid already known.
if tt2, exists := ds.cacheTypes[tid]; exists {
if tt2, exists := ds.cacheTypes.Get(tid); exists {
if tt != tt2 {
// this can happen for a variety of reasons.
// TODO classify them and optimize.
Expand All @@ -436,7 +455,7 @@ func (ds *defaultStore) SetType(tt Type) {
ds.baseStore.Set([]byte(key), bz)
}
// save type to cache.
ds.cacheTypes[tid] = tt
ds.cacheTypes.Add(tid, tt)
}

func (ds *defaultStore) GetBlockNode(loc Location) BlockNode {
Expand All @@ -449,7 +468,7 @@ func (ds *defaultStore) GetBlockNode(loc Location) BlockNode {

func (ds *defaultStore) GetBlockNodeSafe(loc Location) BlockNode {
// check cache.
if bn, exists := ds.cacheNodes[loc]; exists {
if bn, exists := ds.cacheNodes.Get(loc); exists {
return bn
}
// check backend.
Expand All @@ -465,7 +484,7 @@ func (ds *defaultStore) GetBlockNodeSafe(loc Location) BlockNode {
loc, bn.GetLocation()))
}
}
ds.cacheNodes[loc] = bn
ds.cacheNodes.Add(loc, bn)
return bn
}
}
Expand All @@ -484,7 +503,7 @@ func (ds *defaultStore) SetBlockNode(bn BlockNode) {
// ds.backend.Set([]byte(key), bz)
}
// save node to cache.
ds.cacheNodes[loc] = bn
ds.cacheNodes.Add(loc, bn)
// XXX duplicate?
// XXX
}
Expand Down Expand Up @@ -582,8 +601,8 @@ func (ds *defaultStore) IterMemPackage() <-chan *std.MemPackage {
// It also sets a new allocator.
func (ds *defaultStore) ClearObjectCache() {
ds.alloc.Reset()
ds.cacheObjects = make(map[ObjectID]Object) // new cache.
ds.opslog = nil // new ops log.
ds.cacheObjects.Purge()
ds.opslog = nil // new ops log.
if len(ds.current) > 0 {
ds.current = make(map[string]struct{})
}
Expand All @@ -593,10 +612,14 @@ func (ds *defaultStore) ClearObjectCache() {
// Unstable.
// This function is used to handle queries and checktx transactions.
func (ds *defaultStore) Fork() Store {
co2, err := lru.New[ObjectID, Object](ds.cacheObjects.Len())
if err != nil {
panic(err)
}
ds2 := &defaultStore{
alloc: ds.alloc.Fork().Reset(),
pkgGetter: ds.pkgGetter,
cacheObjects: make(map[ObjectID]Object), // new cache.
cacheObjects: co2, // new cache.
cacheTypes: ds.cacheTypes,
cacheNodes: ds.cacheNodes,
cacheNativeTypes: ds.cacheNativeTypes,
Expand Down Expand Up @@ -695,9 +718,9 @@ func (ds *defaultStore) LogSwitchRealm(rlmpath string) {
}

func (ds *defaultStore) ClearCache() {
ds.cacheObjects = make(map[ObjectID]Object)
ds.cacheTypes = make(map[TypeID]Type)
ds.cacheNodes = make(map[Location]BlockNode)
ds.cacheObjects.Purge()
ds.cacheTypes.Purge()
ds.cacheNodes.Purge()
ds.cacheNativeTypes = make(map[reflect.Type]Type)
// restore builtin types to cache.
InitStoreCaches(ds)
Expand All @@ -713,13 +736,17 @@ func (ds *defaultStore) Print() {
store.Print(ds.iavlStore)
fmt.Println("//----------------------------------------")
fmt.Println("defaultStore:cacheTypes...")
for tid, typ := range ds.cacheTypes {
fmt.Printf("- %v: %v\n", tid, typ)
for _, tid := range ds.cacheTypes.Keys() {
if typ, ok := ds.cacheTypes.Get(tid); ok {
fmt.Printf("- %v: %v\n", tid, typ)
}
}
fmt.Println("//----------------------------------------")
fmt.Println("defaultStore:cacheNodes...")
for loc, bn := range ds.cacheNodes {
fmt.Printf("- %v: %v\n", loc, bn)
for _, loc := range ds.cacheNodes.Keys() {
if bn, ok := ds.cacheNodes.Get(loc); ok {
fmt.Printf("- %v: %v\n", loc, bn)
}
}
}

Expand Down
1 change: 1 addition & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,7 @@ require (
github.com/google/flatbuffers v1.12.1 // indirect
github.com/gorilla/securecookie v1.1.1 // indirect
github.com/gorilla/sessions v1.2.1 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect
github.com/klauspost/compress v1.12.3 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lib/pq v1.10.7 // indirect
Expand Down
2 changes: 2 additions & 0 deletions go.sum

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

0 comments on commit d56846a

Please sign in to comment.