From 53fbe897689c99aaea4cd1ee413bd68729139266 Mon Sep 17 00:00:00 2001 From: Vadim Macagon Date: Fri, 13 Mar 2020 14:17:22 +0700 Subject: [PATCH] Revert "Allow EVM state to be flushed to disk every Nth block instead of every block (#1532)" This reverts commit a6fba1cdf754384218f06db8548d73b7c0079e77. --- Makefile | 12 +- app.go | 70 +---- app_test.go | 2 +- cmd/loom/db/db.go | 2 + cmd/loom/db/evm.go | 240 ++++++++++++++++- cmd/loom/loom.go | 67 +++-- config/config.go | 11 - e2e/dposv2-genesis.json | 102 -------- e2e/enable-receipts-v2-feature-genesis.json | 24 -- e2e/eth-test-1-loom.yaml | 3 +- e2e/eth_test.go | 6 +- e2e/tests/receipts/run_truffle_tests.sh | 7 +- e2e/tests/truffle/test/EvmSnapshot.js | 94 ------- evm/config.go | 9 +- evm/evm_test.go | 19 +- evm/loom_statedb.go | 6 +- {store => evm}/loomethdb.go | 241 ++++++++--------- evm/loomethdb_test.go | 128 +++++++++ evm/loomevm.go | 84 +++--- evm/test_cryptozombies.go | 10 + evm_state.go | 86 ------- plugin/fake_context.go | 13 +- plugin/vm_test.go | 12 +- store/evmstore.go | 155 +++++------ store/evmstore_test.go | 14 +- store/iavlstore.go | 4 - store/logstore.go | 4 - store/loomethdb_test.go | 43 ---- store/memstore.go | 4 - store/multi_writer_app_store.go | 39 +-- store/multi_writer_app_store_test.go | 119 +++++++-- store/pruning_iavlstore.go | 271 ++++++++++++++++++++ store/store.go | 1 - store/store_test.go | 188 ++++++++++++++ store/versioned_cachingstore.go | 57 ++-- store/versioned_cachingstore_test.go | 11 +- 36 files changed, 1294 insertions(+), 864 deletions(-) delete mode 100644 e2e/dposv2-genesis.json delete mode 100644 e2e/tests/truffle/test/EvmSnapshot.js rename {store => evm}/loomethdb.go (51%) create mode 100644 evm/loomethdb_test.go delete mode 100644 evm_state.go delete mode 100644 store/loomethdb_test.go create mode 100644 store/pruning_iavlstore.go diff --git a/Makefile b/Makefile index 8fa2b06495..d50c2db78b 100644 --- a/Makefile +++ b/Makefile @@ -33,7 +33,7 @@ GO_LOOM_GIT_REV = HEAD # Specifies the loomnetwork/transfer-gateway branch/revision to use. 
TG_GIT_REV = HEAD # loomnetwork/go-ethereum loomchain branch -ETHEREUM_GIT_REV = cce1b3f69354033160583e5576169f9b309ee62e +ETHEREUM_GIT_REV = 6128fa1a8c767035d3da6ef0c27ebb7778ce3713 # use go-plugin we get 'timeout waiting for connection info' error HASHICORP_GIT_REV = f4c3476bd38585f9ec669d10ed1686abd52b9961 LEVIGO_GIT_REV = c42d9e0ca023e2198120196f842701bb4c55d7b9 @@ -197,15 +197,13 @@ $(BINANCE_TGORACLE_DIR): git clone -q git@github.com:loomnetwork/binance-tgoracle.git $@ cd $(BINANCE_TGORACLE_DIR) && git checkout master && git pull && git checkout $(BINANCE_TG_GIT_REV) -$(PROMETHEUS_PROCFS_DIR): - # Temp workaround for https://github.com/prometheus/procfs/issues/221 - git clone -q git@github.com:prometheus/procfs $(PROMETHEUS_PROCFS_DIR) - cd $(PROMETHEUS_PROCFS_DIR) && git checkout master && git pull && git checkout d3b299e382e6acf1baa852560d862eca4ff643c8 - validators-tool: $(TRANSFER_GATEWAY_DIR) go build -tags gateway -o e2e/validators-tool $(PKG)/e2e/cmd -deps: $(PLUGIN_DIR) $(GO_ETHEREUM_DIR) $(SSHA3_DIR) $(PROMETHEUS_PROCFS_DIR) +deps: $(PLUGIN_DIR) $(GO_ETHEREUM_DIR) $(SSHA3_DIR) + # Temp workaround for https://github.com/prometheus/procfs/issues/221 + git clone -q git@github.com:prometheus/procfs $(PROMETHEUS_PROCFS_DIR) + cd $(PROMETHEUS_PROCFS_DIR) && git checkout master && git pull && git checkout d3b299e382e6acf1baa852560d862eca4ff643c8 # Lock down Prometheus golang client to v1.2.1 (newer versions use a different protobuf version) git clone -q git@github.com:prometheus/client_golang $(GOPATH)/src/github.com/prometheus/client_golang cd $(GOPATH)/src/github.com/prometheus/client_golang && git checkout master && git pull && git checkout v1.2.1 diff --git a/app.go b/app.go index eab21c0824..75fdf6ceda 100644 --- a/app.go +++ b/app.go @@ -6,9 +6,7 @@ import ( "encoding/binary" "encoding/hex" "fmt" - "sync/atomic" "time" - "unsafe" "github.com/loomnetwork/go-loom/config" "github.com/loomnetwork/go-loom/util" @@ -53,7 +51,6 @@ type State interface { SetFeature(string, bool) SetMinBuildNumber(uint64) ChangeConfigSetting(name, value string) error - EVMState() *EVMState } type StoreState struct { @@ -63,7 +60,6 @@ type StoreState struct { validators loom.ValidatorSet getValidatorSet GetValidatorSet config *cctypes.Config - evmState *EVMState } var _ = State(&StoreState{}) @@ -105,11 +101,6 @@ func (s *StoreState) WithOnChainConfig(config *cctypes.Config) *StoreState { return s } -func (s *StoreState) WithEVMState(evmState *EVMState) *StoreState { - s.evmState = evmState - return s -} - func (s *StoreState) Range(prefix []byte) plugin.RangeData { return s.store.Range(prefix) } @@ -150,10 +141,6 @@ func (s *StoreState) Context() context.Context { return s.ctx } -func (s *StoreState) EVMState() *EVMState { - return s.evmState -} - const ( featurePrefix = "feature" MinBuildKey = "minbuild" @@ -247,7 +234,6 @@ func (s *StoreState) WithContext(ctx context.Context) State { ctx: ctx, validators: s.validators, getValidatorSet: s.getValidatorSet, - evmState: s.evmState, } } @@ -258,7 +244,6 @@ func (s *StoreState) WithPrefix(prefix []byte) State { ctx: s.ctx, validators: s.validators, getValidatorSet: s.getValidatorSet, - evmState: s.evmState, } } @@ -362,7 +347,7 @@ type CommittedTx struct { } type Application struct { - lastBlockHeader unsafe.Pointer // *abci.Header + lastBlockHeader abci.Header curBlockHeader abci.Header curBlockHash []byte Store store.VersionedKVStore @@ -383,7 +368,6 @@ type Application struct { config *cctypes.Config childTxRefs []evmaux.ChildTxRef // links Tendermint 
txs to EVM txs ReceiptsVersion int32 - EVMState *EVMState committedTxs []CommittedTx } @@ -528,7 +512,7 @@ func (a *Application) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginB a.curBlockHeader, a.curBlockHash, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) contractUpkeepHandler, err := a.CreateContractUpkeepHandler(upkeepState) if err != nil { panic(err) @@ -548,7 +532,7 @@ func (a *Application) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginB a.curBlockHeader, nil, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) validatorManager, err := a.CreateValidatorManager(state) if err != registry.ErrNotFound { @@ -616,7 +600,7 @@ func (a *Application) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { a.curBlockHeader, nil, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) validatorManager, err := a.CreateValidatorManager(state) if err != registry.ErrNotFound { @@ -673,10 +657,6 @@ func (a *Application) CheckTx(txBytes []byte) abci.ResponseCheckTx { a.GetValidatorSet, ).WithOnChainConfig(a.config) - if a.EVMState != nil { - state = state.WithEVMState(a.EVMState.Clone()) - } - // Receipts & events generated in CheckTx must be discarded since the app state changes they // reflect aren't persisted. defer a.ReceiptHandlerProvider.Store().DiscardCurrentReceipt() @@ -712,7 +692,7 @@ func (a *Application) DeliverTx(txBytes []byte) abci.ResponseDeliverTx { a.curBlockHeader, a.curBlockHash, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) var r abci.ResponseDeliverTx @@ -745,7 +725,7 @@ func (a *Application) processTx(storeTx store.KVStoreTx, txBytes []byte, isCheck a.curBlockHeader, a.curBlockHash, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) receiptHandler := a.ReceiptHandlerProvider.Store() defer receiptHandler.DiscardCurrentReceipt() @@ -798,7 +778,7 @@ func (a *Application) deliverTx2(storeTx store.KVStoreTx, txBytes []byte) abci.R a.curBlockHeader, a.curBlockHash, a.GetValidatorSet, - ).WithOnChainConfig(a.config).WithEVMState(a.EVMState) + ).WithOnChainConfig(a.config) receiptHandler := a.ReceiptHandlerProvider.Store() defer receiptHandler.DiscardCurrentReceipt() @@ -850,19 +830,13 @@ func (a *Application) Commit() abci.ResponseCommit { commitBlockLatency.With(lvs...).Observe(time.Since(begin).Seconds()) }(time.Now()) - if a.EVMState != nil { - // Commit EVM state changes to the EvmStore - if err := a.EVMState.Commit(); err != nil { - panic(err) - } - } - appHash, _, err := a.Store.SaveVersion() if err != nil { panic(err) } height := a.curBlockHeader.GetHeight() + if err := a.EvmAuxStore.SaveChildTxRefs(a.childTxRefs); err != nil { // TODO: consider panic instead log.Error("Failed to save Tendermint -> EVM tx hash refs", "height", height, "err", err) @@ -877,8 +851,7 @@ func (a *Application) Commit() abci.ResponseCommit { // Update the last block header before emitting events in case the subscribers attempt to access // the latest committed state as soon as they receive an event. 
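// --- Illustrative sketch (not part of this patch): the hunk below reverts a
// lock-free publication pattern in which the last block header lived behind
// an unsafe.Pointer and was swapped with sync/atomic, letting ReadOnlyState
// read it from another goroutine without a mutex. A minimal self-contained
// version of that pattern, with made-up names rather than the loomchain API:
package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type header struct{ Height int64 }

type app struct {
	lastBlockHeader unsafe.Pointer // effectively *header, swapped atomically
}

// publish stores a fresh copy so concurrent readers never observe a
// half-written struct.
func (a *app) publish(h header) {
	hCopy := h
	atomic.StorePointer(&a.lastBlockHeader, unsafe.Pointer(&hCopy))
}

// last returns the most recently published header (nil before first publish).
func (a *app) last() *header {
	return (*header)(atomic.LoadPointer(&a.lastBlockHeader))
}

func main() {
	a := &app{}
	a.publish(header{Height: 42})
	fmt.Println(a.last().Height) // prints 42
}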
- curBlockHeader := a.curBlockHeader - atomic.StorePointer(&a.lastBlockHeader, unsafe.Pointer(&curBlockHeader)) + a.lastBlockHeader = a.curBlockHeader go func(height int64, blockHeader abci.Header, committedTxs []CommittedTx) { if err := a.EventHandler.EmitBlockTx(uint64(height), blockHeader.Time); err != nil { @@ -931,28 +904,13 @@ func (a *Application) height() int64 { } func (a *Application) ReadOnlyState() State { - lastBlockHeader := (*abci.Header)(atomic.LoadPointer(&a.lastBlockHeader)) - appStateSnapshot, err := a.Store.GetSnapshotAt(lastBlockHeader.Height) - if err != nil { - panic(err) - } - - var evmStateSnapshot *EVMState - if a.EVMState != nil { - evmStateSnapshot, err = a.EVMState.GetSnapshot( - lastBlockHeader.Height, - store.GetEVMRootFromAppStore(appStateSnapshot), - ) - if err != nil { - panic(err) - } - } - + // TODO: the store snapshot should be created atomically, otherwise the block header might + // not match the state... need to figure out why this hasn't spectacularly failed already return NewStoreStateSnapshot( nil, - appStateSnapshot, - *lastBlockHeader, + a.Store.GetSnapshot(), + a.lastBlockHeader, nil, // TODO: last block hash! a.GetValidatorSet, - ).WithEVMState(evmStateSnapshot) + ) } diff --git a/app_test.go b/app_test.go index 7ece2cfb20..9b3e04f435 100644 --- a/app_test.go +++ b/app_test.go @@ -60,7 +60,7 @@ func mockMultiWriterStore(flushInterval int64) (*store.MultiWriterAppStore, erro return nil, err } memDb, _ = db.LoadMemDB() - evmStore := store.NewEvmStore(memDb, 100, 0) + evmStore := store.NewEvmStore(memDb, 100) multiWriterStore, err := store.NewMultiWriterAppStore(iavlStore, evmStore, false) if err != nil { return nil, err diff --git a/cmd/loom/db/db.go b/cmd/loom/db/db.go index e292f2d6f5..49b5751cc0 100644 --- a/cmd/loom/db/db.go +++ b/cmd/loom/db/db.go @@ -14,6 +14,8 @@ func NewDBCommand() *cobra.Command { cmd.AddCommand( newPruneDBCommand(), newCompactDBCommand(), + newDumpEVMStateCommand(), + newDumpEVMStateMultiWriterAppStoreCommand(), newDumpEVMStateFromEvmDB(), newGetEvmHeightCommand(), newGetAppHeightCommand(), diff --git a/cmd/loom/db/evm.go b/cmd/loom/db/evm.go index b75d5d3a2e..fdda84a2d9 100644 --- a/cmd/loom/db/evm.go +++ b/cmd/loom/db/evm.go @@ -3,6 +3,7 @@ package db import ( + "context" "fmt" "math" "path" @@ -13,18 +14,235 @@ import ( gstate "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" + "github.com/loomnetwork/loomchain" "github.com/loomnetwork/loomchain/cmd/loom/common" cdb "github.com/loomnetwork/loomchain/db" + "github.com/loomnetwork/loomchain/events" + "github.com/loomnetwork/loomchain/evm" + "github.com/loomnetwork/loomchain/log" + "github.com/loomnetwork/loomchain/plugin" + "github.com/loomnetwork/loomchain/receipts" + registry "github.com/loomnetwork/loomchain/registry/factory" "github.com/loomnetwork/loomchain/store" "github.com/spf13/cobra" + abci "github.com/tendermint/tendermint/abci/types" + dbm "github.com/tendermint/tendermint/libs/db" ) +func newDumpEVMStateCommand() *cobra.Command { + var appHeight int64 + + cmd := &cobra.Command{ + Use: "evm-dump", + Short: "Dumps EVM state stored at a specific block height", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := common.ParseConfig() + if err != nil { + return err + } + + db, err := dbm.NewGoLevelDB(cfg.DBName, cfg.RootPath()) + if err != nil { + return err + } + appStore, err := store.NewIAVLStore(db, 0, appHeight, 0) + if err != nil { + return err + } + + eventHandler 
:= loomchain.NewDefaultEventHandler(events.NewLogEventDispatcher()) + + regVer, err := registry.RegistryVersionFromInt(cfg.RegistryVersion) + if err != nil { + return err + } + createRegistry, err := registry.NewRegistryFactory(regVer) + if err != nil { + return err + } + + receiptHandlerProvider := receipts.NewReceiptHandlerProvider( + eventHandler, + cfg.EVMPersistentTxReceiptsMax, + nil, + ) + + // TODO: This should use snapshot obtained from appStore.ReadOnlyState() + storeTx := store.WrapAtomic(appStore).BeginTx() + state := loomchain.NewStoreState( + context.Background(), + storeTx, + abci.Header{ + Height: appStore.Version(), + }, + // it is possible to load the block hash from the TM block store, but probably don't + // need it for just dumping the EVM state + nil, + nil, + ) + + var newABMFactory plugin.NewAccountBalanceManagerFactoryFunc + if evm.EVMEnabled && cfg.EVMAccountsEnabled { + newABMFactory = plugin.NewAccountBalanceManagerFactory + } + + var accountBalanceManager evm.AccountBalanceManager + if newABMFactory != nil { + pvm := plugin.NewPluginVM( + common.NewDefaultContractsLoader(cfg), + state, + createRegistry(state), + eventHandler, + log.Default, + newABMFactory, + receiptHandlerProvider.Writer(), + receiptHandlerProvider.Reader(), + ) + createABM, err := newABMFactory(pvm) + if err != nil { + return err + } + accountBalanceManager = createABM(true) + if err != nil { + return err + } + } + + vm, err := evm.NewLoomEvm(state, accountBalanceManager, nil, false) + if err != nil { + return err + } + + fmt.Printf("\n--- EVM state at app height %d ---\n%s\n", appStore.Version(), string(vm.RawDump())) + return nil + }, + } + + cmdFlags := cmd.Flags() + cmdFlags.Int64Var(&appHeight, "app-height", 0, "Dump EVM state as it was the specified app height") + return cmd +} + +func newDumpEVMStateMultiWriterAppStoreCommand() *cobra.Command { + var appHeight int64 + var evmDBName string + cmd := &cobra.Command{ + Use: "evm-dump-2", + Short: "Dumps EVM state stored at a specific block height from MultiWriterAppStore", + RunE: func(cmd *cobra.Command, args []string) error { + cfg, err := common.ParseConfig() + if err != nil { + return err + } + + db, err := dbm.NewGoLevelDB(cfg.DBName, cfg.RootPath()) + if err != nil { + return err + } + evmDB, err := cdb.LoadDB( + "goleveldb", + evmDBName, + cfg.RootPath(), + 256, + 4, + false, + ) + if err != nil { + return err + } + iavlStore, err := store.NewIAVLStore(db, 0, appHeight, 0) + if err != nil { + return err + } + evmStore := store.NewEvmStore(evmDB, 100) + if err := evmStore.LoadVersion(iavlStore.Version()); err != nil { + return err + } + + appStore, err := store.NewMultiWriterAppStore(iavlStore, evmStore, false) + if err != nil { + return err + } + eventHandler := loomchain.NewDefaultEventHandler(events.NewLogEventDispatcher()) + + regVer, err := registry.RegistryVersionFromInt(cfg.RegistryVersion) + if err != nil { + return err + } + createRegistry, err := registry.NewRegistryFactory(regVer) + if err != nil { + return err + } + + receiptHandlerProvider := receipts.NewReceiptHandlerProvider( + eventHandler, + cfg.EVMPersistentTxReceiptsMax, + nil, + ) + + // TODO: This should use snapshot obtained from appStore.ReadOnlyState() + storeTx := store.WrapAtomic(appStore).BeginTx() + state := loomchain.NewStoreState( + context.Background(), + storeTx, + abci.Header{ + Height: appStore.Version(), + }, + // it is possible to load the block hash from the TM block store, but probably don't + // need it for just dumping the EVM state + nil, + 
nil, + ) + + var newABMFactory plugin.NewAccountBalanceManagerFactoryFunc + if evm.EVMEnabled && cfg.EVMAccountsEnabled { + newABMFactory = plugin.NewAccountBalanceManagerFactory + } + + var accountBalanceManager evm.AccountBalanceManager + if newABMFactory != nil { + pvm := plugin.NewPluginVM( + common.NewDefaultContractsLoader(cfg), + state, + createRegistry(state), + eventHandler, + log.Default, + newABMFactory, + receiptHandlerProvider.Writer(), + receiptHandlerProvider.Reader(), + ) + createABM, err := newABMFactory(pvm) + if err != nil { + return err + } + accountBalanceManager = createABM(true) + if err != nil { + return err + } + } + + vm, err := evm.NewLoomEvm(state, accountBalanceManager, nil, false) + if err != nil { + return err + } + + fmt.Printf("\n--- EVM state at app height %d ---\n%s\n", appStore.Version(), string(vm.RawDump())) + return nil + }, + } + + cmdFlags := cmd.Flags() + cmdFlags.Int64Var(&appHeight, "app-height", 0, "Dump EVM state as it was the specified app height") + cmdFlags.StringVar(&evmDBName, "evmdb-name", "evm", "Name of EVM state database") + return cmd +} + func newDumpEVMStateFromEvmDB() *cobra.Command { var appHeight int64 var evmDBName string var dumpStorageTrie bool cmd := &cobra.Command{ - Use: "evm-dump", + Use: "evm-dump-3", Short: "Dumps EVM state stored at a specific block height from evm.db", RunE: func(cmd *cobra.Command, args []string) error { cfg, err := common.ParseConfig() @@ -44,7 +262,7 @@ func newDumpEVMStateFromEvmDB() *cobra.Command { return err } - evmStore := store.NewEvmStore(evmDB, 100, -1) + evmStore := store.NewEvmStore(evmDB, 100) if err := evmStore.LoadVersion(appHeight); err != nil { return err } @@ -53,7 +271,21 @@ func newDumpEVMStateFromEvmDB() *cobra.Command { fmt.Printf("version: %d, root: %x\n", version, root) - srcStateDB := gstate.NewDatabase(store.NewLoomEthDB(evmStore)) + // TODO: This should use snapshot obtained from appStore.ReadOnlyState() + storeTx := store.WrapAtomic(evmStore).BeginTx() + state := loomchain.NewStoreState( + context.Background(), + storeTx, + abci.Header{ + Height: appHeight, + }, + // it is possible to load the block hash from the TM block store, but probably don't + // need it for just dumping the EVM state + nil, + nil, + ) + + srcStateDB := gstate.NewDatabase(evm.NewLoomEthdb(state, nil)) srcStateDBTrie, err := srcStateDB.OpenTrie(evmRoot) if err != nil { fmt.Printf("cannot open trie, %s\n", evmRoot.Hex()) @@ -132,7 +364,7 @@ func newGetEvmHeightCommand() *cobra.Command { } defer db.Close() - evmStore := store.NewEvmStore(db, 100, -1) + evmStore := store.NewEvmStore(db, 100) if err := evmStore.LoadVersion(math.MaxInt64); err != nil { return err } diff --git a/cmd/loom/loom.go b/cmd/loom/loom.go index d7bf7546ea..55caa2bd2f 100644 --- a/cmd/loom/loom.go +++ b/cmd/loom/loom.go @@ -607,15 +607,12 @@ func destroyBlockIndexDB(cfg *config.Config) error { return nil } -func loadAppStore( - cfg *config.Config, logger *loom.Logger, targetVersion int64, -) (store.VersionedKVStore, *store.EvmStore, error) { +func loadAppStore(cfg *config.Config, logger *loom.Logger, targetVersion int64) (store.VersionedKVStore, error) { db, err := cdb.LoadDB( - cfg.DBBackend, cfg.DBName, cfg.RootPath(), cfg.DBBackendConfig.CacheSizeMegs, - cfg.DBBackendConfig.WriteBufferMegs, cfg.Metrics.Database, + cfg.DBBackend, cfg.DBName, cfg.RootPath(), cfg.DBBackendConfig.CacheSizeMegs, cfg.DBBackendConfig.WriteBufferMegs, cfg.Metrics.Database, ) if err != nil { - return nil, nil, err + return nil, err } if 
cfg.AppStore.CompactOnLoad { @@ -629,47 +626,60 @@ func loadAppStore( } var appStore store.VersionedKVStore - var evmStore *store.EvmStore if cfg.AppStore.Version == 1 { // TODO: cleanup these hardcoded numbers - logger.Info("Loading IAVL Store") - appStore, err = store.NewIAVLStore(db, cfg.AppStore.MaxVersions, targetVersion, cfg.AppStore.IAVLFlushInterval) - if err != nil { - return nil, nil, err + if cfg.AppStore.PruneInterval > int64(0) { + logger.Info("Loading Pruning IAVL Store") + appStore, err = store.NewPruningIAVLStore(db, store.PruningIAVLStoreConfig{ + MaxVersions: cfg.AppStore.MaxVersions, + BatchSize: cfg.AppStore.PruneBatchSize, + Interval: time.Duration(cfg.AppStore.PruneInterval) * time.Second, + Logger: logger, + FlushInterval: cfg.AppStore.IAVLFlushInterval, + }) + if err != nil { + return nil, err + } + } else { + logger.Info("Loading IAVL Store") + appStore, err = store.NewIAVLStore(db, cfg.AppStore.MaxVersions, targetVersion, cfg.AppStore.IAVLFlushInterval) + if err != nil { + return nil, err + } } } else if cfg.AppStore.Version == 3 { logger.Info("Loading Multi-Writer App Store") iavlStore, err := store.NewIAVLStore(db, cfg.AppStore.MaxVersions, targetVersion, cfg.AppStore.IAVLFlushInterval) if err != nil { - return nil, nil, err + return nil, err } - evmStore, err = loadEvmStore(cfg, iavlStore.Version()) + evmStore, err := loadEvmStore(cfg, iavlStore.Version()) if err != nil { - return nil, nil, err + return nil, err } appStore, err = store.NewMultiWriterAppStore(iavlStore, evmStore, cfg.AppStore.SaveEVMStateToIAVL) if err != nil { - return nil, nil, err + return nil, err } } else { - return nil, nil, errors.New("Invalid AppStore.Version config setting") + return nil, errors.New("Invalid AppStore.Version config setting") } if cfg.LogStateDB { appStore, err = store.NewLogStore(appStore) if err != nil { - return nil, nil, err + return nil, err } } if cfg.CachingStoreConfig.CachingEnabled { appStore, err = store.NewVersionedCachingStore(appStore, cfg.CachingStoreConfig, appStore.Version()) if err != nil { - return nil, nil, err + return nil, err } logger.Info("VersionedCachingStore enabled") } - return appStore, evmStore, nil + return appStore, nil } func loadEventStore(cfg *config.Config, logger *loom.Logger) (store.EventStore, error) { @@ -700,12 +710,7 @@ func loadEvmStore(cfg *config.Config, targetVersion int64) (*store.EvmStore, err if err != nil { return nil, err } - if cfg.AppStore.IAVLFlushInterval != evmStoreCfg.FlushInterval && - cfg.AppStore.IAVLFlushInterval > 0 && - evmStoreCfg.FlushInterval > 0 { - return nil, errors.New("invalid config, AppStore.IAVLFlushInterval doesn't match EvmStore.FlushInterval") - } - evmStore := store.NewEvmStore(db, evmStoreCfg.NumCachedRoots, evmStoreCfg.FlushInterval) + evmStore := store.NewEvmStore(db, evmStoreCfg.NumCachedRoots) if err := evmStore.LoadVersion(targetVersion); err != nil { return nil, err } @@ -721,7 +726,8 @@ func loadApp( ) (*loomchain.Application, error) { logger := log.Root - appStore, evmStore, err := loadAppStore(cfg, log.Default, appHeight) + appStore, err := loadAppStore(cfg, log.Default, appHeight) + if err != nil { return nil, err } @@ -808,7 +814,6 @@ func loadApp( ), nil }) - var evmState *loomchain.EVMState if evm.EVMEnabled { vmManager.Register(vm.VMType_EVM, func(state loomchain.State) (vm.VM, error) { var createABM evm.AccountBalanceManagerFactoryFunc @@ -831,13 +836,8 @@ func loadApp( } return evm.NewLoomVm(state, eventHandler, receiptHandlerProvider.Writer(), createABM, cfg.EVMDebugEnabled), 
nil }) - - evmState, err = loomchain.NewEVMState(evmStore) - if err != nil { - return nil, err - } } - store.LogEthDBBatch = cfg.LogEthDbBatch + evm.LogEthDbBatch = cfg.LogEthDbBatch deployTxHandler := &vm.DeployTxHandler{ Manager: vmManager, @@ -1134,7 +1134,6 @@ func loadApp( GetValidatorSet: getValidatorSet, EvmAuxStore: evmAuxStore, ReceiptsVersion: cfg.ReceiptsVersion, - EVMState: evmState, }, nil } diff --git a/config/config.go b/config/config.go index f95bdd2e24..3f39230d22 100755 --- a/config/config.go +++ b/config/config.go @@ -750,11 +750,6 @@ AppStore: # If true the app store will write EVM state to both IAVLStore and EvmStore # This config works with AppStore Version 3 (MultiWriterAppStore) only SaveEVMStateToIAVL: {{ .AppStore.SaveEVMStateToIAVL }} - # Specifies the number of IAVL tree versions that should be kept in memory before writing a new - # version to disk. - # If set to zero every version will be written to disk unless overridden via the on-chain config. - # If set to -1 every version will always be written to disk, regardless of the on-chain config. - IAVLFlushInterval: {{ .AppStore.IAVLFlushInterval }} {{if .EventStore -}} # # EventStore @@ -778,12 +773,6 @@ EvmStore: CacheSizeMegs: {{.EvmStore.CacheSizeMegs}} # NumCachedRoots defines a number of in-memory cached EVM roots NumCachedRoots: {{.EvmStore.NumCachedRoots}} - # Specifies the number of Merkle tree versions that should be kept in memory before writing a - # new version to disk. - # If set to zero every version will be written to disk unless overridden via the on-chain config - # AppStore.IAVLFlushInterval setting. - # If set to -1 every version will always be written to disk, regardless of the on-chain config. - FlushInterval: {{.EvmStore.FlushInterval}} {{end}} {{if .Web3 -}} diff --git a/e2e/dposv2-genesis.json b/e2e/dposv2-genesis.json deleted file mode 100644 index ac639bf0e7..0000000000 --- a/e2e/dposv2-genesis.json +++ /dev/null @@ -1,102 +0,0 @@ -{ - "contracts": [ - { - "vm": "plugin", - "format": "plugin", - "name": "coin", - "location": "coin:1.0.0", - "init": null - }, - { - "vm": "plugin", - "format": "plugin", - "name": "dposV2", - "location": "dposV2:2.0.0", - "init": { - "params": { - "validatorCount": "21", - "electionCycleLength": "604800" - }, - "validators": [ - { - "pubKey": "dMI2nJa3ZOxU3yFYNVRYarPOda5b19qZdGENG6yFVVk=", - "power": "10" - } - ] - } - }, - { - "vm": "plugin", - "format": "plugin", - "name": "addressmapper", - "location": "addressmapper:0.1.0", - "init": null - }, - { - "vm": "plugin", - "format": "plugin", - "name": "chainconfig", - "location": "chainconfig:1.0.0", - "init": { - "owner": { - "chainId": "default", - "local": "8ebnLFSTiXXZuVhl8mQJRL8kwJk=" - }, - "features": [ - { - "name": "dpos:v3.1", - "status": "WAITING" - }, - { - "name": "chaincfg:v1.1", - "status": "WAITING" - }, - { - "name": "chaincfg:v1.2", - "status": "WAITING" - }, - { - "name": "chaincfg:v1.3", - "status": "WAITING" - }, - { - "name": "receipts:v2", - "status": "WAITING" - }, - { - "name": "receipts:v3.4", - "status": "WAITING" - }, - { - "name": "receipts:v3.1", - "status": "WAITING" - }, - { - "name": "coin:v1.1", - "status": "WAITING" - }, - { - "name": "appstore:v3.1", - "status": "WAITING" - }, - { - "name": "auth:sigtx:eth", - "status": "WAITING" - }, - { - "name": "tx:eth", - "status": "WAITING" - }, - { - "name": "tx:check-value", - "status": "WAITING" - }, - { - "name": "evm:constantinople", - "status": "WAITING" - } - ] - } - } - ] -} diff --git 
a/e2e/enable-receipts-v2-feature-genesis.json b/e2e/enable-receipts-v2-feature-genesis.json
index 04452b11d5..09349c4a73 100644
--- a/e2e/enable-receipts-v2-feature-genesis.json
+++ b/e2e/enable-receipts-v2-feature-genesis.json
@@ -22,30 +22,6 @@
         "numBlockConfirmations":"1"
       },
       "features": [
-        {
-          "name": "chaincfg:v1.1",
-          "status": "WAITING"
-        },
-        {
-          "name": "chaincfg:v1.2",
-          "status": "WAITING"
-        },
-        {
-          "name": "chaincfg:v1.3",
-          "status": "WAITING"
-        },
-        {
-          "name": "receipts:v3.4",
-          "status": "WAITING"
-        },
-        {
-          "name": "receipts:v3.1",
-          "status": "WAITING"
-        },
-        {
-          "name": "appstore:v3.1",
-          "status": "WAITING"
-        }
       ]
     }
   },
diff --git a/e2e/eth-test-1-loom.yaml b/e2e/eth-test-1-loom.yaml
index f7d6933235..382cafce07 100644
--- a/e2e/eth-test-1-loom.yaml
+++ b/e2e/eth-test-1-loom.yaml
@@ -1,4 +1,3 @@
 AppStore:
-  Version: 3
-DPOSVersion: 2
+  Version: 1
 CreateEmptyBlocks: false
\ No newline at end of file
diff --git a/e2e/eth_test.go b/e2e/eth_test.go
index b2f94ed5c1..e223cb0251 100644
--- a/e2e/eth_test.go
+++ b/e2e/eth_test.go
@@ -18,10 +18,10 @@ func TestEthJSONRPC2(t *testing.T) {
 	}{
 		{"blockNumber", "eth-1-test.toml", 1, 1, "empty-genesis.json", "eth-test-1-loom.yaml"},
 		{"ethPolls", "eth-2-test.toml", 1, 1, "empty-genesis.json", "eth-test-2-loom.yaml"},
-		{"getBlockByNumber", "eth-3-test.toml", 1, 1, "dposv2-genesis.json", "eth-test-1-loom.yaml"},
-		{"getBlockTransactionCountByNumber", "eth-4-test.toml", 1, 1, "dposv2-genesis.json", "eth-test-1-loom.yaml"},
+		{"getBlockByNumber", "eth-3-test.toml", 1, 1, "empty-genesis.json", "eth-test-1-loom.yaml"},
+		{"getBlockTransactionCountByNumber", "eth-4-test.toml", 1, 1, "empty-genesis.json", "eth-test-1-loom.yaml"},
 		{"getLogs", "eth-5-test.toml", 1, 4, "empty-genesis.json", "eth-test-2-loom.yaml"},
-		{"go-getBlockByNumber", "eth-6-test.toml", 1, 3, "dposv2-genesis.json", "eth-test-1-loom.yaml"},
+		{"go-getBlockByNumber", "eth-6-test.toml", 1, 3, "coin.genesis.json", "eth-test-1-loom.yaml"},
 	}

 	for _, test := range tests {
diff --git a/e2e/tests/receipts/run_truffle_tests.sh b/e2e/tests/receipts/run_truffle_tests.sh
index 3d9008aeac..c176227a81 100755
--- a/e2e/tests/receipts/run_truffle_tests.sh
+++ b/e2e/tests/receipts/run_truffle_tests.sh
@@ -13,7 +13,12 @@ bash ../cluster.sh --init --dir $TEST_DIR --start
 cd ../truffle

 # Wait for all built-in contracts to be deployed to the test cluster.
-sleep 5
+if [[ "$OSTYPE" == "darwin"* ]] && [[ "$NODE_NAME" == "osx"* ]]; then
+    # The Jenkins OSX machine is sluggish, so give it more time to spin up the test cluster.
+ sleep 5 +else + sleep 1 +fi # Run Truffle tests using Truffle HDWallet provider & /eth endpoint CLUSTER_DIR=$TEST_DIR/cluster yarn run map-accounts diff --git a/e2e/tests/truffle/test/EvmSnapshot.js b/e2e/tests/truffle/test/EvmSnapshot.js deleted file mode 100644 index 62a997b7ff..0000000000 --- a/e2e/tests/truffle/test/EvmSnapshot.js +++ /dev/null @@ -1,94 +0,0 @@ -const { - waitForXBlocks, - ethGetTransactionCount -} = require('./helpers') -const Web3 = require('web3') -const fs = require('fs') -const path = require('path') -const { - SpeculativeNonceTxMiddleware, - SignedTxMiddleware, - Client, - EthersSigner, - createDefaultTxMiddleware, - Address, - LocalAddress, - CryptoUtils, - LoomProvider, - Contracts -} = require('loom-js') -const ethers = require('ethers').ethers - -const NonceTestContract = artifacts.require('NonceTestContract'); - -// web3 functions called using truffle objects use the loomProvider -// web3 functions called uisng we3js access the loom QueryInterface directly -contract('TestEvmSnapshot', async (accounts) => { - // This test is not provider dependent so just run it with Loom Truffle provider - if (process.env.TRUFFLE_PROVIDER === 'hdwallet') { - return - } - - let contract, from, nodeAddr - - beforeEach(async () => { - nodeAddr = fs.readFileSync(path.join(process.env.CLUSTER_DIR, '0', 'node_rpc_addr'), 'utf-8').trim() - - const client = new Client('default', `ws://${nodeAddr}/websocket`, `ws://${nodeAddr}/queryws`) - client.on('error', msg => { - console.error('Error on connect to client', msg) - console.warn('Please verify if loom cluster is running') - }) - const privKey = CryptoUtils.generatePrivateKey() - const pubKey = CryptoUtils.publicKeyFromPrivateKey(privKey) - client.txMiddleware = createDefaultTxMiddleware(client, privKey); - - const setupMiddlewareFn = function (client, privateKey) { - const publicKey = CryptoUtils.publicKeyFromPrivateKey(privateKey) - return [new SpeculativeNonceTxMiddleware(publicKey, client), new SignedTxMiddleware(privateKey)] - } - const loomProvider = new LoomProvider(client, privKey, setupMiddlewareFn) - const web3 = new Web3(loomProvider) - - // Create a mapping between a DAppChain account & an Ethereum account so that - // ethGetTransactionCount can resolve the Ethereum address it's given to a DAppChain address - const localAddr = new Address(client.chainId, LocalAddress.fromPublicKey(pubKey)); - const addressMapper = await Contracts.AddressMapper.createAsync(client, localAddr); - const ethAccount = web3.eth.accounts.create(); - const ethWallet = new ethers.Wallet(ethAccount.privateKey); - await addressMapper.addIdentityMappingAsync( - localAddr, - new Address('eth', LocalAddress.fromHexString(ethAccount.address)), - new EthersSigner(ethWallet) - ); - from = ethAccount.address - - const nonceTestContract = await NonceTestContract.deployed() - contract = new web3.eth.Contract( - NonceTestContract._json.abi, - nonceTestContract.address, - // contract calls go through LoomProvider, which expect the sender address to be - // a local address (not an eth address) - { - from: localAddr.local.toString() - } - ); - }) - - // SnapshotTest generates a lot of txs and queries and send them to a contract almost at the same time. - // This test ensures that the snapshot does not have a concurrent read/write problem. 
- it('SnapshotTest', async () => { - for (var i = 0; i < 50; i++) { - contract.methods.set(7777).send().then() - contract.methods.get().call().then() - } - for (var i = 0; i < 50; i++) { - contract.methods.set(8888).send().then() - contract.methods.get().call().then() - } - await waitForXBlocks(nodeAddr, 5) - await contract.methods.set(9999).send().then() - assert.equal(await contract.methods.get().call(), 9999) - }); - -}); \ No newline at end of file diff --git a/evm/config.go b/evm/config.go index 1d8575d205..4239a74b11 100644 --- a/evm/config.go +++ b/evm/config.go @@ -12,12 +12,6 @@ type EvmStoreConfig struct { WriteBufferMegs int // NumCachedRoots defines a number of in-memory cached EVM roots NumCachedRoots int - // Specifies the number of Merkle tree versions that should be kept in memory before writing a - // new version to disk. - // If set to zero every version will be written to disk unless overridden via the on-chain config - // AppStore.IAVLFlushInterval setting. - // If set to -1 every version will always be written to disk, regardless of the on-chain config. - FlushInterval int64 } func DefaultEvmStoreConfig() *EvmStoreConfig { @@ -26,8 +20,7 @@ func DefaultEvmStoreConfig() *EvmStoreConfig { DBBackend: "goleveldb", CacheSizeMegs: 256, WriteBufferMegs: 4, - NumCachedRoots: 500, - FlushInterval: 0, + NumCachedRoots: 100, } } diff --git a/evm/evm_test.go b/evm/evm_test.go index f60778f472..217d2c3f5d 100644 --- a/evm/evm_test.go +++ b/evm/evm_test.go @@ -18,7 +18,6 @@ import ( ethvm "github.com/ethereum/go-ethereum/core/vm" "github.com/loomnetwork/go-loom" "github.com/loomnetwork/loomchain" - "github.com/loomnetwork/loomchain/db" "github.com/loomnetwork/loomchain/features" "github.com/loomnetwork/loomchain/store" lvm "github.com/loomnetwork/loomchain/vm" @@ -41,17 +40,7 @@ func mockState() loomchain.State { header := abci.Header{} header.Height = BlockHeight header.Time = blockTime - return loomchain.NewStoreState(context.Background(), store.NewMemStore(), header, nil, nil). 
- WithEVMState(mockEVMState()) -} - -func mockEVMState() *loomchain.EVMState { - memDb, _ := db.LoadMemDB() - evmState, err := loomchain.NewEVMState(store.NewEvmStore(memDb, 100, 0)) - if err != nil { - panic(err) - } - return evmState + return loomchain.NewStoreState(context.Background(), store.NewMemStore(), header, nil, nil) } func TestProcessDeployTx(t *testing.T) { @@ -220,11 +209,17 @@ func TestGlobals(t *testing.T) { vm, _ := manager.InitVM(lvm.VMType_EVM, state) abiGP, gPAddr := deploySolContract(t, caller, "GlobalProperties", vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testNow(t, abiGP, caller, gPAddr, vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testBlockTimeStamp(t, abiGP, caller, gPAddr, vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testBlockNumber(t, abiGP, caller, gPAddr, vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testTxOrigin(t, abiGP, caller, gPAddr, vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testMsgSender(t, abiGP, caller, gPAddr, vm) + vm, _ = manager.InitVM(lvm.VMType_EVM, state) testMsgValue(t, abiGP, caller, gPAddr, vm) } diff --git a/evm/loom_statedb.go b/evm/loom_statedb.go index 4ac88b4054..e1357fac67 100644 --- a/evm/loom_statedb.go +++ b/evm/loom_statedb.go @@ -16,7 +16,11 @@ type LoomStateDB struct { abm *evmAccountBalanceManager } -func newLoomStateDB(abm *evmAccountBalanceManager, sdb *state.StateDB) (*LoomStateDB, error) { +func newLoomStateDB(abm *evmAccountBalanceManager, root common.Hash, db state.Database) (*LoomStateDB, error) { + sdb, err := state.New(root, db) + if err != nil { + return nil, err + } return &LoomStateDB{ StateDB: sdb, abm: abm, diff --git a/store/loomethdb.go b/evm/loomethdb.go similarity index 51% rename from store/loomethdb.go rename to evm/loomethdb.go index edf7faaa25..9d5024f333 100644 --- a/store/loomethdb.go +++ b/evm/loomethdb.go @@ -1,94 +1,89 @@ -package store +// +build evm + +package evm import ( "bytes" "log" "os" "sort" + "sync" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethdb" - loom "github.com/loomnetwork/go-loom" - "github.com/loomnetwork/go-loom/util" - dbm "github.com/tendermint/tendermint/libs/db" + "github.com/loomnetwork/loomchain" + "github.com/loomnetwork/loomchain/store" ) var ( - LogEthDBBatch = true + LogEthDbBatch = true logger log.Logger loggerStarted = false ) -// EthDBLogContext provides additional context when -type EthDBLogContext struct { - blockHeight int64 - contractAddr loom.Address - callerAddr loom.Address -} - -func NewEthDBLogContext(height int64, contractAddr loom.Address, callerAddr loom.Address) *EthDBLogContext { - return &EthDBLogContext{ - blockHeight: height, - contractAddr: contractAddr, - callerAddr: callerAddr, - } -} - -// LoomEthDB implements ethdb.Database -type LoomEthDB struct { - store *EvmStore +// implements ethdb.Database +type LoomEthdb struct { + state store.KVStore + lock sync.RWMutex + logContext *ethdbLogContext } -func NewLoomEthDB(evmStore *EvmStore) *LoomEthDB { - return &LoomEthDB{ - store: evmStore, - } +func NewLoomEthdb(_state loomchain.State, logContext *ethdbLogContext) *LoomEthdb { + p := new(LoomEthdb) + p.state = store.PrefixKVStore(vmPrefix, _state) + p.logContext = logContext + return p } -func (s *LoomEthDB) Put(key []byte, value []byte) error { - s.store.Set(util.PrefixKey(vmPrefix, key), value) +func (s *LoomEthdb) Put(key []byte, value []byte) error { + s.state.Set(key, value) return nil } -func (s *LoomEthDB) Get(key []byte) ([]byte, error) { - return 
s.store.Get(util.PrefixKey(vmPrefix, key)), nil +func (s *LoomEthdb) Get(key []byte) ([]byte, error) { + return s.state.Get(key), nil } -func (s *LoomEthDB) Has(key []byte) (bool, error) { - return s.store.Has(util.PrefixKey(vmPrefix, key)), nil +func (s *LoomEthdb) Has(key []byte) (bool, error) { + return s.state.Has(key), nil } -func (s *LoomEthDB) Delete(key []byte) error { - s.store.Delete(util.PrefixKey(vmPrefix, key)) +func (s *LoomEthdb) Delete(key []byte) error { + s.state.Delete(key) return nil } -func (s *LoomEthDB) Close() { +func (s *LoomEthdb) Close() { } -func (s *LoomEthDB) NewBatch() ethdb.Batch { - if LogEthDBBatch { - return s.NewLogBatch(nil) +func (s *LoomEthdb) NewBatch() ethdb.Batch { + if LogEthDbBatch { + return s.NewLogBatch(s.logContext) + } else { + newBatch := new(batch) + newBatch.parentStore = s + newBatch.Reset() + return newBatch } - return newBatch(s.store) } // implements ethdb.Batch -type batch struct { - dbBatch dbm.Batch - parentStore *EvmStore - size int +type kvPair struct { + key []byte + value []byte } -func newBatch(store *EvmStore) *batch { - return &batch{ - dbBatch: store.NewBatch(), - parentStore: store, - } +type batch struct { + cache []kvPair + parentStore *LoomEthdb + size int } func (b *batch) Put(key, value []byte) error { - b.dbBatch.Set(util.PrefixKey(vmPrefix, key), value) + b.cache = append(b.cache, kvPair{ + key: common.CopyBytes(key), + value: common.CopyBytes(value), + }) b.size += len(value) return nil } @@ -98,22 +93,46 @@ func (b *batch) ValueSize() int { } func (b *batch) Write() error { - b.dbBatch.Write() + b.parentStore.lock.Lock() + defer b.parentStore.lock.Unlock() + + sort.Slice(b.cache, func(j, k int) bool { + return bytes.Compare(b.cache[j].key, b.cache[k].key) < 0 + }) + + for _, kv := range b.cache { + if kv.value == nil { + b.parentStore.Delete(kv.key) + } else { + b.parentStore.Put(kv.key, kv.value) + } + } return nil } func (b *batch) Reset() { - b.dbBatch.Close() - b.dbBatch = b.parentStore.NewBatch() + b.cache = make([]kvPair, 0) b.size = 0 } func (b *batch) Delete(key []byte) error { - b.dbBatch.Delete(util.PrefixKey(vmPrefix, key)) + b.cache = append(b.cache, kvPair{ + key: common.CopyBytes(key), + value: nil, + }) return nil } -type EthDBLogParams struct { +func (b *batch) Dump(logger *log.Logger) { + b.parentStore.lock.Lock() + defer b.parentStore.lock.Unlock() + logger.Print("\n---- BATCH DUMP ----\n") + for i, kv := range b.cache { + logger.Printf("IDX %d, KEY %s\n", i, kv.key) + } +} + +type LogParams struct { LogFilename string LogFlags int LogReset bool @@ -127,15 +146,9 @@ type EthDBLogParams struct { LogBeforeWriteDump bool } -type kvPair struct { - key []byte - value []byte -} type LogBatch struct { - parentStore *EvmStore - size int - params EthDBLogParams - cache []kvPair + batch batch + params LogParams } const batchHeaderWithContext = ` @@ -157,28 +170,29 @@ const batchHeader = ` ` -func (s *LoomEthDB) NewLogBatch(logContext *EthDBLogContext) ethdb.Batch { - b := &LogBatch{ - parentStore: s.store, - params: EthDBLogParams{ - LogFilename: "ethdb-batch.log", - LogFlags: 0, - LogReset: true, - LogDelete: true, - LogWrite: true, - LogValueSize: false, - LogPutKey: true, - LogPutValue: false, - LogPutDump: false, - LogWriteDump: true, - LogBeforeWriteDump: false, - }, +func (s *LoomEthdb) NewLogBatch(logContext *ethdbLogContext) ethdb.Batch { + b := new(LogBatch) + b.batch = *new(batch) + b.batch.parentStore = s + b.batch.Reset() + b.params = LogParams{ + LogFilename: "ethdb-batch.log", + LogFlags: 0, 
+ LogReset: true, + LogDelete: true, + LogWrite: true, + LogValueSize: false, + LogPutKey: true, + LogPutValue: false, + LogPutDump: false, + LogWriteDump: true, + LogBeforeWriteDump: false, } if !loggerStarted { file, err := os.Create(b.params.LogFilename) if err != nil { - panic(err) + return &b.batch } logger = *log.New(file, "", b.params.LogFlags) logger.Println("Created ethdb batch logger") @@ -196,11 +210,7 @@ func (b *LogBatch) Delete(key []byte) error { if b.params.LogDelete { logger.Println("Delete key: ", string(key)) } - b.cache = append(b.cache, kvPair{ - key: common.CopyBytes(key), - value: nil, - }) - return nil + return b.batch.Delete(key) } func (b *LogBatch) Put(key, value []byte) error { @@ -210,22 +220,19 @@ func (b *LogBatch) Put(key, value []byte) error { if b.params.LogPutValue { logger.Println("Put value: ", string(value)) } - b.cache = append(b.cache, kvPair{ - key: common.CopyBytes(key), - value: common.CopyBytes(value), - }) - b.size += len(value) + err := b.batch.Put(key, value) if b.params.LogPutDump { - b.Dump(&logger) + b.batch.Dump(&logger) } - return nil + return err } func (b *LogBatch) ValueSize() int { + size := b.batch.ValueSize() if b.params.LogValueSize { - logger.Println("ValueSize : ", b.size) + logger.Println("ValueSize : ", size) } - return b.size + return size } func (b *LogBatch) Write() error { @@ -234,41 +241,39 @@ func (b *LogBatch) Write() error { } if b.params.LogBeforeWriteDump { logger.Println("Write, before : ") - b.Dump(&logger) + b.batch.Dump(&logger) } - - sort.Slice(b.cache, func(j, k int) bool { - return bytes.Compare(b.cache[j].key, b.cache[k].key) < 0 - }) - - dbBatch := b.parentStore.NewBatch() - for _, kv := range b.cache { - if kv.value == nil { - dbBatch.Delete(util.PrefixKey(vmPrefix, kv.key)) - } else { - dbBatch.Set(util.PrefixKey(vmPrefix, kv.key), kv.value) - } - } - dbBatch.Write() - + err := b.batch.Write() if b.params.LogWriteDump { logger.Println("Write, after : ") - b.Dump(&logger) + b.batch.Dump(&logger) } - return nil + return err } func (b *LogBatch) Reset() { if b.params.LogReset { logger.Println("Reset batch") } - b.cache = make([]kvPair, 0) - b.size = 0 + b.batch.Reset() } -func (b *LogBatch) Dump(logger *log.Logger) { - logger.Print("\n---- BATCH DUMP ----\n") - for i, kv := range b.cache { - logger.Printf("IDX %d, KEY %s\n", i, kv.key) +// sortKeys sorts prefixed keys, it will sort the postfix of the key in ascending lexographical order +func sortKeys(prefix []byte, kvs []kvPair) []kvPair { + var unsorted, sorted []int + var tmpKv []kvPair + for i, kv := range kvs { + if 0 == bytes.Compare(prefix, kv.key[:len(prefix)]) { + unsorted = append(unsorted, i) + sorted = append(sorted, i) + } + tmpKv = append(tmpKv, kv) + } + sort.Slice(sorted, func(j, k int) bool { + return bytes.Compare(kvs[sorted[j]].key, kvs[sorted[k]].key) < 0 + }) + for index := 0; index < len(sorted); index++ { + kvs[unsorted[index]] = tmpKv[sorted[index]] } + return kvs } diff --git a/evm/loomethdb_test.go b/evm/loomethdb_test.go new file mode 100644 index 0000000000..2fbab564c8 --- /dev/null +++ b/evm/loomethdb_test.go @@ -0,0 +1,128 @@ +// +build evm + +package evm + +import ( + "bytes" + "sort" + "testing" + + "github.com/stretchr/testify/require" +) + +// This test only verifies running a sort twice gives same result +func TestSortKeys(t *testing.T) { + test1 := []kvPair{ + {[]byte("prefixFred"), []byte("data1")}, + {[]byte("noPrefixMary"), []byte("data2")}, + {[]byte("noPrefixJohn"), []byte("data3")}, + {[]byte("prefixSally"), 
[]byte("data4")}, + {[]byte("noPrefixBob"), []byte("data5")}, + {[]byte("prefixAnne"), []byte("data6")}, + } + test1 = sortKeys([]byte("prefix"), test1) + + test2 := []kvPair{ + {[]byte("prefixSally"), []byte("data4")}, + {[]byte("noPrefixMary"), []byte("data2")}, + {[]byte("noPrefixJohn"), []byte("data3")}, + {[]byte("prefixAnne"), []byte("data6")}, + {[]byte("noPrefixBob"), []byte("data5")}, + {[]byte("prefixFred"), []byte("data1")}, + } + + test2 = sortKeys([]byte("prefix"), test2) + for i := 0; i < len(test1); i++ { + require.Equal(t, 0, bytes.Compare(test1[i].key, test2[i].key)) + } +} + +// This test verifies that prefixed items are sorted by ascending order +func TestSortKeys2(t *testing.T) { + test1 := []kvPair{ + {[]byte("prefixSally"), []byte("data4")}, + {[]byte("prefixFred"), []byte("data1")}, + {[]byte("noPrefixMary"), []byte("data2")}, + {[]byte("noPrefixJohn"), []byte("data3")}, + {[]byte("noPrefixBob"), []byte("data5")}, + {[]byte("prefixAnne"), []byte("data6")}, + } + test1 = sortKeys([]byte("prefix"), test1) + + test2 := []kvPair{ + {[]byte("prefixAnne"), []byte("data6")}, + {[]byte("prefixFred"), []byte("data1")}, + {[]byte("noPrefixMary"), []byte("data2")}, + {[]byte("noPrefixJohn"), []byte("data3")}, + {[]byte("noPrefixBob"), []byte("data5")}, + {[]byte("prefixSally"), []byte("data4")}, + } + + for i := 0; i < len(test1); i++ { + require.Equal(t, string(test2[i].key), string(test1[i].key)) + } +} + +// Real life example +func TestSortSecureKeys(t *testing.T) { + test1 := []kvPair{ + {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, + {[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, + {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, + {[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, + {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), []byte("data5")}, + {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, + {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, + } + test1 = sortKeys([]byte("secure-key-"), test1) + + test2 := []kvPair{ + {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, + {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, + {[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, + {[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, + {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), []byte("data5")}, + {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, + {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, + } + + test2 = sortKeys([]byte("secure-key-"), test2) + + for i := 0; i < len(test1); i++ { + require.Equal(t, 0, bytes.Compare(test1[i].key, test2[i].key)) + } +} + +func TestSortBarch(t *testing.T) { + test1 := []kvPair{ + {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, + {[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, + {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, + {[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, + {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), []byte("data5")}, + {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, + {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, + } + sort.Slice(test1, func(j, k int) bool { + return bytes.Compare(test1[j].key, test1[k].key) < 0 + }) + + test2 := []kvPair{ + {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, + {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, + {[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, + 
{[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, + {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), []byte("data5")}, + {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, + {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, + } + + sort.Slice(test2, func(j, k int) bool { + return bytes.Compare(test2[j].key, test2[k].key) < 0 + }) + + for i := 0; i < len(test1); i++ { + require.Equal(t, 0, bytes.Compare(test1[i].key, test2[i].key)) + } + +} diff --git a/evm/loomevm.go b/evm/loomevm.go index 1e39d33f11..3d3ae3ff3f 100644 --- a/evm/loomevm.go +++ b/evm/loomevm.go @@ -22,7 +22,6 @@ import ( "github.com/loomnetwork/loomchain/features" "github.com/loomnetwork/loomchain/receipts" "github.com/loomnetwork/loomchain/receipts/handler" - "github.com/loomnetwork/loomchain/store" "github.com/loomnetwork/loomchain/vm" "github.com/pkg/errors" ) @@ -39,6 +38,12 @@ type StateDB interface { Commit(bool) (common.Hash, error) } +type ethdbLogContext struct { + blockHeight int64 + contractAddr loom.Address + callerAddr loom.Address +} + // TODO: this doesn't need to be exported, rename to loomEvmWithState type LoomEvm struct { *Evm @@ -49,24 +54,44 @@ type LoomEvm struct { // TODO: this doesn't need to be exported, rename to newLoomEvmWithState func NewLoomEvm( loomState loomchain.State, accountBalanceManager AccountBalanceManager, - logContext *store.EthDBLogContext, debug bool, + logContext *ethdbLogContext, debug bool, ) (*LoomEvm, error) { - p := &LoomEvm{} + p := new(LoomEvm) + p.db = NewLoomEthdb(loomState, logContext) + oldRoot, err := p.db.Get(rootKey) + if err != nil { + return nil, err + } + var abm *evmAccountBalanceManager - var err error if accountBalanceManager != nil { abm = newEVMAccountBalanceManager(accountBalanceManager, loomState.Block().ChainID) - p.sdb, err = newLoomStateDB(abm, loomState.EVMState().StateDB()) - if err != nil { - return nil, err - } + p.sdb, err = newLoomStateDB(abm, common.BytesToHash(oldRoot), state.NewDatabase(p.db)) } else { - p.sdb = loomState.EVMState().StateDB() + p.sdb, err = state.New(common.BytesToHash(oldRoot), state.NewDatabase(p.db)) + } + if err != nil { + return nil, err } + p.Evm = NewEvm(p.sdb, loomState, abm, debug) return p, nil } +func (levm LoomEvm) Commit() (common.Hash, error) { + root, err := levm.sdb.Commit(true) + if err != nil { + return root, err + } + if err := levm.sdb.Database().TrieDB().Commit(root, false); err != nil { + return root, err + } + if err := levm.db.Put(rootKey, root[:]); err != nil { + return root, err + } + return root, err +} + func (levm LoomEvm) RawDump() []byte { d := levm.sdb.RawDump() output, err := json.MarshalIndent(d, "", " ") @@ -121,31 +146,26 @@ func (lvm LoomVm) accountBalanceManager(readOnly bool) AccountBalanceManager { } func (lvm LoomVm) Create(caller loom.Address, code []byte, value *loom.BigUInt) ([]byte, loom.Address, error) { - logContext := store.NewEthDBLogContext(lvm.state.Block().Height, loom.Address{}, caller) + logContext := ðdbLogContext{ + blockHeight: lvm.state.Block().Height, + contractAddr: loom.Address{}, + callerAddr: caller, + } levm, err := NewLoomEvm(lvm.state, lvm.accountBalanceManager(false), logContext, lvm.debug) if err != nil { return nil, loom.Address{}, err } - stateDB := levm.sdb - lastLogsIndex := len(stateDB.Logs()) - // evm.Create changes Nonce even though tx fails - // To prevent any state change from error tx, create a snapshot and revert EVM state if tx fails - snapshot := stateDB.Snapshot() bytecode, addr, err := levm.Create(caller, code, value) - if err != nil 
{ - stateDB.RevertToSnapshot(snapshot) + if err == nil { + _, err = levm.Commit() } var txHash []byte if lvm.receiptHandler != nil { var events []*ptypes.EventData if err == nil { - addedLogs := stateDB.Logs() - if len(addedLogs) > 0 { - addedLogs = addedLogs[lastLogsIndex:] - } events = lvm.receiptHandler.GetEventsFromLogs( - addedLogs, lvm.state.Block().Height, caller, addr, code, + levm.sdb.Logs(), lvm.state.Block().Height, caller, addr, code, ) } @@ -189,30 +209,26 @@ func (lvm LoomVm) Create(caller loom.Address, code []byte, value *loom.BigUInt) } func (lvm LoomVm) Call(caller, addr loom.Address, input []byte, value *loom.BigUInt) ([]byte, error) { - logContext := store.NewEthDBLogContext(lvm.state.Block().Height, addr, caller) + logContext := ðdbLogContext{ + blockHeight: lvm.state.Block().Height, + contractAddr: addr, + callerAddr: caller, + } levm, err := NewLoomEvm(lvm.state, lvm.accountBalanceManager(false), logContext, lvm.debug) if err != nil { return nil, err } - stateDB := levm.sdb - lastLogsIndex := len(stateDB.Logs()) - // To prevent any state change from error tx, create a snapshot and revert EVM state if tx fails - snapshot := stateDB.Snapshot() _, err = levm.Call(caller, addr, input, value) - if err != nil { - stateDB.RevertToSnapshot(snapshot) + if err == nil { + _, err = levm.Commit() } var txHash []byte if lvm.receiptHandler != nil { var events []*ptypes.EventData if err == nil { - addedLogs := stateDB.Logs() - if len(addedLogs) > 0 { - addedLogs = addedLogs[lastLogsIndex:] - } events = lvm.receiptHandler.GetEventsFromLogs( - addedLogs, lvm.state.Block().Height, caller, addr, input, + levm.sdb.Logs(), lvm.state.Block().Height, caller, addr, input, ) } diff --git a/evm/test_cryptozombies.go b/evm/test_cryptozombies.go index d30080b2cd..d0bf1bb02c 100644 --- a/evm/test_cryptozombies.go +++ b/evm/test_cryptozombies.go @@ -82,9 +82,15 @@ func testCryptoZombiesUpdateState(t *testing.T, state loomchain.State, caller lo vm, _ := manager.InitVM(lvm.VMType_PLUGIN, state) kittyAddr := deployContract(t, vm, motherKat, kittyData.Bytecode, kittyData.RuntimeBytecode) + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) zOwnershipAddr := deployContract(t, vm, caller, zOwnershipData.Bytecode, zOwnershipData.RuntimeBytecode) + + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) checkKitty(t, vm, caller, kittyAddr, kittyData) + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) makeZombie(t, vm, caller, zOwnershipAddr, zOwnershipData, "EEK") + + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) greedyZombie := getZombies(t, vm, caller, zOwnershipAddr, zOwnershipData, 0) // greedy zombie should look like: //{ @@ -101,8 +107,12 @@ func testCryptoZombiesUpdateState(t *testing.T, state loomchain.State, caller lo t.Error("Wrong dna for greedy zombie") } + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) setKittyAddress(t, vm, caller, kittyAddr, zOwnershipAddr, zOwnershipData) + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) zombieFeed(t, vm, caller, zOwnershipAddr, zOwnershipData, 0, 67) + + vm, _ = manager.InitVM(lvm.VMType_PLUGIN, state) newZombie := getZombies(t, vm, caller, zOwnershipAddr, zOwnershipData, 1) // New zombie should look like //{ diff --git a/evm_state.go b/evm_state.go deleted file mode 100644 index a5f143a569..0000000000 --- a/evm_state.go +++ /dev/null @@ -1,86 +0,0 @@ -package loomchain - -import ( - "bytes" - "fmt" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/state" - "github.com/loomnetwork/loomchain/store" -) - -// EVMState 
contains the mutable EVM state. -type EVMState struct { - sdb *state.StateDB - evmStore *store.EvmStore -} - -// NewEVMState returns the EVM state corresponding to the current version of the given store. -func NewEVMState(evmStore *store.EvmStore) (*EVMState, error) { - evmRoot, _ := evmStore.Version() - sdb, err := state.New(common.BytesToHash(evmRoot), state.NewDatabaseWithTrieDB(evmStore.TrieDB())) - if err != nil { - return nil, err - } - return &EVMState{ - evmStore: evmStore, - sdb: sdb, - }, nil -} - -// Commit writes the state changes that occurred since the previous commit to the underlying store. -func (s *EVMState) Commit() error { - if s.evmStore == nil { - panic("EvmStore is nil") - } - evmStateRoot, err := s.sdb.Commit(true) - if err != nil { - return err - } - s.evmStore.SetCurrentRoot(evmStateRoot[:]) - // Clear out old state data such as logs and cache to free up memory - s.sdb.Reset(evmStateRoot) - return nil -} - -// GetSnapshot returns the EVMState instance containing the state as it was at the given version. -// The specified root is expected to match the root of the returned state, if the roots don't match -// an error will be returned. -// NOTE: Do not call Commit on the returned instance. -func (s *EVMState) GetSnapshot(version int64, root []byte) (*EVMState, error) { - r, v := s.evmStore.GetRootAt(version) - if !bytes.Equal(r, root) { - return nil, fmt.Errorf( - "EVM roots mismatch, expected (%d): %X, actual (%d): %X", - version, root, v, r, - ) - } - // The cachingDB instance created by state.NewDatabaseWithTrieDB() contains a codeSizeCache which - // probably shouldn't be shared between the EVMState instance used by the tx handlers and the - // snapshots instances used by the query server. Which is why NewDatabaseWithTrieDB() is used - // here instead of s.sdb.Database(). - sdb, err := state.New( - common.BytesToHash(r), - state.NewDatabaseWithTrieDB(s.evmStore.TrieDB()), - ) - if err != nil { - return nil, err - } - return &EVMState{ - evmStore: nil, // this will ensure that Commit() will panic - sdb: sdb, - }, nil -} - -// Clone returns a copy of the EVMState instance. -// NOTE: Do not call Commit on the returned instance. 
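// --- Illustrative sketch (not part of this patch): how the EVMState type
// deleted here was used over the course of a block. This only compiles
// against the pre-revert tree; runBlock is a made-up name and the store
// setup is elided.
package evmsketch

import (
	"github.com/loomnetwork/loomchain"
	"github.com/loomnetwork/loomchain/store"
)

func runBlock(evmStore *store.EvmStore) error {
	// Open the mutable EVM state at the store's current version/root.
	evmState, err := loomchain.NewEVMState(evmStore)
	if err != nil {
		return err
	}
	// CheckTx ran against a throwaway copy (see the CheckTx hunk in app.go
	// above), so speculative txs never touched the canonical state; a clone's
	// evmStore is nil, which makes Commit on it panic by design.
	_ = evmState.Clone()

	// DeliverTx mutated the state through StateDB(); Commit at the end of the
	// block wrote the new EVM root back into the EvmStore.
	_ = evmState.StateDB()
	return evmState.Commit()
}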
-func (s *EVMState) Clone() *EVMState { - return &EVMState{ - evmStore: nil, // this will ensure that Commit() will panic - sdb: s.sdb.Copy(), - } -} - -func (s *EVMState) StateDB() *state.StateDB { - return s.sdb -} diff --git a/plugin/fake_context.go b/plugin/fake_context.go index 2db97990ee..84ddb384fe 100644 --- a/plugin/fake_context.go +++ b/plugin/fake_context.go @@ -10,9 +10,7 @@ import ( "github.com/loomnetwork/go-loom/plugin" "github.com/loomnetwork/go-loom/types" "github.com/loomnetwork/loomchain" - cdb "github.com/loomnetwork/loomchain/db" levm "github.com/loomnetwork/loomchain/evm" - "github.com/loomnetwork/loomchain/store" abci "github.com/tendermint/tendermint/abci/types" ) @@ -37,18 +35,9 @@ func CreateFakeContextWithEVM(caller, address loom.Address) *FakeContextWithEVM }, ) state := loomchain.NewStoreState(context.Background(), ctx, block, nil, nil) - evmDB, err := cdb.LoadDB("memdb", "", "", 256, 4, false) - if err != nil { - panic(err) - } - evmStore := store.NewEvmStore(evmDB, 100, 0) - evmState, err := loomchain.NewEVMState(evmStore) - if err != nil { - panic(err) - } return &FakeContextWithEVM{ FakeContext: ctx, - State: state.WithEVMState(evmState), + State: state, } } diff --git a/plugin/vm_test.go b/plugin/vm_test.go index 151117ae61..f089fab0e3 100644 --- a/plugin/vm_test.go +++ b/plugin/vm_test.go @@ -19,7 +19,6 @@ import ( ptypes "github.com/loomnetwork/go-loom/plugin/types" "github.com/loomnetwork/go-loom/testdata" "github.com/loomnetwork/loomchain" - "github.com/loomnetwork/loomchain/db" "github.com/loomnetwork/loomchain/eth/subs" "github.com/loomnetwork/loomchain/events" levm "github.com/loomnetwork/loomchain/evm" @@ -123,15 +122,6 @@ func (c *VMTestContract) CheckQueryCaller(ctx contract.StaticContext, args *test return &testdata.StaticCallResult{}, nil } -func mockEVMState() *loomchain.EVMState { - memDb, _ := db.LoadMemDB() - evmState, err := loomchain.NewEVMState(store.NewEvmStore(memDb, 100, 0)) - if err != nil { - panic(err) - } - return evmState -} - func TestPluginVMContractContextCaller(t *testing.T) { fc1 := &VMTestContract{t: t, Name: "fakecontract1"} @@ -147,7 +137,7 @@ func TestPluginVMContractContextCaller(t *testing.T) { Height: int64(34), Time: time.Unix(123456789, 0), } - state := loomchain.NewStoreState(context.Background(), store.NewMemStore(), block, nil, nil).WithEVMState(mockEVMState()) + state := loomchain.NewStoreState(context.Background(), store.NewMemStore(), block, nil, nil) createRegistry, err := registry.NewRegistryFactory(registry.LatestRegistryVersion) require.NoError(t, err) diff --git a/store/evmstore.go b/store/evmstore.go index 5f0d5a5155..cc01297bd3 100644 --- a/store/evmstore.go +++ b/store/evmstore.go @@ -6,8 +6,6 @@ import ( "sort" "time" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/trie" "github.com/go-kit/kit/metrics" kitprometheus "github.com/go-kit/kit/metrics/prometheus" lru "github.com/hashicorp/golang-lru" @@ -16,7 +14,6 @@ import ( "github.com/loomnetwork/loomchain/db" "github.com/pkg/errors" stdprometheus "github.com/prometheus/client_golang/prometheus" - dbm "github.com/tendermint/tendermint/libs/db" ) var ( @@ -56,32 +53,32 @@ func getVersionFromEvmRootKey(key []byte) (int64, error) { // EvmStore persists EVM state to a DB. 
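The revert replaces the trie.Database plumbing with the store's original write-back buffer: Set and Delete land in an in-memory map, and Commit applies the whole map to evm.db in one batch. A condensed sketch of that pattern, using the cacheItem type from store/store.go:

    // Buffer one write and one delete, then flush both in a single batch.
    s.cache["foo"] = cacheItem{Value: []byte("bar")}
    s.cache["baz"] = cacheItem{Deleted: true}

    batch := s.evmDB.NewBatch()
    for k, item := range s.cache {
        if item.Deleted {
            batch.Delete([]byte(k))
        } else {
            batch.Set([]byte(k), item.Value)
        }
    }
    batch.Write()
    s.cache = make(map[string]cacheItem) // start clean for the next version
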
type EvmStore struct { evmDB db.DBWrapper + cache map[string]cacheItem rootHash []byte lastSavedRoot []byte rootCache *lru.Cache version int64 - trieDB *trie.Database - flushInterval int64 } // NewEvmStore returns a new instance of the store backed by the given DB. -func NewEvmStore(evmDB db.DBWrapper, numCachedRoots int, flushInterval int64) *EvmStore { +func NewEvmStore(evmDB db.DBWrapper, numCachedRoots int) *EvmStore { rootCache, err := lru.New(numCachedRoots) if err != nil { panic(err) } evmStore := &EvmStore{ - evmDB: evmDB, - rootCache: rootCache, - flushInterval: flushInterval, + evmDB: evmDB, + cache: make(map[string]cacheItem), + rootCache: rootCache, } - ethDB := NewLoomEthDB(evmStore) - evmStore.trieDB = trie.NewDatabase(ethDB) return evmStore } -func (s *EvmStore) NewBatch() dbm.Batch { - return s.evmDB.NewBatch() +func (s *EvmStore) setCache(key, val []byte, deleted bool) { + s.cache[string(key)] = cacheItem{ + Value: val, + Deleted: deleted, + } } // Range iterates in-order over the keys in the store prefixed by the given prefix. @@ -105,6 +102,21 @@ func (s *EvmStore) Range(prefix []byte) plugin.RangeData { } } + // Update range cache with data in cache + for key, c := range s.cache { + if util.HasPrefix([]byte(key), prefix) || len(prefix) == 0 { + if c.Deleted { + rangeCacheKeys = remove(rangeCacheKeys, key) + rangeCache[key] = nil + continue + } + if _, ok := rangeCache[key]; !ok { + rangeCacheKeys = append(rangeCacheKeys, string(key)) + } + rangeCache[key] = c.Value + } + } + // Make Range return root hash (vmvmroot) from EvmStore.rootHash if _, exist := rangeCache[string(rootHashKey)]; exist { rangeCache[string(rootHashKey)] = s.rootHash @@ -133,22 +145,42 @@ func (s *EvmStore) Range(prefix []byte) plugin.RangeData { return ret } -// TODO: Range/Has/Get/Delete/Set are probably only called from the MultiWriterAppStore which -// doesn't need to do so anymore, remove these functions when MultiWriterAppStore is cleaned up. func (s *EvmStore) Has(key []byte) bool { + // EvmStore always has Patricia root + if bytes.Equal(key, rootHashKey) { + return true + } + if item, ok := s.cache[string(key)]; ok { + return !item.Deleted + } return s.evmDB.Has(key) } func (s *EvmStore) Get(key []byte) []byte { + if bytes.Equal(key, rootHashKey) { + return s.rootHash + } + + if item, ok := s.cache[string(key)]; ok { + return item.Value + } return s.evmDB.Get(key) } func (s *EvmStore) Delete(key []byte) { - s.evmDB.Delete(key) + if bytes.Equal(key, rootHashKey) { + s.rootHash = nil + } else { + s.setCache(key, nil, true) + } } func (s *EvmStore) Set(key, val []byte) { - s.evmDB.Set(key, val) + if bytes.Equal(key, rootHashKey) { + s.rootHash = val + } else { + s.setCache(key, val, false) + } } func (s *EvmStore) Commit(version int64) []byte { @@ -162,50 +194,30 @@ func (s *EvmStore) Commit(version int64) []byte { if bytes.Equal(currentRoot, []byte{}) { currentRoot = defaultRoot } - - flushInterval := s.flushInterval - - // TODO: Rather than loading the on-chain config here the flush interval override should be passed - // in as a parameter to SaveVersion(). 
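In the branch being deleted below, flushInterval carried three meanings: 0 deferred to the on-chain AppStore.IAVLFlushInterval setting, -1 forced a flush on every block (by normalizing back to 0), and any positive N committed the trie only every Nth block. Condensed from the removed lines:

    if flushInterval == 0 { // defer to the on-chain config
        if n := cfg.GetAppStore().GetIAVLFlushInterval(); n != 0 {
            flushInterval = int64(n)
        }
    } else if flushInterval == -1 { // explicit override: flush every block
        flushInterval = 0
    }
    if flushInterval == 0 || version%flushInterval == 0 {
        // commit the Patricia trie to evm.db
    }
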
- if flushInterval == 0 { - cfg, err := LoadOnChainConfig(s) - if err != nil { - panic(errors.Wrap(err, "failed to load on-chain config")) - } - if cfg.GetAppStore().GetIAVLFlushInterval() != 0 { - flushInterval = int64(cfg.GetAppStore().GetIAVLFlushInterval()) - } - } else if flushInterval == -1 { - flushInterval = 0 + // save Patricia root of EVM state only if it changes + if !bytes.Equal(currentRoot, s.lastSavedRoot) { + s.Set(evmRootKey(version), currentRoot) } - // Only commit Patricia tree every N blocks - // TODO: What happens to all the roots that don't get committed? Are they just going to accumulate - // in the trie.Database.nodes cache forever? - if flushInterval == 0 || version%flushInterval == 0 { - // If the root hasn't changed since the last call to Commit that means no new state changes - // occurred in the trie DB since then, so we can skip committing. - if !bytes.Equal(defaultRoot, currentRoot) && !bytes.Equal(currentRoot, s.lastSavedRoot) { - // trie.Database.Commit will call NewBatch (indirectly) to batch writes to evmDB - if err := s.trieDB.Commit(common.BytesToHash(currentRoot), false); err != nil { - panic(err) - } - } + s.rootCache.Add(version, currentRoot) - // We don't commit empty root but we need to save default root ([]byte{1}) as a placeholder of empty root - // So the node won't get EVM root mismatch during the EVM root checking - if !bytes.Equal(currentRoot, s.lastSavedRoot) { - s.Set(evmRootKey(version), currentRoot) - s.lastSavedRoot = currentRoot + batch := s.evmDB.NewBatch() + for key, item := range s.cache { + if !item.Deleted { + batch.Set([]byte(key), item.Value) + } else { + batch.Delete([]byte(key)) } } - - s.rootCache.Add(version, currentRoot) + batch.Write() + s.cache = make(map[string]cacheItem) + s.lastSavedRoot = currentRoot s.version = version return currentRoot } func (s *EvmStore) LoadVersion(targetVersion int64) error { + s.cache = make(map[string]cacheItem) // find the last saved root root, version := s.getLastSavedRoot(targetVersion) if bytes.Equal(root, defaultRoot) { @@ -228,20 +240,6 @@ func (s *EvmStore) Version() ([]byte, int64) { return s.rootHash, s.version } -func (s *EvmStore) TrieDB() *trie.Database { - return s.trieDB -} - -// SetCurrentRoot sets the current EVM state root, this root must exist in the current trie DB. -// NOTE: This function must be called prior to each call to Commit. -// TODO: This is clunky, the root should just be passed into Commit! -func (s *EvmStore) SetCurrentRoot(root []byte) { - s.rootHash = root -} - -// getLastSavedRoot retrieves the EVM state root from disk that best matches the given version. -// The roots are not written to disk for every version, they only get written out when they change -// between versions, and even then depending on the flush interval some roots won't be written to disk. func (s *EvmStore) getLastSavedRoot(targetVersion int64) ([]byte, int64) { start := util.PrefixKey(vmPrefix, evmRootPrefix) end := prefixRangeEnd(evmRootKey(targetVersion)) @@ -259,31 +257,18 @@ func (s *EvmStore) getLastSavedRoot(targetVersion int64) ([]byte, int64) { return nil, 0 } -// GetRootAt returns the EVM state root corresponding to the given version. -// The second return value is version of the EVM state that corresponds to the returned root, -// it may be less than the version requested due to the reasons mentioned in getLastSavedRoot. -func (s *EvmStore) GetRootAt(version int64) ([]byte, int64) { - // Expect cache to be almost 100% hit since cache miss yields extremely poor performance. 
- // There's an assumption here that the cache will almost always contain all the in-mem-only - // roots that haven't been flushed to disk yet, in the rare case where such a root is evicted - // from the cache the last root persisted to disk will be returned instead. This means it's - // possible (though highly unlikely) for queries to return stale state (since they rely on - // snapshots corresponding to specific versions). This could be fixed by storing the in-mem-only - // roots in another map instead of, or in addition to the cache. +func (s *EvmStore) GetSnapshot(version int64) db.Snapshot { + var targetRoot []byte + // Expect cache to be almost 100% hit since cache miss yields extremely poor performance val, exist := s.rootCache.Get(version) if exist { - return val.([]byte), version + targetRoot = val.([]byte) + } else { + targetRoot, _ = s.getLastSavedRoot(version) } - return s.getLastSavedRoot(version) -} - -// TODO: Get rid of this function. EvmStore does not provide snapshot anymore but EVMState does. -func (s *EvmStore) GetSnapshot(version int64) *EvmStoreSnapshot { - root, _ := s.GetRootAt(version) - return NewEvmStoreSnapshot(s.evmDB.GetSnapshot(), root) + return NewEvmStoreSnapshot(s.evmDB.GetSnapshot(), targetRoot) } -// TODO: Get rid of EvmStoreSnapshot. EvmStore does not provide snapshot anymore but EVMState does. func NewEvmStoreSnapshot(snapshot db.Snapshot, rootHash []byte) *EvmStoreSnapshot { return &EvmStoreSnapshot{ Snapshot: snapshot, diff --git a/store/evmstore_test.go b/store/evmstore_test.go index ebb6bc1682..b141051195 100644 --- a/store/evmstore_test.go +++ b/store/evmstore_test.go @@ -24,7 +24,7 @@ func (t *EvmStoreTestSuite) TestEvmStoreRangeAndCommit() { require := t.Require() evmDb, err := db.LoadMemDB() require.NoError(err) - evmStore := NewEvmStore(evmDb, 100, 0) + evmStore := NewEvmStore(evmDb, 100) for i := 0; i <= 100; i++ { key := []byte(fmt.Sprintf("Key%d", i)) evmStore.Set(key, key) @@ -43,7 +43,7 @@ func (t *EvmStoreTestSuite) TestEvmStoreRangeAndCommit() { evmStore.Set([]byte("SSSSS"), []byte("SSSSS")) evmStore.Set([]byte("vvvvv"), []byte("vvv")) dataRange = evmStore.Range(nil) - require.Equal(106+1, len(dataRange)) // +1 default evm root key + require.Equal(107, len(dataRange)) evmStore.Commit(2) evmStore.Set([]byte("SSSSS"), []byte("S1")) ret := evmStore.Get([]byte("SSSSS")) @@ -51,12 +51,12 @@ func (t *EvmStoreTestSuite) TestEvmStoreRangeAndCommit() { evmStore.Delete([]byte("SSSSS")) evmStore.Delete([]byte("hello1")) dataRange = evmStore.Range(nil) - require.Equal(104+1, len(dataRange)) // +1 default evm root key + require.Equal(105, len(dataRange)) evmStore.Commit(3) evmStore.Delete([]byte("SSSSS")) evmStore.Delete([]byte("hello1")) dataRange = evmStore.Range(nil) - require.Equal(104+1, len(dataRange)) // +1 default evm root key + require.Equal(105, len(dataRange)) } func (t *EvmStoreTestSuite) TestEvmStoreBasicMethods() { @@ -64,7 +64,7 @@ func (t *EvmStoreTestSuite) TestEvmStoreBasicMethods() { // Test Get|Set|Has|Delete methods evmDb, err := db.LoadMemDB() require.NoError(err) - evmStore := NewEvmStore(evmDb, 100, 0) + evmStore := NewEvmStore(evmDb, 100) key1 := []byte("hello") key2 := []byte("hello2") value1 := []byte("world") @@ -92,7 +92,7 @@ func (t *EvmStoreTestSuite) TestEvmStoreRangePrefix() { // Test Range Prefix evmDb, err := db.LoadMemDB() require.NoError(err) - evmStore := NewEvmStore(evmDb, 100, 0) + evmStore := NewEvmStore(evmDb, 100) for i := 0; i <= 100; i++ { key := []byte(fmt.Sprintf("Key%d", i)) evmStore.Set(key, key) @@ 
-143,7 +143,7 @@ func (t *EvmStoreTestSuite) TestLoadVersionEvmStore() { evmDb.Set(evmRootKey(100), []byte{100}) evmDb.Set(evmRootKey(200), []byte{200}) - evmStore := NewEvmStore(evmDb, 100, 0) + evmStore := NewEvmStore(evmDb, 100) err = evmStore.LoadVersion(500) require.NoError(err) root, version := evmStore.Version() diff --git a/store/iavlstore.go b/store/iavlstore.go index 01b79db71f..253d6f5a00 100755 --- a/store/iavlstore.go +++ b/store/iavlstore.go @@ -216,10 +216,6 @@ func (s *IAVLStore) GetSnapshot() Snapshot { } } -func (s *IAVLStore) GetSnapshotAt(version int64) (Snapshot, error) { - panic("not implemented") -} - // NewIAVLStore creates a new IAVLStore. // maxVersions can be used to specify how many versions should be retained, if set to zero then // old versions will never been deleted. diff --git a/store/logstore.go b/store/logstore.go index 4de507b068..2e14a6b981 100644 --- a/store/logstore.go +++ b/store/logstore.go @@ -129,7 +129,3 @@ func (s *LogStore) Prune() error { func (s *LogStore) GetSnapshot() Snapshot { return s.store.GetSnapshot() } - -func (s *LogStore) GetSnapshotAt(version int64) (Snapshot, error) { - return s.store.GetSnapshotAt(version) -} diff --git a/store/loomethdb_test.go b/store/loomethdb_test.go deleted file mode 100644 index f0ff09cdeb..0000000000 --- a/store/loomethdb_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package store - -import ( - "bytes" - "sort" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestSortBarch(t *testing.T) { - test1 := []kvPair{ - {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, - {[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, - {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, - {[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, - {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), []byte("data5")}, - {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, - {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, - } - sort.Slice(test1, func(j, k int) bool { - return bytes.Compare(test1[j].key, test1[k].key) < 0 - }) - - test2 := []kvPair{ - {[]byte("secure-key-)\n��T�b��E��8o�K���H@�6/���c"), []byte("data3")}, - {[]byte("secure-key-q�����;� ��Z���'=��ks֝B"), []byte("data1")}, - {[]byte("secure-key-؀&*>�Y��F8I听Qia���SQ�6��f@"), []byte("data2")}, - {[]byte("h����Ntԇ�ב��E��K]}�ɐW��a7��"), []byte("data4")}, - {[]byte("�牔!��FQ���e�8���M˫����ܤ�S"), []byte("data5")}, - {[]byte("�Ka����ͯ>/�� �\tߕ|���}j���<<�"), []byte("data6")}, - {[]byte("-�F�bt����S �A������;BT�b�gF"), []byte("data7")}, - } - - sort.Slice(test2, func(j, k int) bool { - return bytes.Compare(test2[j].key, test2[k].key) < 0 - }) - - for i := 0; i < len(test1); i++ { - require.Equal(t, 0, bytes.Compare(test1[i].key, test2[i].key)) - } - -} diff --git a/store/memstore.go b/store/memstore.go index dfe89e143c..badf02b59f 100644 --- a/store/memstore.go +++ b/store/memstore.go @@ -79,7 +79,3 @@ func (m *MemStore) Prune() error { func (m *MemStore) GetSnapshot() Snapshot { panic("not implemented") } - -func (m *MemStore) GetSnapshotAt(version int64) (Snapshot, error) { - panic("not implemented") -} diff --git a/store/multi_writer_app_store.go b/store/multi_writer_app_store.go index a731a9bfee..598da8a296 100644 --- a/store/multi_writer_app_store.go +++ b/store/multi_writer_app_store.go @@ -81,15 +81,6 @@ func init() { ) } -// GetEVMRootFromAppStore retrieves the current EVM root from the given app store. 
-func GetEVMRootFromAppStore(s KVReader) []byte { - evmRoot := s.Get(rootKey) - if evmRoot == nil { - return defaultRoot - } - return evmRoot -} - // MultiWriterAppStore reads & writes keys that have the "vm" prefix via both the IAVLStore and the EvmStore, // or just the EvmStore, depending on the evmStoreEnabled flag. type MultiWriterAppStore struct { @@ -117,7 +108,7 @@ func NewMultiWriterAppStore( appStoreEvmRoot = defaultRoot } } - evmStoreEvmRoot, version := store.evmStore.GetRootAt(store.appStore.Version()) + evmStoreEvmRoot, version := store.evmStore.getLastSavedRoot(store.appStore.Version()) if !bytes.Equal(appStoreEvmRoot, evmStoreEvmRoot) { return nil, fmt.Errorf("EVM roots mismatch, evm.db(%d): %X, app.db(%d): %X", version, evmStoreEvmRoot, appStore.Version(), appStoreEvmRoot) @@ -128,9 +119,7 @@ func NewMultiWriterAppStore( store.onlySaveEvmStateToEvmStore = bytes.Equal(store.appStore.Get(evmDBFeatureKey), []byte{1}) } - if err := store.setLastSavedTreeToVersion(appStore.Version()); err != nil { - return nil, err - } + store.setLastSavedTreeToVersion(appStore.Version()) return store, nil } @@ -273,32 +262,12 @@ func (s *MultiWriterAppStore) Prune() error { } func (s *MultiWriterAppStore) GetSnapshot() Snapshot { - snapshot, err := s.GetSnapshotAt(0) - if err != nil { - panic(err) - } - return snapshot -} - -func (s *MultiWriterAppStore) GetSnapshotAt(version int64) (Snapshot, error) { defer func(begin time.Time) { getSnapshotDuration.Observe(time.Since(begin).Seconds()) }(time.Now()) - - var err error - var appStoreTree *iavl.ImmutableTree - if version == 0 { - appStoreTree = (*iavl.ImmutableTree)(atomic.LoadPointer(&s.lastSavedTree)) - } else { - appStoreTree, err = s.appStore.tree.GetImmutable(version) - if err != nil { - return nil, errors.Wrapf(err, "failed to load immutable tree for version %v", version) - } - } - // TODO: It's no longer necessary to acquire a snapshot from the EvmStore since it's now provided - // by the EVMState. 
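The restored line below reads the last saved IAVL tree out of an atomic pointer; the matching writer side is assumed to live in SaveVersion (it is not part of this hunk), roughly:

    // writer, after a successful save:
    atomic.StorePointer(&s.lastSavedTree, unsafe.Pointer(tree))
    // reader, here in GetSnapshot:
    appStoreTree := (*iavl.ImmutableTree)(atomic.LoadPointer(&s.lastSavedTree))
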
+ appStoreTree := (*iavl.ImmutableTree)(atomic.LoadPointer(&s.lastSavedTree)) evmDbSnapshot := s.evmStore.GetSnapshot(appStoreTree.Version()) - return newMultiWriterStoreSnapshot(evmDbSnapshot, appStoreTree), nil + return newMultiWriterStoreSnapshot(evmDbSnapshot, appStoreTree) } type multiWriterStoreSnapshot struct { diff --git a/store/multi_writer_app_store_test.go b/store/multi_writer_app_store_test.go index 1e3e9e6a2d..f08d244716 100644 --- a/store/multi_writer_app_store_test.go +++ b/store/multi_writer_app_store_test.go @@ -1,6 +1,7 @@ package store import ( + "bytes" "testing" "github.com/gogo/protobuf/proto" @@ -8,7 +9,6 @@ import ( "github.com/loomnetwork/go-loom/util" "github.com/loomnetwork/loomchain/db" "github.com/loomnetwork/loomchain/log" - "github.com/pkg/errors" "github.com/stretchr/testify/suite" ) @@ -26,7 +26,7 @@ func TestMultiWriterAppStoreTestSuite(t *testing.T) { func (m *MultiWriterAppStoreTestSuite) TestEnableDisableMultiWriterAppStore() { require := m.Require() - store, err := mockMultiWriterStore(10, 10) + store, err := mockMultiWriterStore(10) require.NoError(err) // vm keys should be written to both the IAVL & EVM store @@ -56,7 +56,7 @@ func (m *MultiWriterAppStoreTestSuite) TestEnableDisableMultiWriterAppStore() { func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreDelete() { require := m.Require() - store, err := mockMultiWriterStore(10, 10) + store, err := mockMultiWriterStore(10) require.NoError(err) // vm keys should be written to both the IAVL & EVM store @@ -93,10 +93,43 @@ func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreDelete() { require.True(store.Has([]byte("abcd"))) } -func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSnapshotFlushInterval() { +func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSnapShot() { + require := m.Require() + store, err := mockMultiWriterStore(10) + require.NoError(err) + + store.Set(evmDBFeatureKey, []byte{1}) + store.Set(vmPrefixKey("abcd"), []byte("hello")) + store.Set(vmPrefixKey("abcde"), []byte("world")) + store.Set(vmPrefixKey("evmStore"), []byte("yes")) + store.Set(vmPrefixKey("aaaa"), []byte("yes")) + store.Set([]byte("ssssvvv"), []byte("SSSSSSSSSSSSS")) + store.Set([]byte("abcd"), []byte("NewData")) + _, _, err = store.SaveVersion() + require.NoError(err) + + store.Set(vmPrefixKey("abcd"), []byte("hellooooooo")) + store.Set(vmPrefixKey("abcde"), []byte("vvvvvvvvv")) + store.Set([]byte("abcd"), []byte("asdfasdf")) + + snapshot := store.GetSnapshot() + require.Equal([]byte("hello"), snapshot.Get(vmPrefixKey("abcd"))) + require.Equal([]byte("NewData"), snapshot.Get([]byte("abcd"))) + require.Equal([]byte("world"), snapshot.Get(vmPrefixKey("abcde"))) + + _, _, err = store.SaveVersion() + require.NoError(err) + + snapshot = store.GetSnapshot() + require.Equal([]byte("asdfasdf"), snapshot.Get([]byte("abcd"))) + require.Equal([]byte("hellooooooo"), snapshot.Get(vmPrefixKey("abcd"))) + require.Equal([]byte("vvvvvvvvv"), snapshot.Get(vmPrefixKey("abcde"))) +} + +func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSnapShotFlushInterval() { require := m.Require() // flush data to disk every 2 blocks - store, err := mockMultiWriterStore(2, 2) + store, err := mockMultiWriterStore(2) require.NoError(err) // the first version go to memory @@ -128,9 +161,63 @@ func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSnapshotFlushInter require.Equal([]byte("test2"), snapshotv1.Get([]byte("test2"))) } +func (m *MultiWriterAppStoreTestSuite) 
TestMultiWriterAppStoreSnapShotRange() { + require := m.Require() + store, err := mockMultiWriterStore(10) + require.NoError(err) + + store.Set(evmDBFeatureKey, []byte{1}) + store.Set(vmPrefixKey("abcd"), []byte("hello")) + store.Set(vmPrefixKey("abcde"), []byte("world")) + store.Set(vmPrefixKey("evmStore"), []byte("yes")) + store.Set(vmPrefixKey("aaaa"), []byte("yes")) + store.Set([]byte("ssssvvv"), []byte("SSSSSSSSSSSSS")) + store.Set([]byte("abcd"), []byte("NewData")) + store.Set([]byte("uuuu"), []byte("SSSSSSSSSSSSS")) + store.Set([]byte("sssss"), []byte("NewData")) + + snapshot := store.GetSnapshot() + rangeData := snapshot.Range(vmPrefix) + require.Equal(0, len(rangeData)) + _, _, err = store.SaveVersion() + require.NoError(err) + + snapshot = store.GetSnapshot() + rangeData = snapshot.Range(vmPrefix) + require.Equal(4+1, len(rangeData)) // +1 for evm root stored by EVM store + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("abcd")), []byte("hello"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("abcde")), []byte("world"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("evmStore")), []byte("yes"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("aaaa")), []byte("yes"))) + + // Modifications shouldn't be visible in the snapshot until the next SaveVersion() + store.Delete(vmPrefixKey("abcd")) + store.Delete([]byte("ssssvvv")) + + snapshot = store.GetSnapshot() + rangeData = snapshot.Range(vmPrefix) + require.Equal(4+1, len(rangeData)) // +1 for evm root stored by EVM store + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("abcd")), []byte("hello"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("abcde")), []byte("world"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("evmStore")), []byte("yes"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("aaaa")), []byte("yes"))) + + _, _, err = store.SaveVersion() + require.NoError(err) + + snapshot = store.GetSnapshot() + rangeData = snapshot.Range(vmPrefix) + require.Equal(3+1, len(rangeData)) // +1 for evm root stored by EVM store + require.Equal(0, len(snapshot.Get(vmPrefixKey("abcd")))) // has been deleted + require.Equal(0, len(snapshot.Get([]byte("ssssvvv")))) // has been deleted + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("abcde")), []byte("world"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("evmStore")), []byte("yes"))) + require.Equal(0, bytes.Compare(snapshot.Get(vmPrefixKey("aaaa")), []byte("yes"))) +} + func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSaveVersion() { require := m.Require() - store, err := mockMultiWriterStore(10, -1) + store, err := mockMultiWriterStore(10) require.NoError(err) // vm keys should be written to the EVM store @@ -168,7 +255,7 @@ func (m *MultiWriterAppStoreTestSuite) TestMultiWriterAppStoreSaveVersion() { func (m *MultiWriterAppStoreTestSuite) TestPruningEvmKeys() { require := m.Require() - store, err := mockMultiWriterStore(10, 10) + store, err := mockMultiWriterStore(10) require.NoError(err) // write some vm keys to iavl store @@ -181,11 +268,8 @@ func (m *MultiWriterAppStoreTestSuite) TestPruningEvmKeys() { iavlStore.Set(vmPrefixKey("dd"), []byte("yes")) iavlStore.Set(vmPrefixKey("vv"), []byte("yes")) _, version, err := store.SaveVersion() - require.NoError(err) require.Equal(int64(1), version) - require.Equal(version, iavlStore.Version()) - _, evmStoreVer := store.evmStore.Version() - require.Equal(version, evmStoreVer) + require.NoError(err) newStore, err := 
NewMultiWriterAppStore(iavlStore, store.evmStore, false) require.NoError(err) @@ -232,7 +316,7 @@ func (m *MultiWriterAppStoreTestSuite) TestPruningEvmKeys() { func (m *MultiWriterAppStoreTestSuite) TestIAVLRangeWithlimit() { require := m.Require() - store, err := mockMultiWriterStore(10, 10) + store, err := mockMultiWriterStore(10) require.NoError(err) // write some vm keys to iavl store @@ -252,19 +336,14 @@ func (m *MultiWriterAppStoreTestSuite) TestIAVLRangeWithlimit() { require.Equal(4, len(rangeData)) } -func mockMultiWriterStore(appStoreFlushInterval, evmStoreFlushInterval int64) (*MultiWriterAppStore, error) { - // Using different flush intervals for the app & evm stores is not supported. - if appStoreFlushInterval > 0 && evmStoreFlushInterval > 0 && appStoreFlushInterval != evmStoreFlushInterval { - return nil, errors.New("positive flush intervals must be consistent") - } - +func mockMultiWriterStore(flushInterval int64) (*MultiWriterAppStore, error) { memDb, _ := db.LoadMemDB() - iavlStore, err := NewIAVLStore(memDb, 0, 0, appStoreFlushInterval) + iavlStore, err := NewIAVLStore(memDb, 0, 0, flushInterval) if err != nil { return nil, err } memDb, _ = db.LoadMemDB() - evmStore := NewEvmStore(memDb, 100, evmStoreFlushInterval) + evmStore := NewEvmStore(memDb, 100) multiWriterStore, err := NewMultiWriterAppStore(iavlStore, evmStore, false) if err != nil { return nil, err diff --git a/store/pruning_iavlstore.go b/store/pruning_iavlstore.go new file mode 100644 index 0000000000..4e73cbad31 --- /dev/null +++ b/store/pruning_iavlstore.go @@ -0,0 +1,271 @@ +package store + +import ( + "fmt" + "runtime" + "sync" + "time" + + "github.com/go-kit/kit/metrics" + kitprometheus "github.com/go-kit/kit/metrics/prometheus" + "github.com/loomnetwork/go-loom" + "github.com/loomnetwork/go-loom/plugin" + "github.com/loomnetwork/loomchain/log" + "github.com/pkg/errors" + stdprometheus "github.com/prometheus/client_golang/prometheus" + dbm "github.com/tendermint/tendermint/libs/db" +) + +var ( + pruneDuration metrics.Histogram + deleteVersionDuration metrics.Histogram +) + +func init() { + const namespace = "loomchain" + const subsystem = "pruning_iavl_store" + + pruneDuration = kitprometheus.NewSummaryFrom( + stdprometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "prune_duration", + Help: "How long PruningIAVLStore.prune() took to execute (in seconds)", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, []string{"error"}) + deleteVersionDuration = kitprometheus.NewSummaryFrom( + stdprometheus.SummaryOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "delete_version_duration", + Help: "How long it took to delete a single version from the IAVL store (in seconds)", + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }, []string{"error"}) +} + +type PruningIAVLStoreConfig struct { + MaxVersions int64 // maximum number of versions to keep when pruning + BatchSize int64 // maximum number of versions to delete in each cycle + FlushInterval int64 // number of versions before flushing to disk + Interval time.Duration + Logger *loom.Logger +} + +// PruningIAVLStore is a specialized IAVLStore that has a background thread that periodically prunes +// old versions. It should only be used to prune old clusters, on new clusters nodes will delete +// a version each time they save a new one, so the background thread, and all the extra locking +// is unnecessary. 
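A hedged usage sketch for the store added below (the values are illustrative, not defaults):

    store, err := NewPruningIAVLStore(db, PruningIAVLStoreConfig{
        MaxVersions: 100,              // keep roughly the last 100 versions
        BatchSize:   50,               // delete at most 50 versions per prune cycle
        Interval:    10 * time.Second, // how often the prune loop wakes up
    })
    if err != nil {
        return err
    }
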
+type PruningIAVLStore struct { + store *IAVLStore + mutex *sync.RWMutex + oldestVer int64 + maxVersions int64 + batchSize int64 + batchCount uint64 + logger *loom.Logger +} + +// NewPruningIAVLStore creates a new PruningIAVLStore. +// maxVersions can be used to specify how many versions should be retained, if set to zero then +// old versions will never be deleted. +func NewPruningIAVLStore(db dbm.DB, cfg PruningIAVLStoreConfig) (*PruningIAVLStore, error) { + // always keep at least 2 of the latest versions + maxVersions := cfg.MaxVersions + if (maxVersions != 0) && (maxVersions < 2) { + maxVersions = 2 + } + + store, err := NewIAVLStore(db, maxVersions, 0, cfg.FlushInterval) + if err != nil { + return nil, err + } + + s := &PruningIAVLStore{ + store: store, + mutex: &sync.RWMutex{}, + maxVersions: maxVersions, + batchSize: cfg.BatchSize, + logger: cfg.Logger, + } + + if s.logger == nil { + s.logger = log.Default + } + + if maxVersions != 0 { + latestVer := store.Version() + + oldestVer := int64(0) + if cfg.BatchSize > 1 { + for i := int64(1); i <= latestVer; i++ { + if store.tree.VersionExists(i) { + oldestVer = i + break + } + } + } + s.oldestVer = oldestVer + + go s.loopWithInterval(s.prune, cfg.Interval) + } + + return s, nil +} + +func (s *PruningIAVLStore) Delete(key []byte) { + s.mutex.Lock() + defer s.mutex.Unlock() + + s.store.Delete(key) +} + +func (s *PruningIAVLStore) Set(key, val []byte) { + s.mutex.Lock() + defer s.mutex.Unlock() + + s.store.Set(key, val) +} + +func (s *PruningIAVLStore) Has(key []byte) bool { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return s.store.Has(key) +} + +func (s *PruningIAVLStore) Get(key []byte) []byte { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return s.store.Get(key) +} + +func (s *PruningIAVLStore) Range(prefix []byte) plugin.RangeData { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return s.store.Range(prefix) +} + +func (s *PruningIAVLStore) Hash() []byte { + s.mutex.Lock() + defer s.mutex.Unlock() + + return s.store.Hash() +} + +func (s *PruningIAVLStore) Version() int64 { + s.mutex.RLock() + defer s.mutex.RUnlock() + + return s.store.Version() +} + +func (s *PruningIAVLStore) SaveVersion() ([]byte, int64, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + + hash, ver, err := s.store.SaveVersion() + if err == nil && s.oldestVer == 0 { + s.oldestVer = ver + } + return hash, ver, err +} + +func (s *PruningIAVLStore) Prune() error { + // pruning is done in the goroutine, so do nothing here + return nil +} + +func (s *PruningIAVLStore) GetSnapshot() Snapshot { + // This isn't an actual snapshot obviously, and never will be, but let's pretend...
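Reads through this pseudo-snapshot stay race-free because every accessor on the embedded PruningIAVLStore takes the RWMutex, but it is not a point-in-time view: writes made after GetSnapshot returns remain visible through it.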
+ return &pruningIAVLStoreSnapshot{ + PruningIAVLStore: s, + } +} + +func (s *PruningIAVLStore) prune() error { + s.mutex.Lock() + defer s.mutex.Unlock() + + var err error + defer func(begin time.Time) { + lvs := []string{"error", fmt.Sprint(err != nil)} + pruneDuration.With(lvs...).Observe(time.Since(begin).Seconds()) + }(time.Now()) + + latestVer := s.store.Version() + endVer := latestVer - s.maxVersions + + if (s.oldestVer == 0) || (s.oldestVer > endVer) { + return nil // nothing to prune yet + } + + if (endVer - s.oldestVer) > s.batchSize { + endVer = s.oldestVer + s.batchSize + } + + if endVer > (latestVer - 2) { + endVer = latestVer - 2 + } + + for i := s.oldestVer; i <= endVer; i++ { + if s.store.tree.VersionExists(i) { + if err = s.deleteVersion(i); err != nil { + return errors.Wrapf(err, "failed to delete tree version %d", i) + } + } + s.oldestVer++ + } + + s.batchCount++ + return nil +} + +func (s *PruningIAVLStore) deleteVersion(ver int64) error { + var err error + defer func(begin time.Time) { + lvs := []string{"error", fmt.Sprint(err != nil)} + deleteVersionDuration.With(lvs...).Observe(time.Since(begin).Seconds()) + }(time.Now()) + + err = s.store.tree.DeleteVersion(ver) + return err +} + +// runWithRecovery should run in a goroutine; it ensures the given function keeps running in +// a goroutine as long as it doesn't panic due to a runtime error. +// [MGC] I believe this function shouldn't be used, as we should just fail fast if this breaks +func (s *PruningIAVLStore) runWithRecovery(run func()) { + defer func() { + if r := recover(); r != nil { + s.logger.Error("Recovered from panic in PruningIAVLStore goroutine", "r", r) + // Unless it's a runtime error, restart the goroutine + if _, ok := r.(runtime.Error); !ok { + time.Sleep(30 * time.Second) + s.logger.Info("Restarting PruningIAVLStore goroutine...\n") + go s.runWithRecovery(run) + } + } + }() + run() +} + +// loopWithInterval will execute the step function in an endless loop, sleeping for the specified +// interval at the end of each loop iteration.
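Note that NewPruningIAVLStore starts this loop directly via go s.loopWithInterval(s.prune, cfg.Interval), bypassing runWithRecovery, so a panic inside prune takes the process down; per the [MGC] note above, that fail-fast behavior appears to be intentional.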
+func (s *PruningIAVLStore) loopWithInterval(step func() error, interval time.Duration) { + for { + if err := step(); err != nil { + s.logger.Error("PruneIAVLStore encountered an error", "err", err) + } + time.Sleep(interval) + } +} + +type pruningIAVLStoreSnapshot struct { + *PruningIAVLStore +} + +func (s *pruningIAVLStoreSnapshot) Release() { + // noop +} diff --git a/store/store.go b/store/store.go index 85e0c7fe72..f2aaae4320 100644 --- a/store/store.go +++ b/store/store.go @@ -54,7 +54,6 @@ type VersionedKVStore interface { // Delete old version of the store Prune() error GetSnapshot() Snapshot - GetSnapshotAt(version int64) (Snapshot, error) } type cacheItem struct { diff --git a/store/store_test.go b/store/store_test.go index 98f270bc2f..a2c3841ef5 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -5,6 +5,7 @@ import ( "fmt" "sync" "testing" + "time" "github.com/loomnetwork/go-loom/plugin" "github.com/loomnetwork/go-loom/util" @@ -437,6 +438,140 @@ func (ts *MemStoreTestSuite) SetupSuite() { ts.supportsSnapshots = false } +// +// PruningIAVLStore +// + +func TestPruningIAVLStoreBatching(t *testing.T) { + db := dbm.NewMemDB() + cfg := PruningIAVLStoreConfig{ + MaxVersions: 5, + BatchSize: 5, + Interval: 1 * time.Second, + } + store, err := NewPruningIAVLStore(db, cfg) + require.NoError(t, err) + + require.Equal(t, int64(0), store.oldestVer) + + values := []struct { + key []byte + val []byte + }{ + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + } // 12 items + + curVer := int64(1) + for _, kv := range values { + store.Set(kv.key, kv.val) + _, ver, err := store.SaveVersion() + require.NoError(t, err) + require.Equal(t, curVer, ver) + curVer++ + } + + time.Sleep(5 * time.Second) + + require.True(t, store.Version() > cfg.MaxVersions) + require.Equal(t, store.Version(), store.oldestVer+cfg.MaxVersions-1, "correct number of versions has been kept") + require.Equal(t, uint64(2), store.batchCount, "correct number of batches has been pruned") + + prevOldestVer := store.oldestVer + + store, err = NewPruningIAVLStore(db, cfg) + require.NoError(t, err) + + // the oldest version shouldn't change when the IAVL store is reloaded + require.Equal(t, prevOldestVer, store.oldestVer) +} + +func TestPruningIAVLStoreKeepsAtLeastTwoVersions(t *testing.T) { + cfg := PruningIAVLStoreConfig{ + MaxVersions: 1, + BatchSize: 5, + Interval: 1 * time.Second, + } + store, err := NewPruningIAVLStore(dbm.NewMemDB(), cfg) + require.NoError(t, err) + require.Equal(t, int64(0), store.Version()) + + values := []struct { + key []byte + val []byte + }{ + {key: key1, val: val1}, + {key: key2, val: val2}, + } + + for i, kv := range values { + if i == 2 { + break + } + + store.Set(kv.key, kv.val) + _, _, err := store.SaveVersion() + require.NoError(t, err) + } + + time.Sleep(5 * time.Second) + + require.Equal(t, int64(2), store.Version()) + require.Equal(t, int64(1), store.oldestVer) + require.Equal(t, uint64(0), store.batchCount) +} + +func TestPruningIAVLStoreKeepsAllVersionsIfMaxVersionsIsZero(t *testing.T) { + cfg := PruningIAVLStoreConfig{ + MaxVersions: 0, + BatchSize: 5, + Interval: 1 * time.Second, + } + store, err := NewPruningIAVLStore(dbm.NewMemDB(), cfg) + require.NoError(t, err) + require.Equal(t, int64(0), store.Version()) + 
require.Equal(t, int64(0), store.maxVersions) + + values := []struct { + key []byte + val []byte + }{ + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + } // 12 items + + for _, kv := range values { + store.Set(kv.key, kv.val) + _, _, err := store.SaveVersion() + require.NoError(t, err) + } + + time.Sleep(4 * time.Second) + + require.Equal(t, int64(12), store.Version()) + require.Equal(t, uint64(0), store.batchCount) +} + func TestIAVLStoreKeepsAllVersionsIfMaxVersionsIsZero(t *testing.T) { store, err := NewIAVLStore(dbm.NewMemDB(), 0, 0, 0) require.NoError(t, err) @@ -469,3 +604,56 @@ func TestIAVLStoreKeepsAllVersionsIfMaxVersionsIsZero(t *testing.T) { require.Equal(t, int64(12), store.Version()) } + +func TestSwitchFromIAVLStoreToPruningIAVLStore(t *testing.T) { + memDB := dbm.NewMemDB() + store1, err := NewIAVLStore(memDB, 0, 0, 0) + require.NoError(t, err) + + values := []struct { + key []byte + val []byte + }{ + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + {key: key1, val: val1}, + {key: key2, val: val2}, + {key: key3, val: val3}, + {key: key1, val: val3}, + {key: key2, val: val1}, + {key: key3, val: val2}, + } // 12 items + + for _, kv := range values { + store1.Set(kv.key, kv.val) + _, _, err := store1.SaveVersion() + require.NoError(t, err) + } + + require.Equal(t, int64(12), store1.Version()) + + store2, err := NewIAVLStore(memDB, 11, 0, 0) + require.NoError(t, err) + // force the store to prune an old version + store2.Set(key1, val1) + _, _, err = store2.SaveVersion() + require.NoError(t, err) + + require.Equal(t, int64(13), store2.Version()) + + cfg := PruningIAVLStoreConfig{ + MaxVersions: 5, + BatchSize: 5, + Interval: 1 * time.Second, + } + store3, err := NewPruningIAVLStore(memDB, cfg) + require.NoError(t, err) + + time.Sleep(4 * time.Second) + + require.Equal(t, (store3.Version()-cfg.MaxVersions)+1, store3.oldestVer) +} diff --git a/store/versioned_cachingstore.go b/store/versioned_cachingstore.go index 30d80a6182..3d0d914a01 100644 --- a/store/versioned_cachingstore.go +++ b/store/versioned_cachingstore.go @@ -12,6 +12,7 @@ import ( "github.com/go-kit/kit/metrics" kitprometheus "github.com/go-kit/kit/metrics/prometheus" loom "github.com/loomnetwork/go-loom" + "github.com/pkg/errors" stdprometheus "github.com/prometheus/client_golang/prometheus" ) @@ -366,43 +367,18 @@ func (c *versionedCachingStore) SaveVersion() ([]byte, int64, error) { // GetSnapshot() is called it won't return the current unpersisted state of the cache, // but rather the last persisted version. 
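In other words, c.version deliberately runs one version ahead of what SaveVersion just persisted, and GetSnapshot below compensates by pinning the snapshot to c.version-1.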
c.version = version + 1 - if err = c.cache.Set(rootKey, GetEVMRootFromAppStore(c.VersionedKVStore), version); err != nil { - // Only log error and dont error out - cacheErrors.With("cache_operation", "set").Add(1) - c.logger.Error("[VersionedCachingStore] error while caching EVM root", "err", err) - } } return hash, version, err } func (c *versionedCachingStore) GetSnapshot() Snapshot { - snapshot, err := c.GetSnapshotAt(0) - if err != nil { - panic(err) - } - return snapshot + return newVersionedCachingStoreSnapshot( + c.VersionedKVStore.GetSnapshot(), + c.cache, c.version-1, c.logger, + ) } -func (c *versionedCachingStore) GetSnapshotAt(version int64) (Snapshot, error) { - // TODO: c.version & c.VersionedKVStore.GetSnapshot() could end up corresponding to different - // versions, need to do this atomically. - if version == 0 { - return newVersionedCachingStoreSnapshot( - c.VersionedKVStore.GetSnapshot(), - c.cache, c.version-1, c.logger, - ), nil - } - - snapshot, err := c.VersionedKVStore.GetSnapshotAt(version) - if err != nil { - return nil, err - } - return newVersionedCachingStoreSnapshot(snapshot, c.cache, version, c.logger), nil -} - -// versionedCachingStoreSnapshot is a read-only CachingStore with specified version. -// NOTE: versionedCachingStoreSnapshot.Range is not implemented, so the underlying snapshot's Range -// implementation will be used instead. +// CachingStoreSnapshot is a read-only CachingStore with specified version type versionedCachingStoreSnapshot struct { Snapshot cache *versionedBigCache @@ -410,9 +386,8 @@ type versionedCachingStoreSnapshot struct { logger *loom.Logger } -func newVersionedCachingStoreSnapshot( - snapshot Snapshot, cache *versionedBigCache, version int64, logger *loom.Logger, -) *versionedCachingStoreSnapshot { +func newVersionedCachingStoreSnapshot(snapshot Snapshot, cache *versionedBigCache, + version int64, logger *loom.Logger) *versionedCachingStoreSnapshot { return &versionedCachingStoreSnapshot{ Snapshot: snapshot, cache: cache, @@ -421,6 +396,14 @@ func newVersionedCachingStoreSnapshot( } } +func (c *versionedCachingStoreSnapshot) Delete(key []byte) { + panic("[versionedCachingStoreSnapshot] Delete() not implemented") +} + +func (c *versionedCachingStoreSnapshot) Set(key, val []byte) { + panic("[versionedCachingStoreSnapshot] Set() not implemented") +} + func (c *versionedCachingStoreSnapshot) Has(key []byte) bool { var err error @@ -506,6 +489,14 @@ func (c *versionedCachingStoreSnapshot) Get(key []byte) []byte { return data } +func (c *versionedCachingStoreSnapshot) SaveVersion() ([]byte, int64, error) { + return nil, 0, errors.New("[VersionedCachingStoreSnapshot] SaveVersion() not implemented") +} + +func (c *versionedCachingStoreSnapshot) Prune() error { + return errors.New("[VersionedCachingStoreSnapshot] Prune() not implemented") +} + func (c *versionedCachingStoreSnapshot) Release() { c.Snapshot.Release() } diff --git a/store/versioned_cachingstore_test.go b/store/versioned_cachingstore_test.go index 0315046c25..5345b77e5d 100644 --- a/store/versioned_cachingstore_test.go +++ b/store/versioned_cachingstore_test.go @@ -63,17 +63,14 @@ func (m *MockStore) GetSnapshot() Snapshot { for k, v := range m.storage { snapshotStore[k] = v } + mstore := &MockStore{ + storage: snapshotStore, + } return &mockStoreSnapshot{ - MockStore: &MockStore{ - storage: snapshotStore, - }, + MockStore: mstore, } } -func (m *MockStore) GetSnapshotAt(version int64) (Snapshot, error) { - panic("not implemented") -} - type mockStoreSnapshot struct { *MockStore 
}
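For completeness, a sketch of the copy-on-snapshot semantics the mock implements above (the literal construction and the assumption that MockStore.Set writes straight into its storage map are hypothetical; only the GetSnapshot copying behavior is taken from this patch):

    store := &MockStore{storage: map[string][]byte{}}
    store.Set([]byte("k"), []byte("v1"))
    snap := store.GetSnapshot() // deep-copies the backing map
    store.Set([]byte("k"), []byte("v2"))
    // snap still returns "v1"; later writes don't leak into the snapshot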