diff --git a/Cargo.lock b/Cargo.lock index 3781bc58947..1fd8fabb94c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -295,7 +295,7 @@ dependencies = [ "hyper 0.9.4 (git+https://github.com/ethcore/hyper)", "lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", - "lru-cache 0.0.7 (registry+https://github.com/rust-lang/crates.io-index)", + "lru-cache 0.0.7 (git+https://github.com/contain-rs/lru-cache)", "num_cpus 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.3.14 (registry+https://github.com/rust-lang/crates.io-index)", "rayon 0.4.2 (registry+https://github.com/rust-lang/crates.io-index)", @@ -882,7 +882,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "linked-hash-map" -version = "0.0.9" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] @@ -898,9 +898,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "lru-cache" version = "0.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" +source = "git+https://github.com/contain-rs/lru-cache#13255e33c45ceb69a4b143f235a4322df5fb580e" dependencies = [ - "linked-hash-map 0.0.9 (registry+https://github.com/rust-lang/crates.io-index)", + "linked-hash-map 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -1951,10 +1951,10 @@ dependencies = [ "checksum language-tags 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" "checksum lazy_static 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "49247ec2a285bb3dcb23cbd9c35193c025e7251bfce77c1d5da97e6362dffe7f" "checksum libc 0.2.15 (registry+https://github.com/rust-lang/crates.io-index)" = "23e3757828fa702a20072c37ff47938e9dd331b92fac6e223d26d4b7a55f7ee2" -"checksum linked-hash-map 0.0.9 (registry+https://github.com/rust-lang/crates.io-index)" = "83f7ff3baae999fdf921cccf54b61842bb3b26868d50d02dff48052ebec8dd79" +"checksum linked-hash-map 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "bda158e0dabeb97ee8a401f4d17e479d6b891a14de0bba79d5cc2d4d325b5e48" "checksum linked-hash-map 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6d262045c5b87c0861b3f004610afd0e2c851e2908d08b6c870cbb9d5f494ecd" "checksum log 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "ab83497bf8bf4ed2a74259c1c802351fcd67a65baa86394b6ba73c36f4838054" -"checksum lru-cache 0.0.7 (registry+https://github.com/rust-lang/crates.io-index)" = "42d50dcb5d9f145df83b1043207e1ac0c37c9c779c4e128ca4655abc3f3cbf8c" +"checksum lru-cache 0.0.7 (git+https://github.com/contain-rs/lru-cache)" = "" "checksum matches 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "15305656809ce5a4805b1ff2946892810992197ce1270ff79baded852187942e" "checksum memchr 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "d8b629fb514376c675b98c1421e80b151d3817ac42d7c667717d282761418d20" "checksum mime 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "a74cc2587bf97c49f3f5bab62860d6abf3902ca73b66b51d9b049fbdcd727bd2" diff --git a/ethcore/Cargo.toml b/ethcore/Cargo.toml index 7bda7e56738..ad9e545cedb 100644 --- a/ethcore/Cargo.toml +++ b/ethcore/Cargo.toml @@ -37,7 +37,7 @@ ethkey = { path = "../ethkey" } ethcore-ipc-nano = { path = "../ipc/nano" } rlp = { path = "../util/rlp" } rand = "0.3" -lru-cache = "0.0.7" +lru-cache = { 
git = "https://github.com/contain-rs/lru-cache" } ethcore-bloom-journal = { path = "../util/bloom" } byteorder = "0.5" diff --git a/ethcore/src/client/client.rs b/ethcore/src/client/client.rs index a5bc63922d5..7c9e71ee02d 100644 --- a/ethcore/src/client/client.rs +++ b/ethcore/src/client/client.rs @@ -177,7 +177,7 @@ impl Client { }; let journal_db = journaldb::new(db.clone(), config.pruning, ::db::COL_STATE); - let mut state_db = StateDB::new(journal_db); + let mut state_db = StateDB::new(journal_db, config.state_cache_size); if state_db.journal_db().is_empty() && try!(spec.ensure_db_good(&mut state_db)) { let mut batch = DBTransaction::new(&db); try!(state_db.commit(&mut batch, 0, &spec.genesis_header().hash(), None)); @@ -197,7 +197,7 @@ impl Client { let awake = match config.mode { Mode::Dark(..) => false, _ => true }; let factories = Factories { - vm: EvmFactory::new(config.vm_type.clone()), + vm: EvmFactory::new(config.vm_type.clone(), config.jump_table_size), trie: TrieFactory::new(trie_spec), accountdb: Default::default(), }; @@ -694,7 +694,8 @@ impl snapshot::DatabaseRestore for Client { let db = self.db.write(); try!(db.restore(new_db)); - *state_db = StateDB::new(journaldb::new(db.clone(), self.pruning, ::db::COL_STATE)); + let cache_size = state_db.cache_size(); + *state_db = StateDB::new(journaldb::new(db.clone(), self.pruning, ::db::COL_STATE), cache_size); *chain = Arc::new(BlockChain::new(self.config.blockchain.clone(), &[], db.clone())); *tracedb = TraceDB::new(self.config.tracing.clone(), db.clone(), chain.clone()); Ok(()) diff --git a/ethcore/src/client/config.rs b/ethcore/src/client/config.rs index 8cf54387b1f..e0ac51f0ad4 100644 --- a/ethcore/src/client/config.rs +++ b/ethcore/src/client/config.rs @@ -96,7 +96,7 @@ pub struct ClientConfig { pub pruning: journaldb::Algorithm, /// The name of the client instance. pub name: String, - /// State db cache-size if not default + /// RocksDB state column cache-size if not default pub db_cache_size: Option, /// State db compaction profile pub db_compaction: DatabaseCompactionProfile, @@ -106,6 +106,10 @@ pub struct ClientConfig { pub mode: Mode, /// Type of block verifier used by client. pub verifier_type: VerifierType, + /// State db cache-size. + pub state_cache_size: usize, + /// EVM jump-tables cache size. 
+ pub jump_table_size: usize, } #[cfg(test)] diff --git a/ethcore/src/client/test_client.rs b/ethcore/src/client/test_client.rs index 76451e7933a..685d2ca9634 100644 --- a/ethcore/src/client/test_client.rs +++ b/ethcore/src/client/test_client.rs @@ -122,7 +122,7 @@ impl TestBlockChainClient { queue_size: AtomicUsize::new(0), miner: Arc::new(Miner::with_spec(&spec)), spec: spec, - vm_factory: EvmFactory::new(VMType::Interpreter), + vm_factory: EvmFactory::new(VMType::Interpreter, 1024 * 1024), latest_block_timestamp: RwLock::new(10_000_000), }; client.add_blocks(1, EachBlockWith::Nothing); // add genesis block @@ -289,7 +289,7 @@ pub fn get_temp_state_db() -> GuardedTempResult { let temp = RandomTempPath::new(); let db = Database::open(&DatabaseConfig::with_columns(NUM_COLUMNS), temp.as_str()).unwrap(); let journal_db = journaldb::new(Arc::new(db), journaldb::Algorithm::EarlyMerge, COL_STATE); - let state_db = StateDB::new(journal_db); + let state_db = StateDB::new(journal_db, 1024 * 1024); GuardedTempResult { _temp: temp, result: Some(state_db) diff --git a/ethcore/src/evm/factory.rs b/ethcore/src/evm/factory.rs index 629b423da55..a3d94bde83c 100644 --- a/ethcore/src/evm/factory.rs +++ b/ethcore/src/evm/factory.rs @@ -118,11 +118,12 @@ impl Factory { } } - /// Create new instance of specific `VMType` factory - pub fn new(evm: VMType) -> Self { + /// Create new instance of specific `VMType` factory, with a size in bytes + /// for caching jump destinations. + pub fn new(evm: VMType, cache_size: usize) -> Self { Factory { evm: evm, - evm_cache: Arc::new(SharedCache::default()), + evm_cache: Arc::new(SharedCache::new(cache_size)), } } @@ -164,22 +165,22 @@ macro_rules! evm_test( #[ignore] #[cfg(feature = "jit")] fn $name_jit() { - $name_test(Factory::new(VMType::Jit)); + $name_test(Factory::new(VMType::Jit, 1024 * 32)); } #[test] fn $name_int() { - $name_test(Factory::new(VMType::Interpreter)); + $name_test(Factory::new(VMType::Interpreter, 1024 * 32)); } }; ($name_test: ident: $name_jit: ident, $name_int: ident) => { #[test] #[cfg(feature = "jit")] fn $name_jit() { - $name_test(Factory::new(VMType::Jit)); + $name_test(Factory::new(VMType::Jit, 1024 * 32)); } #[test] fn $name_int() { - $name_test(Factory::new(VMType::Interpreter)); + $name_test(Factory::new(VMType::Interpreter, 1024 * 32)); } } ); @@ -193,13 +194,13 @@ macro_rules! evm_test_ignore( #[cfg(feature = "jit")] #[cfg(feature = "ignored-tests")] fn $name_jit() { - $name_test(Factory::new(VMType::Jit)); + $name_test(Factory::new(VMType::Jit, 1024 * 32)); } #[test] #[ignore] #[cfg(feature = "ignored-tests")] fn $name_int() { - $name_test(Factory::new(VMType::Interpreter)); + $name_test(Factory::new(VMType::Interpreter, 1024 * 32)); } } ); diff --git a/ethcore/src/evm/interpreter/shared_cache.rs b/ethcore/src/evm/interpreter/shared_cache.rs index ce383bae810..dee55752276 100644 --- a/ethcore/src/evm/interpreter/shared_cache.rs +++ b/ethcore/src/evm/interpreter/shared_cache.rs @@ -21,25 +21,66 @@ use util::sha3::*; use bit_set::BitSet; use super::super::instructions; -const CACHE_CODE_ITEMS: usize = 65536; +const INITIAL_CAPACITY: usize = 32; +const DEFAULT_CACHE_SIZE: usize = 4 * 1024 * 1024; -/// GLobal cache for EVM interpreter +/// Global cache for EVM interpreter pub struct SharedCache { - jump_destinations: Mutex>> + jump_destinations: Mutex>>, + max_size: usize, + cur_size: Mutex, } impl SharedCache { - /// Get jump destincations bitmap for a contract. 
+ /// Create a jump destinations cache with a maximum size in bytes + /// to cache. + pub fn new(max_size: usize) -> Self { + SharedCache { + jump_destinations: Mutex::new(LruCache::new(INITIAL_CAPACITY)), + max_size: max_size * 8, // dealing with bits here. + cur_size: Mutex::new(0), + } + } + + /// Get jump destinations bitmap for a contract. pub fn jump_destinations(&self, code_hash: &H256, code: &[u8]) -> Arc { if code_hash == &SHA3_EMPTY { return Self::find_jump_destinations(code); } + if let Some(d) = self.jump_destinations.lock().get_mut(code_hash) { return d.clone(); } let d = Self::find_jump_destinations(code); - self.jump_destinations.lock().insert(code_hash.clone(), d.clone()); + + { + let mut cur_size = self.cur_size.lock(); + *cur_size += d.capacity(); + + let mut jump_dests = self.jump_destinations.lock(); + let cap = jump_dests.capacity(); + + // grow the cache as necessary; it operates on amount of items + // but we're working based on memory usage. + if jump_dests.len() == cap && *cur_size < self.max_size { + jump_dests.set_capacity(cap * 2); + } + + // account for any element displaced from the cache. + if let Some(lru) = jump_dests.insert(code_hash.clone(), d.clone()) { + *cur_size -= lru.capacity(); + } + + // remove elements until we are below the memory target. + while *cur_size > self.max_size { + match jump_dests.remove_lru() { + Some((_, v)) => *cur_size -= v.capacity(), + _ => break, + } + } + } + d } @@ -57,15 +98,15 @@ impl SharedCache { } position += 1; } + + jump_dests.shrink_to_fit(); Arc::new(jump_dests) } } impl Default for SharedCache { - fn default() -> SharedCache { - SharedCache { - jump_destinations: Mutex::new(LruCache::new(CACHE_CODE_ITEMS)), - } + fn default() -> Self { + SharedCache::new(DEFAULT_CACHE_SIZE) } } diff --git a/ethcore/src/evm/tests.rs b/ethcore/src/evm/tests.rs index eb7d168cf7b..f685e279d25 100644 --- a/ethcore/src/evm/tests.rs +++ b/ethcore/src/evm/tests.rs @@ -817,7 +817,7 @@ fn test_signextend(factory: super::Factory) { #[test] // JIT just returns out of gas fn test_badinstruction_int() { - let factory = super::Factory::new(VMType::Interpreter); + let factory = super::Factory::new(VMType::Interpreter, 1024 * 32); let code = "af".from_hex().unwrap(); let mut params = ActionParams::default(); diff --git a/ethcore/src/executive.rs b/ethcore/src/executive.rs index f3186d6dd99..3c8b6171e3f 100644 --- a/ethcore/src/executive.rs +++ b/ethcore/src/executive.rs @@ -598,7 +598,7 @@ mod tests { #[test] // Tracing is not suported in JIT fn test_call_to_create() { - let factory = Factory::new(VMType::Interpreter); + let factory = Factory::new(VMType::Interpreter, 1024 * 32); // code: // @@ -724,7 +724,7 @@ mod tests { #[test] fn test_create_contract() { // Tracing is not supported in JIT - let factory = Factory::new(VMType::Interpreter); + let factory = Factory::new(VMType::Interpreter, 1024 * 32); // code: // // 60 10 - push 16 diff --git a/ethcore/src/json_tests/executive.rs b/ethcore/src/json_tests/executive.rs index 5576f9ad4ed..8979b825359 100644 --- a/ethcore/src/json_tests/executive.rs +++ b/ethcore/src/json_tests/executive.rs @@ -191,7 +191,7 @@ fn do_json_test_for(vm_type: &VMType, json_data: &[u8]) -> Vec { state.populate_from(From::from(vm.pre_state.clone())); let info = From::from(vm.env); let engine = TestEngine::new(1); - let vm_factory = Factory::new(vm_type.clone()); + let vm_factory = Factory::new(vm_type.clone(), 1024 * 32); let params = ActionParams::from(vm.transaction); let mut substate = Substate::new(); diff --git 
a/ethcore/src/state_db.rs b/ethcore/src/state_db.rs index 04db274c424..8724a5d73e9 100644 --- a/ethcore/src/state_db.rs +++ b/ethcore/src/state_db.rs @@ -26,17 +26,19 @@ use bloom_journal::{Bloom, BloomJournal}; use db::COL_ACCOUNT_BLOOM; use byteorder::{LittleEndian, ByteOrder}; -const STATE_CACHE_ITEMS: usize = 256000; -const STATE_CACHE_BLOCKS: usize = 8; - pub const ACCOUNT_BLOOM_SPACE: usize = 1048576; pub const DEFAULT_ACCOUNT_PRESET: usize = 1000000; pub const ACCOUNT_BLOOM_HASHCOUNT_KEY: &'static [u8] = b"account_hash_count"; +const STATE_CACHE_BLOCKS: usize = 8; + + /// Shared canonical state cache. struct AccountCache { /// DB Account cache. `None` indicates that account is known to be missing. + // When changing the type of the values here, be sure to update `mem_used` and + // `new`. accounts: LruCache>, /// Information on the modifications in recently committed blocks; specifically which addresses /// changed in which block. Ordered by block number. @@ -92,6 +94,7 @@ pub struct StateDB { local_cache: Vec, /// Shared account bloom. Does not handle chain reorganizations. account_bloom: Arc>, + cache_size: usize, /// Hash of the block on top of which this instance was created or /// `None` if cache is disabled parent_hash: Option, @@ -102,6 +105,30 @@ pub struct StateDB { } impl StateDB { + + /// Create a new instance wrapping `JournalDB` and the maximum allowed size + /// of the LRU cache in bytes. Actual used memory may (read: will) be higher due to bookkeeping. + // TODO: make the cache size actually accurate by moving the account storage cache + // into the `AccountCache` structure as its own `LruCache<(Address, H256), H256>`. + pub fn new(db: Box, cache_size: usize) -> StateDB { + let bloom = Self::load_bloom(db.backing()); + let cache_items = cache_size / ::std::mem::size_of::>(); + + StateDB { + db: db, + account_cache: Arc::new(Mutex::new(AccountCache { + accounts: LruCache::new(cache_items), + modifications: VecDeque::new(), + })), + local_cache: Vec::new(), + account_bloom: Arc::new(Mutex::new(bloom)), + cache_size: cache_size, + parent_hash: None, + commit_hash: None, + commit_number: None, + } + } + /// Loads accounts bloom from the database /// This bloom is used to handle request for the non-existant account fast pub fn load_bloom(db: &Database) -> Bloom { @@ -129,23 +156,6 @@ impl StateDB { bloom } - /// Create a new instance wrapping `JournalDB` - pub fn new(db: Box) -> StateDB { - let bloom = Self::load_bloom(db.backing()); - StateDB { - db: db, - account_cache: Arc::new(Mutex::new(AccountCache { - accounts: LruCache::new(STATE_CACHE_ITEMS), - modifications: VecDeque::new(), - })), - local_cache: Vec::new(), - account_bloom: Arc::new(Mutex::new(bloom)), - parent_hash: None, - commit_hash: None, - commit_number: None, - } - } - pub fn check_account_bloom(&self, address: &Address) -> bool { trace!(target: "account_bloom", "Check account bloom: {:?}", address); let bloom = self.account_bloom.lock(); @@ -298,6 +308,7 @@ impl StateDB { account_cache: self.account_cache.clone(), local_cache: Vec::new(), account_bloom: self.account_bloom.clone(), + cache_size: self.cache_size, parent_hash: None, commit_hash: None, commit_number: None, @@ -311,6 +322,7 @@ impl StateDB { account_cache: self.account_cache.clone(), local_cache: Vec::new(), account_bloom: self.account_bloom.clone(), + cache_size: self.cache_size, parent_hash: Some(parent.clone()), commit_hash: None, commit_number: None, @@ -324,7 +336,8 @@ impl StateDB { /// Heap size used. 
pub fn mem_used(&self) -> usize { - self.db.mem_used() //TODO: + self.account_cache.lock().heap_size_of_children() + // TODO: account for LRU-cache overhead; this is a close approximation. + self.db.mem_used() + self.account_cache.lock().accounts.len() * ::std::mem::size_of::>() } /// Returns underlying `JournalDB`. @@ -365,6 +378,11 @@ impl StateDB { cache.accounts.get_mut(a).map(|c| f(c.as_mut())) } + /// Query how much memory is set aside for the accounts cache (in bytes). + pub fn cache_size(&self) -> usize { + self.cache_size + } + /// Check if the account can be returned from cache by matching current block parent hash against canonical /// state and filtering out account modified in later blocks. fn is_allowed(addr: &Address, parent_hash: &Option, modifications: &VecDeque) -> bool { @@ -404,77 +422,77 @@ impl StateDB { #[cfg(test)] mod tests { -use util::{U256, H256, FixedHash, Address, DBTransaction}; -use tests::helpers::*; -use state::Account; -use util::log::init_log; - -#[test] -fn state_db_smoke() { - init_log(); - - let mut state_db_result = get_temp_state_db(); - let state_db = state_db_result.take(); - let root_parent = H256::random(); - let address = Address::random(); - let h0 = H256::random(); - let h1a = H256::random(); - let h1b = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - let h3b = H256::random(); - let mut batch = DBTransaction::new(state_db.journal_db().backing()); - - // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] - // balance [ 5 5 4 3 2 2 ] - let mut s = state_db.boxed_clone_canon(&root_parent); - s.add_to_account_cache(address, Some(Account::new_basic(2.into(), 0.into())), false); - s.commit(&mut batch, 0, &h0, None).unwrap(); - s.sync_cache(&[], &[], true); - - let mut s = state_db.boxed_clone_canon(&h0); - s.commit(&mut batch, 1, &h1a, None).unwrap(); - s.sync_cache(&[], &[], true); - - let mut s = state_db.boxed_clone_canon(&h0); - s.add_to_account_cache(address, Some(Account::new_basic(3.into(), 0.into())), true); - s.commit(&mut batch, 1, &h1b, None).unwrap(); - s.sync_cache(&[], &[], false); - - let mut s = state_db.boxed_clone_canon(&h1b); - s.add_to_account_cache(address, Some(Account::new_basic(4.into(), 0.into())), true); - s.commit(&mut batch, 2, &h2b, None).unwrap(); - s.sync_cache(&[], &[], false); - - let mut s = state_db.boxed_clone_canon(&h1a); - s.add_to_account_cache(address, Some(Account::new_basic(5.into(), 0.into())), true); - s.commit(&mut batch, 2, &h2a, None).unwrap(); - s.sync_cache(&[], &[], true); - - let mut s = state_db.boxed_clone_canon(&h2a); - s.commit(&mut batch, 3, &h3a, None).unwrap(); - s.sync_cache(&[], &[], true); - - let s = state_db.boxed_clone_canon(&h3a); - assert_eq!(s.get_cached_account(&address).unwrap().unwrap().balance(), &U256::from(5)); - - let s = state_db.boxed_clone_canon(&h1a); - assert!(s.get_cached_account(&address).is_none()); - - let s = state_db.boxed_clone_canon(&h2b); - assert!(s.get_cached_account(&address).is_none()); - - let s = state_db.boxed_clone_canon(&h1b); - assert!(s.get_cached_account(&address).is_none()); - - // reorg to 3b - // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] - let mut s = state_db.boxed_clone_canon(&h2b); - s.commit(&mut batch, 3, &h3b, None).unwrap(); - s.sync_cache(&[h1b.clone(), h2b.clone(), h3b.clone()], &[h1a.clone(), h2a.clone(), h3a.clone()], true); - let s = state_db.boxed_clone_canon(&h3a); - assert!(s.get_cached_account(&address).is_none()); -} + use util::{U256, H256, FixedHash, Address, DBTransaction}; + use 
tests::helpers::*; + use state::Account; + use util::log::init_log; + + #[test] + fn state_db_smoke() { + init_log(); + + let mut state_db_result = get_temp_state_db(); + let state_db = state_db_result.take(); + let root_parent = H256::random(); + let address = Address::random(); + let h0 = H256::random(); + let h1a = H256::random(); + let h1b = H256::random(); + let h2a = H256::random(); + let h2b = H256::random(); + let h3a = H256::random(); + let h3b = H256::random(); + let mut batch = DBTransaction::new(state_db.journal_db().backing()); + + // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] + // balance [ 5 5 4 3 2 2 ] + let mut s = state_db.boxed_clone_canon(&root_parent); + s.add_to_account_cache(address, Some(Account::new_basic(2.into(), 0.into())), false); + s.commit(&mut batch, 0, &h0, None).unwrap(); + s.sync_cache(&[], &[], true); + + let mut s = state_db.boxed_clone_canon(&h0); + s.commit(&mut batch, 1, &h1a, None).unwrap(); + s.sync_cache(&[], &[], true); + + let mut s = state_db.boxed_clone_canon(&h0); + s.add_to_account_cache(address, Some(Account::new_basic(3.into(), 0.into())), true); + s.commit(&mut batch, 1, &h1b, None).unwrap(); + s.sync_cache(&[], &[], false); + + let mut s = state_db.boxed_clone_canon(&h1b); + s.add_to_account_cache(address, Some(Account::new_basic(4.into(), 0.into())), true); + s.commit(&mut batch, 2, &h2b, None).unwrap(); + s.sync_cache(&[], &[], false); + + let mut s = state_db.boxed_clone_canon(&h1a); + s.add_to_account_cache(address, Some(Account::new_basic(5.into(), 0.into())), true); + s.commit(&mut batch, 2, &h2a, None).unwrap(); + s.sync_cache(&[], &[], true); + + let mut s = state_db.boxed_clone_canon(&h2a); + s.commit(&mut batch, 3, &h3a, None).unwrap(); + s.sync_cache(&[], &[], true); + + let s = state_db.boxed_clone_canon(&h3a); + assert_eq!(s.get_cached_account(&address).unwrap().unwrap().balance(), &U256::from(5)); + + let s = state_db.boxed_clone_canon(&h1a); + assert!(s.get_cached_account(&address).is_none()); + + let s = state_db.boxed_clone_canon(&h2b); + assert!(s.get_cached_account(&address).is_none()); + + let s = state_db.boxed_clone_canon(&h1b); + assert!(s.get_cached_account(&address).is_none()); + + // reorg to 3b + // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] + let mut s = state_db.boxed_clone_canon(&h2b); + s.commit(&mut batch, 3, &h3b, None).unwrap(); + s.sync_cache(&[h1b.clone(), h2b.clone(), h3b.clone()], &[h1a.clone(), h2a.clone(), h3a.clone()], true); + let s = state_db.boxed_clone_canon(&h3a); + assert!(s.get_cached_account(&address).is_none()); + } } diff --git a/ethcore/src/tests/helpers.rs b/ethcore/src/tests/helpers.rs index acbf4e641e2..787f90262cb 100644 --- a/ethcore/src/tests/helpers.rs +++ b/ethcore/src/tests/helpers.rs @@ -346,7 +346,7 @@ pub fn get_temp_state() -> GuardedTempResult { pub fn get_temp_state_db_in(path: &Path) -> StateDB { let db = new_db(path.to_str().expect("Only valid utf8 paths for tests.")); let journal_db = journaldb::new(db.clone(), journaldb::Algorithm::EarlyMerge, COL_STATE); - StateDB::new(journal_db) + StateDB::new(journal_db, 5 * 1024 * 1024) } pub fn get_temp_state_in(path: &Path) -> State { diff --git a/parity/cache.rs b/parity/cache.rs index 12fe3f472e0..d2fc30d6ed4 100644 --- a/parity/cache.rs +++ b/parity/cache.rs @@ -21,15 +21,13 @@ const MIN_DB_CACHE_MB: u32 = 2; const MIN_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 16; const DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB: u32 = 50; const DEFAULT_TRACE_CACHE_SIZE: u32 = 20; +const DEFAULT_STATE_CACHE_SIZE: u32 = 25; /// Configuration for application cache sizes. 
/// All values are represented in MB. #[derive(Debug, PartialEq)] pub struct CacheConfig { - /// Size of database cache set using option `set_block_cache_size_mb` - /// 50% is blockchain - /// 25% is tracing - /// 25% is state + /// Size of rocksDB cache. Almost all goes to the state column. db: u32, /// Size of blockchain cache. blockchain: u32, @@ -37,11 +35,13 @@ pub struct CacheConfig { queue: u32, /// Size of traces cache. traces: u32, + /// Size of the state cache. + state: u32, } impl Default for CacheConfig { fn default() -> Self { - CacheConfig::new(64, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB) + CacheConfig::new(64, 8, DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, DEFAULT_STATE_CACHE_SIZE) } } @@ -49,26 +49,28 @@ impl CacheConfig { /// Creates new cache config with cumulative size equal `total`. pub fn new_with_total_cache_size(total: u32) -> Self { CacheConfig { - db: total * 7 / 8, - blockchain: total / 8, + db: total * 7 / 10, + blockchain: total / 10, queue: DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, traces: DEFAULT_TRACE_CACHE_SIZE, + state: total * 2 / 10, } } /// Creates new cache config with gitven details. - pub fn new(db: u32, blockchain: u32, queue: u32) -> Self { + pub fn new(db: u32, blockchain: u32, queue: u32, state: u32) -> Self { CacheConfig { db: db, blockchain: blockchain, queue: queue, traces: DEFAULT_TRACE_CACHE_SIZE, + state: state, } } /// Size of db cache for blockchain. pub fn db_blockchain_cache_size(&self) -> u32 { - max(MIN_DB_CACHE_MB, self.blockchain / 4) + max(MIN_DB_CACHE_MB, self.db / 4) } /// Size of db cache for state. @@ -90,6 +92,16 @@ impl CacheConfig { pub fn traces(&self) -> u32 { self.traces } + + /// Size of the state cache. + pub fn state(&self) -> u32 { + self.state * 3 / 4 + } + + /// Size of the jump-tables cache. + pub fn jump_tables(&self) -> u32 { + self.state / 4 + } } #[cfg(test)] @@ -99,21 +111,24 @@ mod tests { #[test] fn test_cache_config_constructor() { let config = CacheConfig::new_with_total_cache_size(200); - assert_eq!(config.db, 175); - assert_eq!(config.blockchain(), 25); + assert_eq!(config.db, 140); + assert_eq!(config.blockchain(), 20); assert_eq!(config.queue(), 50); + assert_eq!(config.state(), 30); + assert_eq!(config.jump_tables(), 10); } #[test] fn test_cache_config_db_cache_sizes() { let config = CacheConfig::new_with_total_cache_size(400); - assert_eq!(config.db, 350); - assert_eq!(config.db_blockchain_cache_size(), 12); - assert_eq!(config.db_state_cache_size(), 262); + assert_eq!(config.db, 280); + assert_eq!(config.db_blockchain_cache_size(), 70); + assert_eq!(config.db_state_cache_size(), 210); } #[test] fn test_cache_config_default() { - assert_eq!(CacheConfig::default(), CacheConfig::new(64, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB)); + assert_eq!(CacheConfig::default(), + CacheConfig::new(64, 8, super::DEFAULT_BLOCK_QUEUE_SIZE_LIMIT_MB, super::DEFAULT_STATE_CACHE_SIZE)); } } diff --git a/parity/cli/config.full.toml b/parity/cli/config.full.toml index 5b47e11844a..e11a26521fa 100644 --- a/parity/cli/config.full.toml +++ b/parity/cli/config.full.toml @@ -79,6 +79,7 @@ pruning = "auto" cache_size_db = 64 cache_size_blocks = 8 cache_size_queue = 50 +cache_size_state = 25 cache_size = 128 # Overrides above caches with total size fast_and_loose = false db_compaction = "ssd" diff --git a/parity/cli/config.toml b/parity/cli/config.toml index a5ad55d403e..cda1d4cb93b 100644 --- a/parity/cli/config.toml +++ b/parity/cli/config.toml @@ -48,6 +48,7 @@ pruning = "fast" cache_size_db = 128 cache_size_blocks = 16 cache_size_queue = 100 
+cache_size_state = 25 db_compaction = "ssd" fat_db = "off" diff --git a/parity/cli/mod.rs b/parity/cli/mod.rs index 8044032eb6a..06c9d4945ca 100644 --- a/parity/cli/mod.rs +++ b/parity/cli/mod.rs @@ -211,6 +211,8 @@ usage! { or |c: &Config| otry!(c.footprint).cache_size_blocks.clone(), flag_cache_size_queue: u32 = 50u32, or |c: &Config| otry!(c.footprint).cache_size_queue.clone(), + flag_cache_size_state: u32 = 25u32, + or |c: &Config| otry!(c.footprint).cache_size_state.clone(), flag_cache_size: Option = None, or |c: &Config| otry!(c.footprint).cache_size.clone().map(Some), flag_fast_and_loose: bool = false, @@ -361,6 +363,7 @@ struct Footprint { cache_size_db: Option, cache_size_blocks: Option, cache_size_queue: Option, + cache_size_state: Option, db_compaction: Option, fat_db: Option, } @@ -532,6 +535,7 @@ mod tests { flag_cache_size_db: 64u32, flag_cache_size_blocks: 8u32, flag_cache_size_queue: 50u32, + flag_cache_size_state: 25u32, flag_cache_size: Some(128), flag_fast_and_loose: false, flag_db_compaction: "ssd".into(), @@ -686,6 +690,7 @@ mod tests { cache_size_db: Some(128), cache_size_blocks: Some(16), cache_size_queue: Some(100), + cache_size_state: Some(25), db_compaction: Some("ssd".into()), fat_db: Some("off".into()), }), diff --git a/parity/cli/usage.txt b/parity/cli/usage.txt index 6ad0cec219e..dbae151dc2b 100644 --- a/parity/cli/usage.txt +++ b/parity/cli/usage.txt @@ -210,6 +210,8 @@ Footprint Options: megabytes (default: {flag_cache_size_blocks}). --cache-size-queue MB Specify the maximum size of memory to use for block queue (default: {flag_cache_size_queue}). + --cache-size-state MB Specify the maximum size of memory to use for + the state cache (default: {flag_cache_size_state}). --cache-size MB Set total amount of discretionary memory to use for the entire system, overrides other cache and queue options.a (default: {flag_cache_size:?}) diff --git a/parity/configuration.rs b/parity/configuration.rs index 4503b0f2f66..389c3c6f378 100644 --- a/parity/configuration.rs +++ b/parity/configuration.rs @@ -291,7 +291,12 @@ impl Configuration { fn cache_config(&self) -> CacheConfig { match self.args.flag_cache_size.or(self.args.flag_cache) { Some(size) => CacheConfig::new_with_total_cache_size(size), - None => CacheConfig::new(self.args.flag_cache_size_db, self.args.flag_cache_size_blocks, self.args.flag_cache_size_queue), + None => CacheConfig::new( + self.args.flag_cache_size_db, + self.args.flag_cache_size_blocks, + self.args.flag_cache_size_queue, + self.args.flag_cache_size_state, + ), } } diff --git a/parity/helpers.rs b/parity/helpers.rs index cdee9ede006..5a740d52c17 100644 --- a/parity/helpers.rs +++ b/parity/helpers.rs @@ -215,6 +215,10 @@ pub fn to_client_config( client_config.tracing.max_cache_size = cache_config.traces() as usize * mb; // in bytes client_config.tracing.pref_cache_size = cache_config.traces() as usize * 3 / 4 * mb; + // in bytes + client_config.state_cache_size = cache_config.state() as usize * mb; + // in bytes + client_config.jump_table_size = cache_config.jump_tables() as usize * mb; client_config.mode = mode; client_config.tracing.enabled = tracing;
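
The `shared_cache.rs` hunk above replaces a fixed item-count LRU with one bounded by an approximate memory budget: it tracks the summed capacity of cached bitmaps, grows the LRU's entry capacity while under budget, and pops least-recently-used entries once the budget is exceeded. Below is a minimal standalone sketch of that strategy, assuming the contain-rs `lru_cache` crate this PR switches the dependency to (its `capacity`, `set_capacity`, `insert` and `remove_lru` methods are the ones used in the hunk); the `ByteBudgetCache` name, the `u64`/`Vec<u8>` key and value types, and the 1024-byte budget are illustrative, not from the PR.

```rust
// Sketch only: an LRU cache bounded by a byte budget instead of an entry count.
extern crate lru_cache;

use lru_cache::LruCache;

struct ByteBudgetCache {
    inner: LruCache<u64, Vec<u8>>,
    cur_size: usize, // sum of the capacities of cached values, in bytes
    max_size: usize, // byte budget
}

impl ByteBudgetCache {
    fn new(max_size: usize) -> Self {
        ByteBudgetCache {
            inner: LruCache::new(32), // small initial entry capacity; grown on demand
            cur_size: 0,
            max_size: max_size,
        }
    }

    fn insert(&mut self, key: u64, value: Vec<u8>) {
        self.cur_size += value.capacity();

        // The entry count is only a proxy; grow it while still under the byte
        // budget so the LRU never evicts purely because of item count.
        let cap = self.inner.capacity();
        if self.inner.len() == cap && self.cur_size < self.max_size {
            self.inner.set_capacity(cap * 2);
        }

        // If the key was already present, stop accounting for the old value.
        if let Some(old) = self.inner.insert(key, value) {
            self.cur_size -= old.capacity();
        }

        // Evict least-recently-used entries until we are back under budget.
        while self.cur_size > self.max_size {
            match self.inner.remove_lru() {
                Some((_, v)) => self.cur_size -= v.capacity(),
                None => break,
            }
        }
    }

    fn get(&mut self, key: &u64) -> Option<&Vec<u8>> {
        self.inner.get_mut(key).map(|v| &*v)
    }
}

fn main() {
    let mut cache = ByteBudgetCache::new(1024);
    cache.insert(1, vec![0u8; 600]);
    cache.insert(2, vec![0u8; 600]); // pushes the first entry over the budget
    assert!(cache.get(&1).is_none());
    assert!(cache.get(&2).is_some());
}
```

As in the hunk above, the entry-count capacity only ever grows; the byte budget is what actually bounds memory, so `set_capacity` exists purely to keep the underlying LRU from evicting on count before the budget is reached.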
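To trace where the new `--cache-size-state` flag ends up: `CacheConfig::new_with_total_cache_size` now splits a `--cache-size` total as 7/10 RocksDB, 1/10 blockchain and 2/10 state, and the state share is divided 3:1 between the accounts cache (`state()`) and the EVM jump-destination cache (`jump_tables()`). With the 200 MB total used in the updated test this works out to 140 + 20 + (30 + 10) MB. `to_client_config` converts the megabyte figures to bytes, and `StateDB::new` turns its byte budget into an LRU entry count by dividing by `size_of::<Option<Account>>()`, which is why `mem_used` is documented as only an approximation.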