From bac585f59eb43439b06c79c7898ade70d9a3de71 Mon Sep 17 00:00:00 2001
From: xgreenx
Date: Thu, 2 Nov 2023 01:57:02 +0000
Subject: [PATCH 1/2] Avoid storage caching in benchmarks

- Disabled the cache for blocks in the table
- Removed usage of the in-memory transaction in benchmarks and replaced it
  with a checkpoint
- Added a bloom filter
- Added customization of the block cache
---
 benches/benches/vm.rs                  |  16 ++--
 benches/benches/vm_set/blockchain.rs   |  39 ++-------
 benches/src/bin/collect.rs             |  24 +++--
 crates/fuel-core/src/database.rs       |   4 +
 crates/fuel-core/src/state.rs          |   8 ++
 crates/fuel-core/src/state/rocks_db.rs | 116 ++++++++++++++++++++++---
 6 files changed, 139 insertions(+), 68 deletions(-)

diff --git a/benches/benches/vm.rs b/benches/benches/vm.rs
index b067a3610cb..89732406b7c 100644
--- a/benches/benches/vm.rs
+++ b/benches/benches/vm.rs
@@ -13,7 +13,6 @@ use criterion::{
 
 use contract::*;
 use fuel_core_benches::*;
-use fuel_core_storage::transactional::Transaction;
 use fuel_core_types::fuel_asm::Instruction;
 use vm_set::*;
 
@@ -33,14 +32,12 @@ where
                 instruction,
                 diff,
             } = &mut i;
-            let original_db = vm.as_mut().database_mut().clone();
-            let mut db_txn = {
-                let db = vm.as_mut().database_mut();
-                let db_txn = db.transaction();
-                // update vm database in-place to use transaction
-                *db = db_txn.as_ref().clone();
-                db_txn
-            };
+            let checkpoint = vm
+                .as_mut()
+                .database_mut()
+                .checkpoint()
+                .expect("Should be able to create a checkpoint");
+            let original_db = core::mem::replace(vm.as_mut().database_mut(), checkpoint);
 
             let final_time;
             loop {
@@ -80,7 +77,6 @@ where
                 }
             }
 
-            db_txn.commit().unwrap();
             // restore original db
             *vm.as_mut().database_mut() = original_db;
             final_time
diff --git a/benches/benches/vm_set/blockchain.rs b/benches/benches/vm_set/blockchain.rs
index f0cb9d63658..6ec606ab13e 100644
--- a/benches/benches/vm_set/blockchain.rs
+++ b/benches/benches/vm_set/blockchain.rs
@@ -1,7 +1,5 @@
 use std::{
-    env,
     iter::successors,
-    path::PathBuf,
     sync::Arc,
 };
 
@@ -13,7 +11,10 @@ use criterion::{
 };
 use fuel_core::{
     database::vm_database::VmDatabase,
-    state::rocks_db::RocksDb,
+    state::rocks_db::{
+        RocksDb,
+        ShallowTempDir,
+    },
 };
 use fuel_core_benches::*;
 use fuel_core_types::{
@@ -37,27 +38,6 @@ use rand::{
     SeedableRng,
 };
 
-/// Reimplementation of `tempdir::TempDir` that allows creating a new
-/// instance without actually creating a new directory on the filesystem.
-/// This is needed since rocksdb requires empty directory for checkpoints.
-pub struct ShallowTempDir {
-    path: PathBuf,
-}
-impl ShallowTempDir {
-    pub fn new() -> Self {
-        let mut rng = rand::thread_rng();
-        let mut path = env::temp_dir();
-        path.push(format!("fuel-core-bench-rocksdb-{}", rng.next_u64()));
-        Self { path }
-    }
-}
-impl Drop for ShallowTempDir {
-    fn drop(&mut self) {
-        // Ignore errors
-        let _ = std::fs::remove_dir_all(&self.path);
-    }
-}
-
 pub struct BenchDb {
     db: RocksDb,
     /// Used for RAII cleanup. Contents of this directory are deleted on drop.
@@ -70,7 +50,7 @@ impl BenchDb {
 
     fn new(contract: &ContractId) -> anyhow::Result<Self> {
         let tmp_dir = ShallowTempDir::new();
-        let db = Arc::new(RocksDb::default_open(&tmp_dir.path, None).unwrap());
+        let db = Arc::new(RocksDb::default_open(tmp_dir.path(), None).unwrap());
         let mut database = Database::new(db.clone());
 
         database.init_contract_state(
@@ -106,14 +86,9 @@ impl BenchDb {
 
     /// Create a new separate database instance using a rocksdb checkpoint
     fn checkpoint(&self) -> VmDatabase {
-        let tmp_dir = ShallowTempDir::new();
-        self.db
-            .checkpoint(&tmp_dir.path)
+        use fuel_core::state::TransactableStorage;
+        let database = TransactableStorage::checkpoint(&self.db)
             .expect("Unable to create checkpoint");
-        let db = RocksDb::default_open(&tmp_dir.path, None).unwrap();
-        let database = Database::new(Arc::new(db)).with_drop(Box::new(move || {
-            drop(tmp_dir);
-        }));
         VmDatabase::default_from_database(database)
     }
 }
diff --git a/benches/src/bin/collect.rs b/benches/src/bin/collect.rs
index c75fe7e760e..7d0a69b78ab 100644
--- a/benches/src/bin/collect.rs
+++ b/benches/src/bin/collect.rs
@@ -633,6 +633,7 @@ fn linear_regression(x_y: Vec<(u64, u64)>) -> f64 {
 
 fn dependent_cost(name: &String, x_y: Vec<(u64, u64)>) -> (u64, u64) {
     const NEAR_LINEAR: f64 = 0.1;
+    #[derive(PartialEq, Eq)]
     enum Type {
         /// The points have a linear property. The first point
         /// and the last points are almost the same(The difference is < 0.1).
@@ -671,7 +672,7 @@ fn dependent_cost(name: &String, x_y: Vec<(u64, u64)>) -> (u64, u64) {
 
     let linear_regression = linear_regression(x_y.clone());
 
-    let mut x_y = x_y
+    let x_y = x_y
         .into_iter()
         .map(|(x, y)| Point { x, y })
         .collect::<Vec<_>>();
@@ -704,7 +705,15 @@ fn dependent_cost(name: &String, x_y: Vec<(u64, u64)>) -> (u64, u64) {
                 .unwrap();
             (base, amount as u64)
         }
-        Type::Logarithm => {
+        Type::Logarithm | Type::Exp => {
+            if expected_type == Type::Exp {
+                println!(
+                    "The {} is exponential. We don't support exponential charts. \
+                    The opcode should be limited with an upper bound. \n {:?}",
+                    name, x_y
+                );
+            }
+
             // The logarithm function slows down fast, and the point where it becomes more
             // linear is the base point. After this point we use linear strategy.
             let last = x_y.last().unwrap().amount();
@@ -726,17 +735,6 @@ fn dependent_cost(name: &String, x_y: Vec<(u64, u64)>) -> (u64, u64) {
                 .unwrap_or(last);
             (base.y, amount as u64)
         }
-        Type::Exp => {
-            println!(
-                "The {} is exponential. We don't support exponential chart. \
-                The opcode should be limited with upper bound. \n {:?}",
-                name, x_y
-            );
-
-            x_y.sort_unstable_by_key(|a| a.amount() as u64);
-            let base = x_y.last().unwrap();
-            (base.y, 0)
-        }
     }
 }
 
diff --git a/crates/fuel-core/src/database.rs b/crates/fuel-core/src/database.rs
index dc645acd104..936407da82f 100644
--- a/crates/fuel-core/src/database.rs
+++ b/crates/fuel-core/src/database.rs
@@ -233,6 +233,10 @@ impl Database {
         self.into()
     }
 
+    pub fn checkpoint(&self) -> DatabaseResult<Database> {
+        self.data.checkpoint()
+    }
+
     pub fn flush(self) -> DatabaseResult<()> {
         self.data.flush()
     }
diff --git a/crates/fuel-core/src/state.rs b/crates/fuel-core/src/state.rs
index 2b1cac37baf..066f27eb0f3 100644
--- a/crates/fuel-core/src/state.rs
+++ b/crates/fuel-core/src/state.rs
@@ -1,5 +1,7 @@
 use crate::database::{
     Column,
+    Database,
+    Error as DatabaseError,
     Result as DatabaseResult,
 };
 use fuel_core_storage::iter::{
@@ -87,6 +89,12 @@ pub enum WriteOperation {
 }
 
 pub trait TransactableStorage: BatchOperations + Debug + Send + Sync {
+    fn checkpoint(&self) -> DatabaseResult<Database> {
+        Err(DatabaseError::Other(anyhow::anyhow!(
+            "Checkpoint is not supported"
+        )))
+    }
+
     fn flush(&self) -> DatabaseResult<()>;
 }
 
diff --git a/crates/fuel-core/src/state/rocks_db.rs b/crates/fuel-core/src/state/rocks_db.rs
index 09aebbba494..e02d3d78edf 100644
--- a/crates/fuel-core/src/state/rocks_db.rs
+++ b/crates/fuel-core/src/state/rocks_db.rs
@@ -2,6 +2,7 @@ use crate::{
     database::{
         convert_to_rocksdb_direction,
         Column,
+        Database,
         Error as DatabaseError,
         Result as DatabaseResult,
     },
@@ -20,8 +21,10 @@ use fuel_core_storage::iter::{
     BoxedIter,
     IntoBoxedIter,
 };
+use rand::RngCore;
 use rocksdb::{
     checkpoint::Checkpoint,
+    BlockBasedOptions,
     BoundColumnFamily,
     Cache,
     ColumnFamilyDescriptor,
@@ -35,15 +38,56 @@ use rocksdb::{
     WriteBatch,
 };
 use std::{
+    env,
     iter,
-    path::Path,
+    path::{
+        Path,
+        PathBuf,
+    },
     sync::Arc,
 };
 
 type DB = DBWithThreadMode<MultiThreaded>;
+
+/// Reimplementation of `tempdir::TempDir` that allows creating a new
+/// instance without actually creating a new directory on the filesystem.
+/// This is needed since rocksdb requires an empty directory for checkpoints.
+pub struct ShallowTempDir {
+    path: PathBuf,
+}
+
+impl Default for ShallowTempDir {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl ShallowTempDir {
+    /// Creates a random directory.
+    pub fn new() -> Self {
+        let mut rng = rand::thread_rng();
+        let mut path = env::temp_dir();
+        path.push(format!("fuel-core-shallow-{}", rng.next_u64()));
+        Self { path }
+    }
+
+    /// Returns the path of the directory.
+    pub fn path(&self) -> &PathBuf {
+        &self.path
+    }
+}
+
+impl Drop for ShallowTempDir {
+    fn drop(&mut self) {
+        // Ignore errors
+        let _ = std::fs::remove_dir_all(&self.path);
+    }
+}
+
 #[derive(Debug)]
 pub struct RocksDb {
     db: DB,
+    capacity: Option<usize>,
 }
 
 impl RocksDb {
@@ -63,16 +107,43 @@ impl RocksDb {
         columns: Vec<Column>,
         capacity: Option<usize>,
     ) -> DatabaseResult<Self> {
-        let cf_descriptors = columns
-            .clone()
-            .into_iter()
-            .map(|i| ColumnFamilyDescriptor::new(RocksDb::col_name(i), Self::cf_opts(i)));
+        let mut block_opts = BlockBasedOptions::default();
+        // See https://github.com/facebook/rocksdb/blob/a1523efcdf2f0e8133b9a9f6e170a0dad49f928f/include/rocksdb/table.h#L246-L271 for details on what the format versions are/do.
+        block_opts.set_format_version(5);
+
+        if let Some(capacity) = capacity {
+            // Set the block cache size to 1/3 of the capacity, as recommended by
+            // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#block-cache-size
+            let block_cache_size = capacity / 3;
+            let cache = Cache::new_lru_cache(block_cache_size);
+            block_opts.set_block_cache(&cache);
+            // "index and filter blocks will be stored in block cache, together with all other data blocks."
+            // See: https://github.com/facebook/rocksdb/wiki/Memory-usage-in-RocksDB#indexes-and-filter-blocks
+            block_opts.set_cache_index_and_filter_blocks(true);
+            // Don't evict L0 filter/index blocks from the cache
+            block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true);
+        } else {
+            block_opts.disable_cache();
+        }
+        block_opts.set_bloom_filter(10.0, true);
+
+        let cf_descriptors = columns.clone().into_iter().map(|i| {
+            ColumnFamilyDescriptor::new(
+                RocksDb::col_name(i),
+                Self::cf_opts(i, &block_opts),
+            )
+        });
 
         let mut opts = Options::default();
         opts.create_if_missing(true);
         opts.set_compression_type(DBCompressionType::Lz4);
         if let Some(capacity) = capacity {
-            let cache = Cache::new_lru_cache(capacity);
+            // Set the row cache size to 1/3 of the capacity. Another 1/3 is
+            // used by the block cache and the last 1/3 remains for other purposes:
+            //
+            // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning#block-cache-size
+            let row_cache_size = capacity / 3;
+            let cache = Cache::new_lru_cache(row_cache_size);
             opts.set_row_cache(&cache);
         }
 
@@ -82,7 +153,7 @@ impl RocksDb {
         match DB::open_cf(&opts, &path, &[] as &[&str]) {
             Ok(db) => {
                 for i in columns {
-                    let opts = Self::cf_opts(i);
+                    let opts = Self::cf_opts(i, &block_opts);
                     db.create_cf(RocksDb::col_name(i), &opts)
                         .map_err(|e| DatabaseError::Other(e.into()))?;
                 }
@@ -96,7 +167,7 @@ impl RocksDb {
                 let cf_descriptors = columns.clone().into_iter().map(|i| {
                     ColumnFamilyDescriptor::new(
                         RocksDb::col_name(i),
-                        Self::cf_opts(i),
+                        Self::cf_opts(i, &block_opts),
                     )
                 });
                 DB::open_cf_descriptors(&opts, &path, cf_descriptors)
@@ -106,12 +177,19 @@ impl RocksDb {
             ok => ok,
         }
         .map_err(|e| DatabaseError::Other(e.into()))?;
-        let rocks_db = RocksDb { db };
+        let rocks_db = RocksDb { db, capacity };
         Ok(rocks_db)
     }
 
-    pub fn checkpoint<P: AsRef<Path>>(&self, path: P) -> Result<(), rocksdb::Error> {
-        Checkpoint::new(&self.db)?.create_checkpoint(path)
+    pub fn checkpoint<P: AsRef<Path>>(&self, path: P) -> DatabaseResult<()> {
+        Checkpoint::new(&self.db)
+            .and_then(|checkpoint| checkpoint.create_checkpoint(path))
+            .map_err(|e| {
+                DatabaseError::Other(anyhow::anyhow!(
+                    "Failed to create a checkpoint: {}",
+                    e
+                ))
+            })
     }
 
     fn cf(&self, column: Column) -> Arc<BoundColumnFamily> {
@@ -121,13 +199,14 @@ impl RocksDb {
     }
 
     fn col_name(column: Column) -> String {
-        format!("column-{}", column.as_usize())
+        format!("col-{}", column.as_usize())
     }
 
-    fn cf_opts(column: Column) -> Options {
+    fn cf_opts(column: Column, block_opts: &BlockBasedOptions) -> Options {
         let mut opts = Options::default();
         opts.create_if_missing(true);
         opts.set_compression_type(DBCompressionType::Lz4);
+        opts.set_block_based_table_factory(block_opts);
 
         // All double-keys should be configured here
         match column {
@@ -402,6 +481,17 @@ impl BatchOperations for RocksDb {
 }
 
 impl TransactableStorage for RocksDb {
+    fn checkpoint(&self) -> DatabaseResult<Database> {
+        let tmp_dir = ShallowTempDir::new();
+        self.checkpoint(&tmp_dir.path)?;
+        let db = RocksDb::default_open(&tmp_dir.path, self.capacity)?;
+        let database = Database::new(Arc::new(db)).with_drop(Box::new(move || {
+            drop(tmp_dir);
+        }));
+
+        Ok(database)
+    }
+
     fn flush(&self) -> DatabaseResult<()> {
         self.db
             .flush_wal(true)

From 627adf0d884956f68e22e933c2ddd7cb5d2b5283 Mon Sep 17 00:00:00 2001
From: xgreenx
Date: Thu, 2 Nov 2023 02:02:55 +0000
Subject: [PATCH 2/2] Update CHANGELOG.md

---
 CHANGELOG.md | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index de57f089f32..908294ad1d3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 Description of the upcoming release here.
 
 ### Added
+
+- [#1469](https://github.com/FuelLabs/fuel-core/pull/1469): Added bloom filter support for RocksDB tables and increased the block cache.
 - [#1642](https://github.com/FuelLabs/fuel-core/pull/1462): Added benchmark to measure the performance of contract state and contract ID calculation; use for gas costing.
 - [#1465](https://github.com/FuelLabs/fuel-core/pull/1465): Improvements for keygen cli and crates
 - [#1457](https://github.com/FuelLabs/fuel-core/pull/1457): Fixing incorrect measurement for fast(µs) opcodes.
@@ -44,6 +46,7 @@ Description of the upcoming release here.
 
 ### Changed
 
+- [#1469](https://github.com/FuelLabs/fuel-core/pull/1469): Replaced usage of `MemoryTransactionView` with a `Checkpoint` database in the benchmarks.
 - [#1466](https://github.com/FuelLabs/fuel-core/pull/1466): Handling overflows during arithmetic operations.
 - [#1468](https://github.com/FuelLabs/fuel-core/pull/1468): Bumped version of the `fuel-vm` to `v0.40.0`. It brings some breaking changes into consensus parameters API because of changes in the underlying types.
 - [#1460](https://github.com/FuelLabs/fuel-core/pull/1460): Change tracking branch from main to master for releasy tests.