Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Parametrize max open files and col state cache size #6584

Merged
merged 29 commits into from
Apr 14, 2022
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 31 additions & 10 deletions core/store/src/db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -471,6 +471,8 @@ pub struct RocksDBOptions {
free_space_threshold: bytesize::ByteSize,
warn_treshold: bytesize::ByteSize,
enable_statistics: bool,
max_open_files: i32,
col_state_cache_size: usize,
}

/// Sets [`RocksDBOptions::check_free_space_interval`] to 256,
Expand All @@ -486,6 +488,8 @@ impl Default for RocksDBOptions {
free_space_threshold: bytesize::ByteSize::mb(16),
warn_treshold: bytesize::ByteSize::mb(256),
enable_statistics: false,
max_open_files: 10 * 1000,
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
col_state_cache_size: 512 * 1024 * 1024,
}
}
}
Expand Down Expand Up @@ -531,8 +535,9 @@ impl RocksDBOptions {
/// Opens a read only database.
pub fn read_only<P: AsRef<std::path::Path>>(self, path: P) -> Result<RocksDB, DBError> {
use strum::IntoEnumIterator;
let options = self.rocksdb_options.unwrap_or_else(rocksdb_options);
let cf_with_opts = DBCol::iter().map(|col| (col_name(col), rocksdb_column_options(col)));
let options = self.rocksdb_options.unwrap_or_else(|| rocksdb_options(self.max_open_files));
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
let cf_with_opts = DBCol::iter()
.map(|col| (col_name(col), rocksdb_column_options(col, self.col_state_cache_size)));
let db = DB::open_cf_with_opts_for_read_only(&options, path, cf_with_opts, false)?;
let cfs = DBCol::iter()
.map(|col| db.cf_handle(&col_name(col)).unwrap() as *const ColumnFamily)
Expand All @@ -552,15 +557,21 @@ impl RocksDBOptions {
/// Opens the database in read/write mode.
pub fn read_write<P: AsRef<std::path::Path>>(self, path: P) -> Result<RocksDB, DBError> {
use strum::IntoEnumIterator;
let mut options = self.rocksdb_options.unwrap_or_else(rocksdb_options);
let mut options =
self.rocksdb_options.unwrap_or_else(|| rocksdb_options(self.max_open_files));
if self.enable_statistics {
options = enable_statistics(options);
}
let cf_names =
self.cf_names.unwrap_or_else(|| DBCol::iter().map(|col| col_name(col)).collect());
let cf_descriptors = self.cf_descriptors.unwrap_or_else(|| {
DBCol::iter()
.map(|col| ColumnFamilyDescriptor::new(col_name(col), rocksdb_column_options(col)))
.map(|col| {
ColumnFamilyDescriptor::new(
col_name(col),
rocksdb_column_options(col, self.col_state_cache_size),
)
})
.collect()
});
let db = DB::open_cf_descriptors(&options, path, cf_descriptors)?;
Expand Down Expand Up @@ -590,6 +601,16 @@ impl RocksDBOptions {
self.enable_statistics = true;
self
}

/// Overrides the limit on simultaneously open files handed to RocksDB
/// (see [`rocksdb_options`]); consumes and returns the builder.
pub fn max_open_files(mut self, max_open_files: i32) -> Self {
    self.max_open_files = max_open_files;
    self
}

pub fn col_state_cache_size(mut self, col_state_cache_size: usize) -> Self {
self.col_state_cache_size = col_state_cache_size;
self
}
}

pub struct TestDB {
Expand Down Expand Up @@ -811,14 +832,14 @@ fn set_compression_options(opts: &mut Options) {
}

/// DB level options
fn rocksdb_options() -> Options {
fn rocksdb_options(max_open_files: i32) -> Options {
let mut opts = Options::default();

set_compression_options(&mut opts);
opts.create_missing_column_families(true);
opts.create_if_missing(true);
opts.set_use_fsync(false);
opts.set_max_open_files(512);
opts.set_max_open_files(max_open_files);
opts.set_keep_log_file_num(1);
opts.set_bytes_per_sync(bytesize::MIB);
opts.set_write_buffer_size(256 * bytesize::MIB as usize);
Expand Down Expand Up @@ -869,18 +890,18 @@ fn rocksdb_block_based_options(cache_size: usize) -> BlockBasedOptions {
}

// TODO(#5213) Use ByteSize package to represent sizes.
fn choose_cache_size(col: DBCol) -> usize {
fn choose_cache_size(col: DBCol, col_state_cache_size: usize) -> usize {
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
match col {
DBCol::ColState => 512 * 1024 * 1024,
DBCol::ColState => col_state_cache_size,
_ => 32 * 1024 * 1024,
}
}

fn rocksdb_column_options(col: DBCol) -> Options {
fn rocksdb_column_options(col: DBCol, col_state_cache_size: usize) -> Options {
let mut opts = Options::default();
set_compression_options(&mut opts);
opts.set_level_compaction_dynamic_level_bytes(true);
let cache_size = choose_cache_size(col);
let cache_size = choose_cache_size(col, col_state_cache_size);
opts.set_block_based_table_factory(&rocksdb_block_based_options(cache_size));

// Note that this function changes a lot of rustdb parameters including:
Expand Down
2 changes: 1 addition & 1 deletion core/store/src/db/v6_to_v7.rs
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ fn merge_refcounted_records_v6(result: &mut Vec<u8>, val: &[u8]) {
}

fn rocksdb_column_options_v6(col: DBCol) -> Options {
let mut opts = rocksdb_column_options(DBCol::ColDbVersion);
let mut opts = rocksdb_column_options(DBCol::ColDbVersion, 512 * 1024 * 1024);

if col == DBCol::ColState {
opts.set_merge_operator("refcount merge", refcount_merge_v6, refcount_merge_v6);
Expand Down
21 changes: 19 additions & 2 deletions core/store/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -289,21 +289,38 @@ pub fn read_with_cache<'a, T: BorshDeserialize + 'a>(
Ok(None)
}

#[derive(Default, Debug)]
/// Options controlling how the RocksDB-backed store is opened.
/// Consumed by `create_store_with_config`, which forwards each field to
/// the corresponding `RocksDBOptions` builder method.
#[derive(Debug)]
pub struct StoreConfig {
/// Attempted writes to the DB will fail. Doesn't require a `LOCK` file.
pub read_only: bool,
/// Re-export storage layer statistics as prometheus metrics.
/// Minor performance impact is expected.
pub enable_statistics: bool,
/// Maximum number of store files being opened simultaneously.
pub max_open_files: i32,
/// Cache size for ColState column (presumably in bytes — the default is
/// `512 * 1024 * 1024`; confirm against `rocksdb_block_based_options`).
pub col_state_cache_size: usize,
}

impl Default for StoreConfig {
fn default() -> StoreConfig {
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
StoreConfig {
read_only: false,
enable_statistics: false,
max_open_files: 10 * 1000,
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
col_state_cache_size: 512 * 1024 * 1024,
}
}
}

/// Opens the store at `path` using the default [`StoreConfig`].
pub fn create_store(path: &Path) -> Store {
    create_store_with_config(path, Default::default())
}

pub fn create_store_with_config(path: &Path, store_config: StoreConfig) -> Store {
let mut opts = RocksDBOptions::default();
let mut opts = RocksDBOptions::default()
.max_open_files(store_config.max_open_files)
.col_state_cache_size(store_config.col_state_cache_size);
if store_config.enable_statistics {
opts = opts.enable_statistics();
}
Expand Down
29 changes: 29 additions & 0 deletions nearcore/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -334,6 +334,33 @@ fn default_enable_rocksdb_statistics() -> bool {
false
}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct StoreConfig {
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
/// Maximum number of store files being opened simultaneously.
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
#[serde(default = "default_max_open_files")]
pub max_open_files: i32,
/// Cache size for ColState column.
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
#[serde(default = "default_col_state_cache_size")]
pub col_state_cache_size: usize,
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
}

impl Default for StoreConfig {
    /// Reuses the serde default functions so a missing `store` section
    /// and a missing individual field yield identical values.
    fn default() -> Self {
        Self {
            max_open_files: default_max_open_files(),
            col_state_cache_size: default_col_state_cache_size(),
        }
    }
}

/// Serde default for `StoreConfig::max_open_files`: 10k open files.
fn default_max_open_files() -> i32 {
    10_000
}

/// Serde default for `StoreConfig::col_state_cache_size`: 512 MiB.
fn default_col_state_cache_size() -> usize {
    512 * 1024 * 1024
}

#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct Consensus {
/// Minimum number of peers to start syncing.
Expand Down Expand Up @@ -458,6 +485,7 @@ pub struct Config {
pub db_migration_snapshot_path: Option<PathBuf>,
#[serde(default = "default_enable_rocksdb_statistics")]
pub enable_rocksdb_statistics: bool,
pub store: StoreConfig,
}

impl Default for Config {
Expand Down Expand Up @@ -487,6 +515,7 @@ impl Default for Config {
db_migration_snapshot_path: None,
use_db_migration_snapshot: true,
enable_rocksdb_statistics: false,
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
store: StoreConfig::default(),
}
}
}
Expand Down
15 changes: 11 additions & 4 deletions nearcore/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -373,6 +373,8 @@ pub fn init_and_migrate_store(home_dir: &Path, near_config: &NearConfig) -> Stor
StoreConfig {
read_only: false,
enable_statistics: near_config.config.enable_rocksdb_statistics,
max_open_files: near_config.config.store.max_open_files,
col_state_cache_size: near_config.config.store.col_state_cache_size,
},
);
if !store_exists {
Expand Down Expand Up @@ -518,9 +520,9 @@ pub fn recompress_storage(home_dir: &Path, opts: RecompressOpts) -> anyhow::Resu
use strum::IntoEnumIterator;

let config_path = home_dir.join(config::CONFIG_FILENAME);
let archive = config::Config::from_file(&config_path)
.map_err(|err| anyhow::anyhow!("{}: {}", config_path.display(), err))?
.archive;
let config = config::Config::from_file(&config_path)
.map_err(|err| anyhow::anyhow!("{}: {}", config_path.display(), err))?;
let archive = config.archive;
let mut skip_columns = Vec::new();
if archive && !opts.keep_partial_chunks {
skip_columns.push(near_store::db::DBCol::ColPartialChunks);
Expand Down Expand Up @@ -570,7 +572,12 @@ pub fn recompress_storage(home_dir: &Path, opts: RecompressOpts) -> anyhow::Resu
info!(target: "recompress", src = %src_dir.display(), dest = %opts.dest_dir.display(), "Recompressing database");
let src_store = create_store_with_config(
&src_dir,
StoreConfig { read_only: true, enable_statistics: false },
StoreConfig {
read_only: true,
EdvardD marked this conversation as resolved.
Show resolved Hide resolved
enable_statistics: false,
max_open_files: config.store.max_open_files,
col_state_cache_size: config.store.col_state_cache_size,
},
);

let final_head_height = if skip_columns.contains(&DBCol::ColPartialChunks) {
Expand Down
7 changes: 6 additions & 1 deletion tools/state-viewer/src/cli.rs
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,12 @@ impl StateViewerSubCommand {
.unwrap_or_else(|e| panic!("Error loading config: {:#}", e));
let store = create_store_with_config(
&get_store_path(home_dir),
StoreConfig { read_only: true, enable_statistics: false },
StoreConfig {
read_only: true,
enable_statistics: false,
max_open_files: near_config.config.store.max_open_files,
col_state_cache_size: near_config.config.store.col_state_cache_size,
},
);
match self {
StateViewerSubCommand::Peers => peers(store),
Expand Down