Skip to content

Commit

Permalink
Importer Metrics (#1355)
Browse files Browse the repository at this point in the history
closes: #1163 

Adds the following metrics to prometheus:

- `importer_tx_count`
- `importer_block_height`
- `importer_latest_block_commit_timestamp_s`
- `importer_execute_and_commit_duration_s`

Additional related changes:
- New field in the metadata table to keep track of the total number of
transactions so that the metric is accurate across restarts
- Removed the compiler feature flag for "metrics", since it had already sneaked
into all builds anyway with the addition of futures tracking.

This allows us to _finally_ start measuring the following:
- Transactions per second
- Delays in synchronization (i.e., comparing the block heights between
nodes, and comparing the committed timestamps)
- Various measures of time related to block importing/execution on
synchronizing nodes (i.e., validation mode), such as avg, max, min, etc.

---------

Co-authored-by: Brandon Vrooman <brandon.vrooman@gmail.com>
  • Loading branch information
Voxelot and bvrooman authored Sep 8, 2023
1 parent c46c8f3 commit ca241e2
Show file tree
Hide file tree
Showing 33 changed files with 296 additions and 146 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ Description of the upcoming release here.

### Added

- [#1355](https://github.com/FuelLabs/fuel-core/pull/1355): Added new metrics related to block importing, such as TPS and sync delays.
- [#1324](https://github.com/FuelLabs/fuel-core/pull/1324): Added pyroscope profiling to fuel-core, intended to be used by a secondary docker image that has debug symbols enabled.
- [#1309](https://github.com/FuelLabs/fuel-core/pull/1309): Add documentation for running debug builds with CLion and Visual Studio Code.
- [#1308](https://github.com/FuelLabs/fuel-core/pull/1308): Add support for loading .env files when compiling with the `env` feature. This allows users to conveniently supply CLI arguments in a secure and IDE-agnostic way.
Expand All @@ -30,6 +31,7 @@ Description of the upcoming release here.
- [#1342](https://github.com/FuelLabs/fuel-core/pull/1342): Add error handling for P2P requests to return `None` to requester and log error

### Breaking
- [#1355](https://github.com/FuelLabs/fuel-core/pull/1355): Removed the `metrics` feature flag from the fuel-core crate, and metrics are now included by default.
- [#1318](https://github.com/FuelLabs/fuel-core/pull/1318): Removed the `--sync-max-header-batch-requests` CLI argument, and renamed `--sync-max-get-txns` to `--sync-block-stream-buffer-size` to better represent the current behavior in the import.
- [#1290](https://github.com/FuelLabs/fuel-core/pull/1290): Standardize CLI args to use `-` instead of `_`.
- [#1279](https://github.com/FuelLabs/fuel-core/pull/1279): Added a new CLI flag to enable the Relayer service `--enable-relayer`, and disabled the Relayer service by default. When supplying the `--enable-relayer` flag, the `--relayer` argument becomes mandatory, and omitting it is an error. Similarly, providing a `--relayer` argument without the `--enable-relayer` flag is an error. Lastly, providing the `--keypair` or `--network` arguments will also produce an error if the `--enable-p2p` flag is not set.
Expand Down
2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion benches/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ ctrlc = "3.2.3"
ed25519-dalek = "1.0" # TODO: upgrade to 2.0 when it's released, and remove rand below
ed25519-dalek_old_rand = { package = "rand", version = "0.7.3" }
ethnum = "1.3"
fuel-core = { path = "../crates/fuel-core", default-features = false, features = ["metrics", "rocksdb-production"] }
fuel-core = { path = "../crates/fuel-core", default-features = false, features = ["rocksdb-production"] }
fuel-core-services = { path = "./../crates/services" }
fuel-core-storage = { path = "./../crates/storage" }
fuel-core-sync = { path = "./../crates/services/sync", features = ["benchmarking"] }
Expand Down
2 changes: 1 addition & 1 deletion bin/e2e-test-client/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,6 @@ insta = { workspace = true }
tempfile = { workspace = true }

[features]
default = ["fuel-core?/default", "fuel-core?/metrics"]
default = ["fuel-core?/default"]
p2p = ["fuel-core?/p2p"]
dev-deps = ["fuel-core/test-helpers"]
5 changes: 2 additions & 3 deletions bin/fuel-core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -41,12 +41,11 @@ url = { version = "2.2", optional = true }
test-case = { workspace = true }

[features]
default = ["env", "metrics", "relayer", "rocksdb"]
default = ["env", "relayer", "rocksdb"]
env = ["dep:dotenvy"]
metrics = ["fuel-core/metrics"]
p2p = ["fuel-core/p2p", "const_format"]
relayer = ["fuel-core/relayer", "dep:url", "dep:serde_json"]
rocksdb = ["fuel-core/rocksdb"]
rocksdb-production = ["fuel-core/rocksdb-production"]
# features to enable in production, but increase build times
production = ["env", "metrics", "relayer", "rocksdb-production", "p2p"]
production = ["env", "relayer", "rocksdb-production", "p2p"]
5 changes: 2 additions & 3 deletions crates/fuel-core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ fuel-core-consensus-module = { workspace = true }
fuel-core-database = { workspace = true }
fuel-core-executor = { workspace = true }
fuel-core-importer = { workspace = true }
fuel-core-metrics = { workspace = true, optional = true }
fuel-core-metrics = { workspace = true }
fuel-core-p2p = { workspace = true, optional = true }
fuel-core-poa = { workspace = true }
fuel-core-producer = { workspace = true }
Expand Down Expand Up @@ -73,8 +73,7 @@ test-case = { workspace = true }
test-strategy = { workspace = true }

[features]
default = ["metrics", "rocksdb"]
metrics = ["dep:fuel-core-metrics"]
default = ["rocksdb"]
p2p = ["dep:fuel-core-p2p", "dep:fuel-core-sync"]
relayer = ["dep:fuel-core-relayer"]
rocksdb = ["dep:rocksdb", "dep:tempfile"]
Expand Down
18 changes: 18 additions & 0 deletions crates/fuel-core/src/database/metadata.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,9 @@ use fuel_core_chain_config::ChainConfig;

pub(crate) const DB_VERSION_KEY: &[u8] = b"version";
pub(crate) const CHAIN_NAME_KEY: &[u8] = b"chain_name";
/// Tracks the total number of transactions written to the chain
/// It's useful for analyzing TPS or other metrics.
pub(crate) const TX_COUNT: &[u8] = b"total_tx_count";

/// Can be used to perform migrations in the future.
pub(crate) const DB_VERSION: u32 = 0x00;
Expand Down Expand Up @@ -45,4 +48,19 @@ impl Database {
pub fn get_chain_name(&self) -> DatabaseResult<Option<String>> {
self.get(CHAIN_NAME_KEY, Column::Metadata)
}

/// Increases the persisted total transaction count (metadata column, key
/// `TX_COUNT`) by `new_txs` and returns the new running total.
/// A missing key is treated as zero via `unwrap_or_default`, so the first
/// call on a fresh database starts the counter from `new_txs`.
/// NOTE(review): this is a non-transactional read-modify-write; concurrent
/// callers could lose increments — presumably block commits are serialized
/// by the importer, confirm against the caller.
pub fn increase_tx_count(&self, new_txs: u64) -> DatabaseResult<u64> {
// TODO: how should tx count be initialized after regenesis?
let current_tx_count: u64 =
self.get(TX_COUNT, Column::Metadata)?.unwrap_or_default();
// Using saturating_add because this value doesn't significantly impact the correctness of execution.
let new_tx_count = current_tx_count.saturating_add(new_txs);
self.insert::<_, _, u64>(TX_COUNT, Column::Metadata, &new_tx_count)?;
Ok(new_tx_count)
}

/// Returns the persisted total transaction count, or zero if the
/// `TX_COUNT` key has never been written (e.g. a fresh database or one
/// created before this metric was introduced).
pub fn get_tx_count(&self) -> DatabaseResult<u64> {
self.get(TX_COUNT, Column::Metadata)
.map(|v| v.unwrap_or_default())
}
}
1 change: 0 additions & 1 deletion crates/fuel-core/src/graphql_api.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@ use fuel_core_types::{
};
use std::net::SocketAddr;

#[cfg(feature = "metrics")]
pub(crate) mod metrics_extension;
pub mod ports;
pub mod service;
Expand Down
6 changes: 3 additions & 3 deletions crates/fuel-core/src/graphql_api/metrics_extension.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ use async_graphql::{
Value,
Variables,
};
use fuel_core_metrics::graphql_metrics::GRAPHQL_METRICS;
use fuel_core_metrics::graphql_metrics::graphql_metrics;
use std::{
sync::{
Arc,
Expand Down Expand Up @@ -59,7 +59,7 @@ impl Extension for MetricsExtInner {
let start_time = Instant::now();
let result = next.run(ctx).await;
let seconds = start_time.elapsed().as_secs_f64();
GRAPHQL_METRICS.graphql_observe("request", seconds);
graphql_metrics().graphql_observe("request", seconds);

result
}
Expand Down Expand Up @@ -95,7 +95,7 @@ impl Extension for MetricsExtInner {
let elapsed = start_time.elapsed();

if let Some(field_name) = field_name {
GRAPHQL_METRICS.graphql_observe(field_name, elapsed.as_secs_f64());
graphql_metrics().graphql_observe(field_name, elapsed.as_secs_f64());
}

if elapsed > self.log_threshold_ms {
Expand Down
20 changes: 9 additions & 11 deletions crates/fuel-core/src/graphql_api/service.rs
Original file line number Diff line number Diff line change
@@ -1,13 +1,14 @@
#[cfg(feature = "metrics")]
use crate::graphql_api::metrics_extension::MetricsExtension;
use crate::{
fuel_core_graphql_api::ports::{
BlockProducerPort,
ConsensusModulePort,
DatabasePort,
TxPoolPort,
},
graphql_api::Config,
graphql_api::{
metrics_extension::MetricsExtension,
Config,
},
schema::{
CoreSchema,
CoreSchemaBuilder,
Expand Down Expand Up @@ -166,18 +167,15 @@ pub fn new_service(
) -> anyhow::Result<Service> {
let network_addr = config.addr;

let builder = schema
let schema = schema
.data(config)
.data(database)
.data(txpool)
.data(producer)
.data(consensus_module);
let builder = builder.extension(async_graphql::extensions::Tracing);

#[cfg(feature = "metrics")]
let builder = builder.extension(MetricsExtension::new(_log_threshold_ms));

let schema = builder.finish();
.data(consensus_module)
.extension(async_graphql::extensions::Tracing)
.extension(MetricsExtension::new(_log_threshold_ms))
.finish();

let router = Router::new()
.route("/playground", get(graphql_playground))
Expand Down
8 changes: 7 additions & 1 deletion crates/fuel-core/src/service/adapters/block_importer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -51,8 +51,10 @@ impl BlockImporterAdapter {
executor: ExecutorAdapter,
verifier: VerifierAdapter,
) -> Self {
let importer = Importer::new(config, database, executor, verifier);
importer.init_metrics();
Self {
block_importer: Arc::new(Importer::new(config, database, executor, verifier)),
block_importer: Arc::new(importer),
}
}

Expand Down Expand Up @@ -113,6 +115,10 @@ impl ImporterDatabase for Database {
fn latest_block_height(&self) -> StorageResult<BlockHeight> {
self.latest_height()
}

fn increase_tx_count(&self, new_txs_count: u64) -> StorageResult<u64> {
self.increase_tx_count(new_txs_count).map_err(Into::into)
}
}

impl ExecutorDatabase for Database {
Expand Down
11 changes: 1 addition & 10 deletions crates/fuel-core/src/service/metrics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,17 +3,8 @@ use axum::{
http::Request,
response::IntoResponse,
};
#[cfg(feature = "metrics")]
use fuel_core_metrics::response::encode_metrics_response;

pub async fn metrics(_req: Request<Body>) -> impl IntoResponse {
#[cfg(feature = "metrics")]
{
encode_metrics_response()
}
#[cfg(not(feature = "metrics"))]
{
use axum::http::StatusCode;
(StatusCode::NOT_FOUND, "Metrics collection disabled")
}
encode_metrics_response()
}
81 changes: 32 additions & 49 deletions crates/fuel-core/src/state/rocks_db.rs
Original file line number Diff line number Diff line change
Expand Up @@ -15,8 +15,7 @@ use crate::{
WriteOperation,
},
};
#[cfg(feature = "metrics")]
use fuel_core_metrics::core_metrics::DATABASE_METRICS;
use fuel_core_metrics::core_metrics::database_metrics;
use fuel_core_storage::iter::{
BoxedIter,
IntoBoxedIter,
Expand Down Expand Up @@ -142,13 +141,12 @@ impl RocksDb {
item.map(|(key, value)| {
let value_as_vec = Vec::from(value);
let key_as_vec = Vec::from(key);
#[cfg(feature = "metrics")]
{
DATABASE_METRICS.read_meter.inc();
DATABASE_METRICS
.bytes_read
.observe((key_as_vec.len() + value_as_vec.len()) as f64);
}

database_metrics().read_meter.inc();
database_metrics()
.bytes_read
.observe((key_as_vec.len() + value_as_vec.len()) as f64);

(key_as_vec, Arc::new(value_as_vec))
})
.map_err(|e| DatabaseError::Other(e.into()))
Expand All @@ -158,18 +156,16 @@ impl RocksDb {

impl KeyValueStore for RocksDb {
fn get(&self, key: &[u8], column: Column) -> DatabaseResult<Option<Value>> {
#[cfg(feature = "metrics")]
DATABASE_METRICS.read_meter.inc();
database_metrics().read_meter.inc();
let value = self
.db
.get_cf(&self.cf(column), key)
.map_err(|e| DatabaseError::Other(e.into()));
#[cfg(feature = "metrics")]
{
if let Ok(Some(value)) = &value {
DATABASE_METRICS.bytes_read.observe(value.len() as f64);
}

if let Ok(Some(value)) = &value {
database_metrics().bytes_read.observe(value.len() as f64);
}

value.map(|value| value.map(Arc::new))
}

Expand All @@ -179,11 +175,9 @@ impl KeyValueStore for RocksDb {
column: Column,
value: Value,
) -> DatabaseResult<Option<Value>> {
#[cfg(feature = "metrics")]
{
DATABASE_METRICS.write_meter.inc();
DATABASE_METRICS.bytes_written.observe(value.len() as f64);
}
database_metrics().write_meter.inc();
database_metrics().bytes_written.observe(value.len() as f64);

// FIXME: This is a race condition. We should use a transaction.
let prev = self.get(key, column)?;
// FIXME: This is a race condition. We should use a transaction.
Expand Down Expand Up @@ -273,8 +267,7 @@ impl KeyValueStore for RocksDb {
}

fn size_of_value(&self, key: &[u8], column: Column) -> DatabaseResult<Option<usize>> {
#[cfg(feature = "metrics")]
DATABASE_METRICS.read_meter.inc();
database_metrics().read_meter.inc();

Ok(self
.db
Expand All @@ -289,8 +282,7 @@ impl KeyValueStore for RocksDb {
column: Column,
mut buf: &mut [u8],
) -> DatabaseResult<Option<usize>> {
#[cfg(feature = "metrics")]
DATABASE_METRICS.read_meter.inc();
database_metrics().read_meter.inc();

let r = self
.db
Expand All @@ -304,21 +296,16 @@ impl KeyValueStore for RocksDb {
})
.transpose()?;

#[cfg(feature = "metrics")]
{
if let Some(r) = &r {
DATABASE_METRICS.bytes_read.observe(*r as f64);
}
if let Some(r) = &r {
database_metrics().bytes_read.observe(*r as f64);
}

Ok(r)
}

fn write(&self, key: &[u8], column: Column, buf: &[u8]) -> DatabaseResult<usize> {
#[cfg(feature = "metrics")]
{
DATABASE_METRICS.write_meter.inc();
DATABASE_METRICS.bytes_written.observe(buf.len() as f64);
}
database_metrics().write_meter.inc();
database_metrics().bytes_written.observe(buf.len() as f64);

let r = buf.len();
self.db
Expand All @@ -329,21 +316,18 @@ impl KeyValueStore for RocksDb {
}

fn read_alloc(&self, key: &[u8], column: Column) -> DatabaseResult<Option<Value>> {
#[cfg(feature = "metrics")]
DATABASE_METRICS.read_meter.inc();
database_metrics().read_meter.inc();

let r = self
.db
.get_pinned_cf(&self.cf(column), key)
.map_err(|e| DatabaseError::Other(e.into()))?
.map(|value| value.to_vec());

#[cfg(feature = "metrics")]
{
if let Some(r) = &r {
DATABASE_METRICS.bytes_read.observe(r.len() as f64);
}
if let Some(r) = &r {
database_metrics().bytes_read.observe(r.len() as f64);
}

Ok(r.map(Arc::new))
}

Expand Down Expand Up @@ -389,13 +373,12 @@ impl BatchOperations for RocksDb {
}
}
}
#[cfg(feature = "metrics")]
{
DATABASE_METRICS.write_meter.inc();
DATABASE_METRICS
.bytes_written
.observe(batch.size_in_bytes() as f64);
}

database_metrics().write_meter.inc();
database_metrics()
.bytes_written
.observe(batch.size_in_bytes() as f64);

self.db
.write(batch)
.map_err(|e| DatabaseError::Other(e.into()))
Expand Down
Loading

0 comments on commit ca241e2

Please sign in to comment.