diff --git a/Cargo.toml b/Cargo.toml
index 845f28e317..52ebf9956f 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -190,7 +190,7 @@ wasmtime = { version = "7", default-features = false, features = ["cranelift"] }
 # We use the "ondemand" feature to allow connecting after the start,
 # and reconnecting, from the tracy client to the database.
 # TODO(George): Need to be able to remove "broadcast" in some build configurations.
-tracing-tracy = { version = "0.10.2", features = [
+tracing-tracy = { version = "0.10.3", features = [
     "enable",
     "system-tracing",
     "context-switch-tracing",
diff --git a/crates/bench/benches/generic.rs b/crates/bench/benches/generic.rs
index ee32d7273c..c71e7a0aa2 100644
--- a/crates/bench/benches/generic.rs
+++ b/crates/bench/benches/generic.rs
@@ -3,6 +3,7 @@ use criterion::{
     measurement::{Measurement, WallTime},
     Bencher, BenchmarkGroup, Criterion,
 };
+use spacetimedb::db::datastore::traits::TableSchema;
 use spacetimedb_bench::{
     database::BenchDatabase,
     schemas::{create_sequential, BenchTable, IndexStrategy, Location, Person, RandomTable, BENCH_PKEY_INDEX},
@@ -69,13 +70,16 @@ fn table_suite(g: &mut Group, db
     for (index_strategy, table_id, table_params) in &tables {
         if *index_strategy == IndexStrategy::Unique {
             iterate::<DB, T>(g, table_params, db, table_id, 100)?;
+            sql_select::<DB, T>(g, table_params, db, table_id, 100)?;
 
             if table_params.contains("person") {
                 // perform "find" benchmarks
+                sql_find::<DB, T>(g, db, table_id, index_strategy, BENCH_PKEY_INDEX, 1000, 100)?;
                 find::<DB, T>(g, db, table_id, index_strategy, BENCH_PKEY_INDEX, 1000, 100)?;
             }
         } else {
             // perform "filter" benchmarks
+            sql_where::<DB, T>(g, db, table_id, index_strategy, 1, 1000, 100)?;
             filter::<DB, T>(g, db, table_id, index_strategy, 1, 1000, 100)?;
         }
     }
@@ -122,7 +126,7 @@ fn bench_harness<
 
 #[inline(never)]
 fn empty<DB: BenchDatabase>(g: &mut Group, db: &mut DB) -> ResultBench<()> {
-    let id = format!("empty");
+    let id = "empty".to_string();
     g.bench_function(&id, |b| {
         bench_harness(
             b,
@@ -159,11 +163,11 @@ fn insert_1(
                 let mut data = data.clone();
                 db.clear_table(table_id)?;
                 let row = data.pop().unwrap();
-                db.insert_bulk(&table_id, data)?;
+                db.insert_bulk(table_id, data)?;
                 Ok(row)
             },
             |db, row| {
-                db.insert(&table_id, row)?;
+                db.insert(table_id, row)?;
                 Ok(())
             },
         )
@@ -196,12 +200,46 @@ fn insert_bulk(
                 db.clear_table(table_id)?;
                 let to_insert = data.split_off(load as usize);
                 if !data.is_empty() {
-                    db.insert_bulk(&table_id, data)?;
+                    db.insert_bulk(table_id, data)?;
                 }
                 Ok(to_insert)
             },
             |db, to_insert| {
-                db.insert_bulk(&table_id, to_insert)?;
+                db.insert_bulk(table_id, to_insert)?;
+                Ok(())
+            },
+        )
+    });
+    db.clear_table(table_id)?;
+    Ok(())
+}
+
+#[inline(never)]
+fn sql_select<DB: BenchDatabase, T: BenchTable + RandomTable>(
+    g: &mut Group,
+    table_params: &str,
+    db: &mut DB,
+    table_id: &DB::TableId,
+    count: u32,
+) -> ResultBench<()> {
+    let id = format!("sql_select/{table_params}/count={count}");
+    let data = create_sequential::<T>(0xdeadbeef, count, 1000);
+
+    db.insert_bulk(table_id, data)?;
+
+    // Each iteration performs a single transaction,
+    // though it iterates across many rows.
+    g.throughput(criterion::Throughput::Elements(1));
+
+    let table = db.get_table::<T>(table_id)?;
+
+    g.bench_function(&id, |b| {
+        bench_harness(
+            b,
+            db,
+            |_| Ok(()),
+            |db, _| {
+                db.sql_select(&table)?;
                 Ok(())
             },
         )
     });
     db.clear_table(table_id)?;
     Ok(())
 }
@@ -244,15 +282,17 @@ fn iterate(
 
 /// Implements both "filter" and "find" benchmarks.
 #[inline(never)]
-fn filter<DB: BenchDatabase, T: BenchTable + RandomTable>(
+#[allow(clippy::too_many_arguments)]
+fn _filter_setup<DB: BenchDatabase, T: BenchTable + RandomTable>(
     g: &mut Group,
     db: &mut DB,
+    bench_name: &str,
     table_id: &DB::TableId,
     index_strategy: &IndexStrategy,
     column_index: u32,
     load: u32,
     buckets: u32,
-) -> ResultBench<()> {
+) -> ResultBench<(String, TableSchema, Vec<T>)> {
     let filter_column_type = match &T::product_type().elements[column_index as usize].algebraic_type {
         AlgebraicType::Builtin(BuiltinType::String) => "string",
         AlgebraicType::Builtin(BuiltinType::U32) => "u32",
@@ -265,15 +305,33 @@ fn filter(
         IndexStrategy::NonUnique => "non_indexed",
         _ => unimplemented!(),
     };
-    let id = format!("filter/{filter_column_type}/{indexed}/load={load}/count={mean_result_count}");
+    let id = format!("{bench_name}/{filter_column_type}/{indexed}/load={load}/count={mean_result_count}");
     let data = create_sequential::<T>(0xdeadbeef, load, buckets as u64);
 
-    db.insert_bulk(&table_id, data.clone())?;
+    db.insert_bulk(table_id, data.clone())?;
 
     // Each iteration performs a single transaction.
     g.throughput(criterion::Throughput::Elements(1));
 
+    let table = db.get_table::<T>(table_id)?;
+
+    Ok((id, table, data))
+}
+
+#[inline(never)]
+fn filter<DB: BenchDatabase, T: BenchTable + RandomTable>(
+    g: &mut Group,
+    db: &mut DB,
+    table_id: &DB::TableId,
+    index_strategy: &IndexStrategy,
+    column_index: u32,
+    load: u32,
+    buckets: u32,
+) -> ResultBench<()> {
+    let (id, table, data) =
+        _filter_setup::<DB, T>(g, db, "filter", table_id, index_strategy, column_index, load, buckets)?;
+
     // We loop through all buckets found in the sample data.
     // This mildly increases variance on the benchmark, but makes "mean_result_count" more accurate.
     // Note that all databases have EXACTLY the same sample data.
@@ -290,7 +348,7 @@ fn filter(
                 Ok(value)
             },
             |db, value| {
-                db.filter::<T>(&table_id, column_index, value)?;
+                db.sql_where::<T>(&table, column_index, value)?;
                 Ok(())
             },
         )
@@ -299,31 +357,132 @@ fn filter(
     Ok(())
 }
 
-/// Implements both "filter" and "find" benchmarks.
 #[inline(never)]
-fn find<DB: BenchDatabase, T: BenchTable + RandomTable>(
+fn sql_where<DB: BenchDatabase, T: BenchTable + RandomTable>(
     g: &mut Group,
     db: &mut DB,
     table_id: &DB::TableId,
     index_strategy: &IndexStrategy,
-    column_id: u32,
+    column_index: u32,
     load: u32,
     buckets: u32,
 ) -> ResultBench<()> {
+    let (id, table, data) = _filter_setup::<DB, T>(
+        g,
+        db,
+        "sql_where",
+        table_id,
+        index_strategy,
+        column_index,
+        load,
+        buckets,
+    )?;
+
+    // We loop through all buckets found in the sample data.
+    // This mildly increases variance on the benchmark, but makes "mean_result_count" more accurate.
+    // Note that all databases have EXACTLY the same sample data.
+    let mut i = 0;
+
+    g.bench_function(&id, |b| {
+        bench_harness(
+            b,
+            db,
+            |_| {
+                // pick something to look for
+                let value = data[i].clone().into_product_value().elements[column_index as usize].clone();
+                i = (i + 1) % load as usize;
+                Ok(value)
+            },
+            |db, value| {
+                db.sql_where::<T>(&table, column_index, value)?;
+                Ok(())
+            },
+        )
+    });
+    db.clear_table(table_id)?;
+    Ok(())
+}
+
+/// Implements both "filter" and "find" benchmarks.
+#[inline(never)]
+fn _find_setup<DB: BenchDatabase, T: BenchTable + RandomTable>(
+    g: &mut Group,
+    db: &mut DB,
+    bench_name: &str,
+    table_id: &DB::TableId,
+    index_strategy: &IndexStrategy,
+    load: u32,
+    buckets: u32,
+) -> ResultBench<(String, TableSchema, Vec<T>)> {
     assert_eq!(
         *index_strategy,
         IndexStrategy::Unique,
         "find benchmarks require unique key"
     );
-    let id = format!("find_unique/u32/load={load}");
+    let id = format!("{bench_name}/u32/load={load}");
     let data = create_sequential::<T>(0xdeadbeef, load, buckets as u64);
 
-    db.insert_bulk(&table_id, data.clone())?;
+    db.insert_bulk(table_id, data.clone())?;
 
     // Each iteration performs a single transaction.
     g.throughput(criterion::Throughput::Elements(1));
 
+    let table = db.get_table::<T>(table_id)?;
+
+    Ok((id, table, data))
+}
+
+#[inline(never)]
+fn find<DB: BenchDatabase, T: BenchTable + RandomTable>(
+    g: &mut Group,
+    db: &mut DB,
+    table_id: &DB::TableId,
+    index_strategy: &IndexStrategy,
+    column_id: u32,
+    load: u32,
+    buckets: u32,
+) -> ResultBench<()> {
+    let (id, table, data) = _find_setup::<DB, T>(g, db, "find_unique", table_id, index_strategy, load, buckets)?;
+
+    // We loop through all buckets found in the sample data.
+    // This mildly increases variance on the benchmark, but makes "mean_result_count" more accurate.
+    // Note that all benchmarks use exactly the same sample data.
+    let mut i = 0;
+
+    g.bench_function(&id, |b| {
+        bench_harness(
+            b,
+            db,
+            |_| {
+                let value = data[i].clone().into_product_value().elements[column_id as usize].clone();
+                i = (i + 1) % load as usize;
+                Ok(value)
+            },
+            |db, value| {
+                db.filter::<T>(&table, column_id, value)?;
+                Ok(())
+            },
+        )
+    });
+    db.clear_table(table_id)?;
+    Ok(())
+}
+
+/// Implements both "filter" and "find" benchmarks.
+#[inline(never)]
+fn sql_find<DB: BenchDatabase, T: BenchTable + RandomTable>(
+    g: &mut Group,
+    db: &mut DB,
+    table_id: &DB::TableId,
+    index_strategy: &IndexStrategy,
+    column_id: u32,
+    load: u32,
+    buckets: u32,
+) -> ResultBench<()> {
+    let (id, table, data) =
+        _find_setup::<DB, T>(g, db, "sql_where_find_unique", table_id, index_strategy, load, buckets)?;
+
     // We loop through all buckets found in the sample data.
     // This mildly increases variance on the benchmark, but makes "mean_result_count" more accurate.
     // Note that all benchmarks use exactly the same sample data.
@@ -339,7 +498,7 @@ fn find(
                 Ok(value)
             },
             |db, value| {
-                db.filter::<T>(&table_id, column_id, value)?;
+                db.sql_where::<T>(&table, column_id, value)?;
                 Ok(())
             },
         )
diff --git a/crates/bench/src/database.rs b/crates/bench/src/database.rs
index 1cb0829c05..51b5cf0296 100644
--- a/crates/bench/src/database.rs
+++ b/crates/bench/src/database.rs
@@ -1,3 +1,4 @@
+use spacetimedb::db::datastore::traits::TableSchema;
 use spacetimedb_lib::AlgebraicValue;
 
 use crate::schemas::{BenchTable, IndexStrategy};
@@ -19,6 +20,9 @@ pub trait BenchDatabase: Sized {
 
     fn create_table<T: BenchTable>(&mut self, table_style: IndexStrategy) -> ResultBench<Self::TableId>;
 
+    /// Return table metadata so we can remove this from the hot path
+    fn get_table<T: BenchTable>(&mut self, table_id: &Self::TableId) -> ResultBench<TableSchema>;
+
     /// Should not drop the table, only delete all the rows.
     fn clear_table(&mut self, table_id: &Self::TableId) -> ResultBench<()>;
 
@@ -41,7 +45,20 @@ pub trait BenchDatabase: Sized {
     /// Filter the table on the specified column index for the specified value.
     fn filter<T: BenchTable>(
         &mut self,
-        table_id: &Self::TableId,
+        table: &TableSchema,
         column_index: u32,
         value: AlgebraicValue,
     ) -> ResultBench<()>;
+
+    /// Perform a `SELECT * FROM table`
+    /// Note: this can be non-generic because none of the implementations use the relevant generic argument.
+    fn sql_select(&mut self, table: &TableSchema) -> ResultBench<()>;
+
+    /// Perform a `SELECT * FROM table WHERE column = value`
+    /// Note: this can be non-generic because none of the implementations use the relevant generic argument.
+    fn sql_where<T: BenchTable>(
+        &mut self,
+        table: &TableSchema,
         column_index: u32,
         value: AlgebraicValue,
     ) -> ResultBench<()>;
diff --git a/crates/bench/src/lib.rs b/crates/bench/src/lib.rs
index 25f5d1b708..c55b51ea3e 100644
--- a/crates/bench/src/lib.rs
+++ b/crates/bench/src/lib.rs
@@ -1,3 +1,7 @@
+use crate::schemas::BenchTable;
+use spacetimedb::db::datastore::traits::{ColumnSchema, TableSchema};
+use spacetimedb_lib::auth::{StAccess, StTableType};
+
 pub mod database;
 pub mod schemas;
 pub mod spacetime_module;
@@ -6,6 +10,30 @@ pub mod sqlite;
 
 pub type ResultBench<T> = Result<T, anyhow::Error>;
 
+pub(crate) fn create_schema<T: BenchTable>(table_name: &str) -> TableSchema {
+    let columns = T::product_type()
+        .elements
+        .into_iter()
+        .enumerate()
+        .map(|(pos, col)| ColumnSchema {
+            table_id: 0,
+            col_id: pos as u32,
+            col_name: col.name.unwrap(),
+            col_type: col.algebraic_type,
+            is_autoinc: false,
+        });
+
+    TableSchema {
+        table_id: 0,
+        table_name: table_name.to_string(),
+        indexes: vec![],
+        columns: columns.collect(),
+        constraints: vec![],
+        table_type: StTableType::System,
+        table_access: StAccess::Public,
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use crate::{
diff --git a/crates/bench/src/spacetime_module.rs b/crates/bench/src/spacetime_module.rs
index 7e57d7da74..a68a2d57ea 100644
--- a/crates/bench/src/spacetime_module.rs
+++ b/crates/bench/src/spacetime_module.rs
@@ -1,9 +1,12 @@
+use spacetimedb::db::datastore::traits::TableSchema;
 use spacetimedb::db::{Config, FsyncPolicy, Storage};
+use spacetimedb_lib::sats::BuiltinValue;
 use spacetimedb_lib::{sats::ArrayValue, AlgebraicValue, ProductValue};
 use spacetimedb_testing::modules::{start_runtime, CompiledModule, ModuleHandle};
 use tokio::runtime::Runtime;
 
 use crate::{
+    create_schema,
     database::BenchDatabase,
     schemas::{snake_case_table_name, table_name, BenchTable},
     ResultBench,
@@ -82,6 +85,10 @@ impl BenchDatabase for SpacetimeModule {
         })
     }
 
+    fn get_table<T: BenchTable>(&mut self, table_id: &Self::TableId) -> ResultBench<TableSchema> {
+        Ok(create_schema::<T>(&table_id.pascal_case))
+    }
+
     fn clear_table(&mut self, table_id: &Self::TableId) -> ResultBench<()> {
         let SpacetimeModule { runtime, module } = self;
         let module = module.as_mut().unwrap();
@@ -173,16 +180,16 @@ impl BenchDatabase for SpacetimeModule {
 
     fn filter<T: BenchTable>(
         &mut self,
-        table_id: &Self::TableId,
+        table: &TableSchema,
         column_index: u32,
         value: AlgebraicValue,
     ) -> ResultBench<()> {
         let SpacetimeModule { runtime, module } = self;
         let module = module.as_mut().unwrap();
 
-        let product_type = T::product_type();
-        let column_name = product_type.elements[column_index as usize].name.as_ref().unwrap();
-        let reducer_name = format!("filter_{}_by_{}", table_id.snake_case, column_name);
+        //let product_type = T::product_type();
+        let column_name = &table.columns[column_index as usize].col_name;
+        let reducer_name = format!("filter_{}_by_{}", table.table_name, column_name);
 
         runtime.block_on(async move {
             module
@@ -191,6 +198,46 @@ impl BenchDatabase for SpacetimeModule {
             Ok(())
         })
     }
+
+    fn sql_select(&mut self, table: &TableSchema) -> ResultBench<()> {
+        let SpacetimeModule { runtime, module } = self;
+        let module = module.as_mut().unwrap();
+        let sql = format!("SELECT * FROM {}", table.table_name);
+        let id = module.client.id.identity;
+        runtime.block_on(async move {
+            module.client.module.one_off_query(id, sql).await?;
+            Ok(())
+        })
+    }
+
+    fn sql_where<T: BenchTable>(
+        &mut self,
+        table: &TableSchema,
+        column_index: u32,
+        value: AlgebraicValue,
+    ) -> ResultBench<()> {
+        let column = &table.columns[column_index as usize].col_name;
+
+        let value = match value.as_builtin().unwrap() {
+            BuiltinValue::U32(x) => x.to_string(),
+            BuiltinValue::U64(x) => x.to_string(),
+            BuiltinValue::String(x) => format!("'{}'", x),
+            _ => {
+                unreachable!()
+            }
+        };
+
+        let SpacetimeModule { runtime, module } = self;
+        let module = module.as_mut().unwrap();
+        let table_name = &table.table_name;
+        let sql_query = format!("SELECT * FROM {table_name} WHERE {column} = {value}");
+
+        let id = module.client.id.identity;
+        runtime.block_on(async move {
+            module.client.module.one_off_query(id, sql_query).await?;
+            Ok(())
+        })
+    }
 }
 
 #[derive(Clone)]
diff --git a/crates/bench/src/spacetime_raw.rs b/crates/bench/src/spacetime_raw.rs
index de17163101..79325c3627 100644
--- a/crates/bench/src/spacetime_raw.rs
+++ b/crates/bench/src/spacetime_raw.rs
@@ -3,8 +3,12 @@ use crate::{
     schemas::{table_name, BenchTable, IndexStrategy},
     ResultBench,
 };
-use spacetimedb::db::datastore::traits::{IndexDef, TableDef};
+use spacetimedb::db::datastore::traits::{IndexDef, TableDef, TableSchema};
 use spacetimedb::db::relational_db::{open_db, RelationalDB};
+use spacetimedb::error::DBError;
+use spacetimedb::sql::execute::run;
+use spacetimedb_lib::identity::AuthCtx;
+use spacetimedb_lib::sats::BuiltinValue;
 use spacetimedb_lib::AlgebraicValue;
 use std::hint::black_box;
 use tempdir::TempDir;
@@ -60,6 +64,16 @@ impl BenchDatabase for SpacetimeRaw {
         })
     }
 
+    fn get_table<T: BenchTable>(&mut self, table_id: &Self::TableId) -> ResultBench<TableSchema> {
+        let schema = self.db.with_auto_commit(|tx| {
+            // TODO: For some reason this does not retrieve the table name; wait for the PR that fixes the bootstrapping issues.
+            let mut t = self.db.schema_for_table(tx, *table_id)?;
+            t.table_name = self.db.table_name_from_id(tx, *table_id)?.unwrap();
+            Ok::<TableSchema, DBError>(t)
+        })?;
+        Ok(schema)
+    }
+
     fn clear_table(&mut self, table_id: &Self::TableId) -> ResultBench<()> {
         self.db.with_auto_commit(|tx| {
             self.db.clear_table(tx, *table_id)?;
@@ -103,15 +117,53 @@ impl BenchDatabase for SpacetimeRaw {
 
     fn filter<T: BenchTable>(
         &mut self,
-        table_id: &Self::TableId,
+        table: &TableSchema,
         column_index: u32,
         value: AlgebraicValue,
     ) -> ResultBench<()> {
         self.db.with_auto_commit(|tx| {
-            for row in self.db.iter_by_col_eq(tx, *table_id, column_index, value)? {
+            for row in self.db.iter_by_col_eq(tx, table.table_id, column_index, value)? {
                 black_box(row);
             }
             Ok(())
         })
     }
+
+    fn sql_select(&mut self, table: &TableSchema) -> ResultBench<()> {
+        self.db.with_auto_commit(|tx| {
+            let sql_query = format!("SELECT * FROM {}", table.table_name);
+
+            run(&self.db, tx, &sql_query, AuthCtx::for_testing())?;
+
+            Ok(())
+        })
+    }
+
+    fn sql_where<T: BenchTable>(
+        &mut self,
+        table: &TableSchema,
+        column_index: u32,
+        value: AlgebraicValue,
+    ) -> ResultBench<()> {
+        self.db.with_auto_commit(|tx| {
+            let column = &table.columns[column_index as usize].col_name;
+
+            let table_name = &table.table_name;
+
+            let value = match value.as_builtin().unwrap() {
+                BuiltinValue::U32(x) => x.to_string(),
+                BuiltinValue::U64(x) => x.to_string(),
+                BuiltinValue::String(x) => format!("'{}'", x),
+                _ => {
+                    unreachable!()
+                }
+            };
+
+            let sql_query = format!("SELECT * FROM {table_name} WHERE {column} = {value}");
+
+            run(&self.db, tx, &sql_query, AuthCtx::for_testing())?;
+
+            Ok(())
+        })
+    }
 }
diff --git a/crates/bench/src/sqlite.rs b/crates/bench/src/sqlite.rs
index 7812810ae0..4f9bd62701 100644
--- a/crates/bench/src/sqlite.rs
+++ b/crates/bench/src/sqlite.rs
@@ -1,4 +1,5 @@
 use crate::{
+    create_schema,
     database::BenchDatabase,
     schemas::{table_name, BenchTable, IndexStrategy},
     ResultBench,
@@ -6,6 +7,7 @@ use crate::{
 use ahash::AHashMap;
 use lazy_static::lazy_static;
 use rusqlite::Connection;
+use spacetimedb::db::datastore::traits::TableSchema;
 use spacetimedb_lib::{
     sats::{self},
     AlgebraicType, AlgebraicValue, ProductType,
@@ -29,6 +31,8 @@ impl BenchDatabase for SQLite {
         "sqlite"
     }
 
+    type TableId = String;
+
     fn build(in_memory: bool, fsync: bool) -> ResultBench<Self>
     where
         Self: Sized,
@@ -53,8 +57,6 @@ impl BenchDatabase for SQLite {
         })
     }
 
-    type TableId = String;
-
     /// We derive the SQLite schema from the AlgebraicType of the table.
     fn create_table<T: BenchTable>(&mut self, table_style: IndexStrategy) -> ResultBench<Self::TableId> {
@@ -98,6 +100,10 @@ impl BenchDatabase for SQLite {
         Ok(table_name)
     }
 
+    fn get_table<T: BenchTable>(&mut self, table_id: &Self::TableId) -> ResultBench<TableSchema> {
+        Ok(create_schema::<T>(table_id))
+    }
+
     fn clear_table(&mut self, table_id: &Self::TableId) -> ResultBench<()> {
         self.db.execute_batch(&format!("DELETE FROM {table_id};"))?;
         Ok(())
@@ -171,17 +177,13 @@ impl BenchDatabase for SQLite {
 
     fn filter<T: BenchTable>(
         &mut self,
-        table_id: &Self::TableId,
+        table: &TableSchema,
         column_index: u32,
         value: AlgebraicValue,
     ) -> ResultBench<()> {
-        let statement = memo_query(BenchName::Filter, table_id, || {
-            let column = T::product_type()
-                .elements
-                .swap_remove(column_index as usize)
-                .name
-                .unwrap();
-            format!("SELECT * FROM {table_id} WHERE {column} = ?")
+        let statement = memo_query(BenchName::Filter, &table.table_name, || {
+            let column = &table.columns[column_index as usize].col_name;
+            format!("SELECT * FROM {} WHERE {column} = ?", table.table_name)
         });
 
         let mut begin = self.db.prepare_cached(BEGIN_TRANSACTION)?;
@@ -214,6 +216,19 @@ impl BenchDatabase for SQLite {
         commit.execute(())?;
         Ok(())
     }
+
+    fn sql_select(&mut self, table: &TableSchema) -> ResultBench<()> {
+        self.iterate(&table.table_name)
+    }
+
+    fn sql_where<T: BenchTable>(
+        &mut self,
+        table: &TableSchema,
+        column_index: u32,
+        value: AlgebraicValue,
+    ) -> ResultBench<()> {
+        self.filter::<T>(table, column_index, value)
+    }
 }
 
 /// Note: The rusqlite transaction API just invokes these statements,
diff --git a/crates/core/src/db/datastore/traits.rs b/crates/core/src/db/datastore/traits.rs
index d27efa2279..44fbd26313 100644
--- a/crates/core/src/db/datastore/traits.rs
+++ b/crates/core/src/db/datastore/traits.rs
@@ -119,11 +119,11 @@ impl From for IndexDef {
 
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct ColumnSchema {
-    pub(crate) table_id: u32,
-    pub(crate) col_id: u32,
-    pub(crate) col_name: String,
-    pub(crate) col_type: AlgebraicType,
-    pub(crate) is_autoinc: bool,
+    pub table_id: u32,
+    pub col_id: u32,
+    pub col_name: String,
+    pub col_type: AlgebraicType,
+    pub is_autoinc: bool,
 }
 
 impl From<&ColumnSchema> for spacetimedb_lib::table::ColumnDef {
@@ -198,13 +198,13 @@ pub struct ConstraintDef {
 
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct TableSchema {
-    pub(crate) table_id: u32,
-    pub(crate) table_name: String,
-    pub(crate) columns: Vec<ColumnSchema>,
-    pub(crate) indexes: Vec<IndexSchema>,
-    pub(crate) constraints: Vec<ConstraintSchema>,
-    pub(crate) table_type: StTableType,
-    pub(crate) table_access: StAccess,
+    pub table_id: u32,
+    pub table_name: String,
+    pub columns: Vec<ColumnSchema>,
+    pub indexes: Vec<IndexSchema>,
+    pub constraints: Vec<ConstraintSchema>,
+    pub table_type: StTableType,
+    pub table_access: StAccess,
 }
 
 impl TableSchema {
diff --git a/crates/core/src/sql/compiler.rs b/crates/core/src/sql/compiler.rs
index 87b358ff84..e9e441cdb6 100644
--- a/crates/core/src/sql/compiler.rs
+++ b/crates/core/src/sql/compiler.rs
@@ -1,5 +1,6 @@
 use nonempty::NonEmpty;
 use std::collections::HashMap;
+use tracing::info;
 
 use crate::db::datastore::locking_tx_datastore::MutTxId;
 use crate::db::datastore::traits::{IndexSchema, TableSchema};
@@ -16,8 +17,9 @@ use spacetimedb_vm::expr::{ColumnOp, CrudExpr, DbType, Expr, IndexJoin, JoinExpr
 use spacetimedb_vm::operator::OpCmp;
 
 /// Compile the `SQL` expression into a `ast`
-#[tracing::instrument(skip(db, tx))]
+#[tracing::instrument(skip_all)]
 pub fn compile_sql(db: &RelationalDB, tx: &MutTxId, sql_text: &str) -> Result<Vec<CrudExpr>, DBError> {
+    info!(sql = sql_text);
     let ast = compile_to_ast(db, tx, sql_text)?;
 
     let mut results = Vec::with_capacity(ast.len());
diff --git a/crates/core/src/sql/execute.rs b/crates/core/src/sql/execute.rs
index fbaaa9e391..32e2a59af5 100644
--- a/crates/core/src/sql/execute.rs
+++ b/crates/core/src/sql/execute.rs
@@ -3,6 +3,7 @@ use spacetimedb_lib::relation::MemTable;
 use spacetimedb_lib::{ProductType, ProductValue};
 use spacetimedb_vm::eval::run_ast;
 use spacetimedb_vm::expr::{CodeResult, CrudExpr, Expr};
+use tracing::info;
 
 use crate::database_instance_context_controller::DatabaseInstanceContextController;
 use crate::db::datastore::locking_tx_datastore::MutTxId;
@@ -27,6 +28,7 @@ pub fn execute(
     sql_text: String,
     auth: AuthCtx,
 ) -> Result<Vec<MemTable>, DBError> {
+    info!(sql = sql_text);
     if let Some((database_instance_context, _)) = db_inst_ctx_controller.get(database_instance_id) {
         database_instance_context
             .relational_db
@@ -52,7 +54,7 @@ fn collect_result(result: &mut Vec, r: CodeResult) -> Result<(), DBErr
     Ok(())
 }
 
-#[tracing::instrument(skip_all)]
+#[tracing::instrument(skip(db, tx, auth))]
 pub fn execute_single_sql(
     db: &RelationalDB,
     tx: &mut MutTxId,
@@ -87,12 +89,7 @@ pub fn execute_sql(
 
 /// Run the `SQL` string using the `auth` credentials
 #[tracing::instrument(skip_all)]
-pub(crate) fn run(
-    db: &RelationalDB,
-    tx: &mut MutTxId,
-    sql_text: &str,
-    auth: AuthCtx,
-) -> Result<Vec<MemTable>, DBError> {
+pub fn run(db: &RelationalDB, tx: &mut MutTxId, sql_text: &str, auth: AuthCtx) -> Result<Vec<MemTable>, DBError> {
     let ast = compile_sql(db, tx, sql_text)?;
     execute_sql(db, tx, ast, auth)
 }
diff --git a/crates/core/src/vm.rs b/crates/core/src/vm.rs
index 6eeaa05209..a8ec261fa9 100644
--- a/crates/core/src/vm.rs
+++ b/crates/core/src/vm.rs
@@ -20,6 +20,7 @@ use spacetimedb_vm::program::{ProgramRef, ProgramVm};
 use spacetimedb_vm::rel_ops::RelOps;
 use std::collections::HashMap;
 use std::ops::RangeBounds;
+use tracing::debug;
 
 //TODO: This is partially duplicated from the `vm` crate to avoid borrow checker issues
 //and pull all that crate in core. Will be revisited after trait refactor
@@ -259,8 +260,10 @@ impl<'db, 'tx> DbProgram<'db, 'tx> {
         }
     }
 
+    #[tracing::instrument(skip_all)]
     fn _eval_query(&mut self, query: QueryCode) -> Result<Code, ErrorVm> {
         let table_access = query.table.table_access();
+        debug!(table = query.table.table_name());
 
         let result = build_query(self.db, self.tx, query)?;
         let head = result.head().clone();
diff --git a/run_standalone_temp.sh b/run_standalone_temp.sh
index fc681ab53a..53566ce549 100755
--- a/run_standalone_temp.sh
+++ b/run_standalone_temp.sh
@@ -35,4 +35,4 @@ export SPACETIMEDB_TRACY=1
 echo "DATABASE AT ${STDB_PATH}"
 echo "LOGS AT $STDB_PATH/logs"
 
-cargo run -p spacetimedb-standalone -- start -l 127.0.0.1:3000
+cargo run -p spacetimedb-standalone -- start -l 127.0.0.1:3000 --enable-tracy