diff --git a/Cargo.lock b/Cargo.lock index f1168906765..bdc1d543ab0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -220,7 +220,7 @@ checksum = "fd45deb3dbe5da5cdb8d6a670a7736d735ba65b455328440f236dfb113727a3d" dependencies = [ "Inflector", "async-graphql-parser", - "darling", + "darling 0.20.10", "proc-macro-crate", "proc-macro2", "quote", @@ -1098,8 +1098,18 @@ version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.20.10", + "darling_macro 0.20.10", +] + +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core 0.21.3", + "darling_macro 0.21.3", ] [[package]] @@ -1116,13 +1126,38 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim", + "syn 2.0.106", +] + [[package]] name = "darling_macro" version = "0.20.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ - "darling_core", + "darling_core 0.20.10", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core 0.21.3", "quote", "syn 2.0.106", ] @@ -1169,6 +1204,9 @@ name = "deadpool-runtime" version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "092966b41edc516079bdf31ec78a2e0588d1d0c08f78b91d8307215928642b2b" +dependencies = [ + "tokio", +] [[package]] name = "debugid" @@ -1243,15 +1281,16 @@ dependencies = [ [[package]] name = "diesel" -version = "2.2.7" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04001f23ba8843dc315804fa324000376084dfb1c30794ff68dd279e6e5696d5" +checksum = "e8496eeb328dce26ee9d9b73275d396d9bddb433fa30106cf6056dd8c3c2764c" dependencies = [ "bigdecimal 0.3.1", "bitflags 2.9.0", "byteorder", "chrono", "diesel_derives", + "downcast-rs", "itoa", "num-bigint 0.4.6", "num-integer", @@ -1261,6 +1300,21 @@ dependencies = [ "serde_json", ] +[[package]] +name = "diesel-async" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c69eded9cb72c7e112505caec23da00149d4dd49f4c96b3c83b2b63f0aa3da5f" +dependencies = [ + "deadpool", + "diesel", + "futures-core", + "futures-util", + "scoped-futures", + "tokio", + "tokio-postgres", +] + [[package]] name = "diesel-derive-enum" version = "2.1.0" @@ -1275,18 +1329,18 @@ dependencies = [ [[package]] name = "diesel-dynamic-schema" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061bbe2d02508364c50153226524b7fc224f56031a5e927b0bc5f1f2b48de6a6" +checksum = "030a2287b125235908614c5f32f9b3bdc43c4d639846853d66e8a68c75a02756" dependencies = [ "diesel", ] [[package]] name = "diesel_derives" -version = "2.2.7" +version = "2.3.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b96984c469425cb577bf6f17121ecb3e4fe1e81de5d8f780dd372802858d756" +checksum = "09af0e983035368439f1383011cd87c46f41da81d0f21dc3727e2857d5a43c8e" dependencies = [ "diesel_table_macro_syntax", "dsl_auto_type", @@ -1297,9 +1351,9 @@ dependencies = [ [[package]] name = "diesel_migrations" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a73ce704bad4231f001bff3314d91dce4aba0770cee8b233991859abc15c1f6" +checksum = "ee060f709c3e3b1cadd83fcd0f61711f7a8cf493348f758d3a1c1147d70b3c97" dependencies = [ "diesel", "migrations_internals", @@ -1308,9 +1362,9 @@ dependencies = [ [[package]] name = "diesel_table_macro_syntax" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "209c735641a413bc68c4923a9d6ad4bcb3ca306b794edaa7eb0b3228a99ffb25" +checksum = "fe2444076b48641147115697648dc743c2c00b61adade0f01ce67133c7babe8c" dependencies = [ "syn 2.0.106", ] @@ -1410,13 +1464,19 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "downcast-rs" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "117240f60069e65410b3ae1bb213295bd828f707b5bec6596a1afc8793ce0cbc" + [[package]] name = "dsl_auto_type" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0892a17df262a24294c382f0d5997571006e7a4348b4327557c4ff1cd4a8bccc" +checksum = "dd122633e4bef06db27737f21d3738fb89c8f6d5360d6d9d7635dda142a7757e" dependencies = [ - "darling", + "darling 0.21.3", "either", "heck 0.5.0", "proc-macro2", @@ -2007,6 +2067,7 @@ name = "graph-chain-ethereum" version = "0.36.0" dependencies = [ "anyhow", + "async-trait", "base64 0.22.1", "envconfig", "graph", @@ -2021,6 +2082,8 @@ dependencies = [ "serde", "thiserror 2.0.16", "tiny-keccak 1.5.0", + "tokio", + "tokio-stream", "tonic-build", ] @@ -2029,6 +2092,7 @@ name = "graph-chain-near" version = "0.36.0" dependencies = [ "anyhow", + "async-trait", "diesel", "graph", "graph-runtime-derive", @@ -2036,6 +2100,7 @@ dependencies = [ "prost", "prost-types", "serde", + "tokio", "tonic-build", "trigger-filters", ] @@ -2045,6 +2110,7 @@ name = "graph-chain-substreams" version = "0.36.0" dependencies = [ "anyhow", + "async-trait", "base64 0.22.1", "graph", "graph-runtime-wasm", @@ -2055,6 +2121,7 @@ dependencies = [ "semver", "serde", "tokio", + "tokio-stream", "tonic-build", ] @@ -2074,6 +2141,8 @@ dependencies = [ "graph-runtime-wasm", "serde_yaml", "thiserror 2.0.16", + "tokio", + "tokio-retry", "tower 0.5.2 (git+https://github.com/tower-rs/tower.git)", "tower-test", "wiremock", @@ -2085,6 +2154,7 @@ version = "0.36.0" dependencies = [ "anyhow", "async-recursion", + "async-trait", "crossbeam", "graph", "graphql-tools", @@ -2101,6 +2171,7 @@ dependencies = [ "anyhow", "clap", "diesel", + "diesel-async", "env_logger", "git-testament", "globset", @@ -2125,6 +2196,7 @@ dependencies = [ "serde", "shellexpand", "termcolor", + "tokio", "url", ] @@ -2142,6 +2214,7 @@ dependencies = [ name = "graph-runtime-test" version = "0.36.0" dependencies = [ + "async-trait", "graph", "graph-chain-ethereum", "graph-runtime-derive", @@ -2175,6 +2248,7 @@ dependencies = [ name = "graph-server-http" version = "0.36.0" dependencies = [ + "async-trait", "graph", "graph-core", "graph-graphql", @@ -2185,6 +2259,7 @@ dependencies = [ name = "graph-server-index-node" version = "0.36.0" dependencies = [ + 
"async-trait", "blake3 1.8.2", "git-testament", "graph", @@ -2220,8 +2295,10 @@ dependencies = [ "blake3 1.8.2", "chrono", "clap", + "deadpool", "derive_more 2.0.1", "diesel", + "diesel-async", "diesel-derive-enum", "diesel-dynamic-schema", "diesel_derives", @@ -2244,6 +2321,8 @@ dependencies = [ "serde", "serde_json", "stable-hash 0.3.4", + "tokio", + "tokio-stream", ] [[package]] @@ -2253,6 +2332,7 @@ dependencies = [ "anyhow", "assert-json-diff", "async-stream", + "async-trait", "graph", "graph-chain-ethereum", "graph-chain-substreams", @@ -2287,6 +2367,7 @@ version = "0.36.0" dependencies = [ "anyhow", "diesel", + "diesel-async", "graph", "graph-store-postgres", "graphman-store", @@ -2305,6 +2386,7 @@ dependencies = [ "axum 0.8.4", "chrono", "diesel", + "diesel-async", "graph", "graph-store-postgres", "graphman", @@ -2325,6 +2407,7 @@ name = "graphman-store" version = "0.36.0" dependencies = [ "anyhow", + "async-trait", "chrono", "diesel", "strum", @@ -3387,19 +3470,19 @@ dependencies = [ [[package]] name = "migrations_internals" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd01039851e82f8799046eabbb354056283fb265c8ec0996af940f4e85a380ff" +checksum = "36c791ecdf977c99f45f23280405d7723727470f6689a5e6dbf513ac547ae10d" dependencies = [ "serde", - "toml 0.8.15", + "toml 0.9.7", ] [[package]] name = "migrations_macros" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffb161cc72176cb37aa47f1fc520d3ef02263d67d661f44f05d05a079e1237fd" +checksum = "36fc5ac76be324cfd2d3f2cf0fdf5d5d3c4f14ed8aaebadb09e304ba42282703" dependencies = [ "migrations_internals", "proc-macro2", @@ -4723,6 +4806,15 @@ dependencies = [ "parking_lot", ] +[[package]] +name = "scoped-futures" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b24aae2d0636530f359e9d5ef0c04669d11c5e756699b27a6a6d845d8329091" +dependencies = [ + "pin-project-lite", +] + [[package]] name = "scopeguard" version = "1.2.0" @@ -4917,7 +5009,7 @@ version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" dependencies = [ - "darling", + "darling 0.20.10", "proc-macro2", "quote", "syn 2.0.106", @@ -5473,7 +5565,9 @@ dependencies = [ name = "test-store" version = "0.36.0" dependencies = [ + "async-trait", "diesel", + "diesel-async", "graph", "graph-chain-ethereum", "graph-graphql", @@ -5484,6 +5578,7 @@ dependencies = [ "lazy_static", "pretty_assertions", "prost-types", + "tokio", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 99791a52402..9d39510559d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -42,6 +42,7 @@ license = "MIT OR Apache-2.0" anyhow = "1.0" async-graphql = { version = "7.0.17", features = ["chrono"] } async-graphql-axum = "7.0.17" +async-trait = "0.1.74" axum = "0.8.4" chrono = "0.4.42" bs58 = "0.5.1" @@ -55,6 +56,7 @@ diesel = { version = "2.2.7", features = [ "chrono", "i-implement-a-third-party-backend-and-opt-into-breaking-changes", ] } +diesel-async = { version = "0.7.3", features = ["deadpool", "async-connection-wrapper", "tokio", "postgres"] } diesel-derive-enum = { version = "2.1.0", features = ["postgres"] } diesel-dynamic-schema = { version = "0.2.3", features = ["postgres"] } diesel_derives = "2.2.7" @@ -90,7 +92,11 @@ strum = { version = "0.26", features = ["derive"] } syn = { version = "2.0.106", features = ["full"] } test-store = { 
path = "./store/test-store" } thiserror = "2.0.16" +deadpool = { version = "0.12", features = ["rt_tokio_1", "managed"] } tokio = { version = "1.45.1", features = ["full"] } +tokio-stream = { version = "0.1.15", features = ["sync"] } +tokio-retry = "0.3.0" + tonic = { version = "0.12.3", features = ["tls-roots", "gzip"] } tonic-build = { version = "0.12.3", features = ["prost"] } tower-http = { version = "0.6.6", features = ["cors"] } diff --git a/chain/ethereum/Cargo.toml b/chain/ethereum/Cargo.toml index ee350ea69a7..1a395fa536e 100644 --- a/chain/ethereum/Cargo.toml +++ b/chain/ethereum/Cargo.toml @@ -4,6 +4,7 @@ version.workspace = true edition.workspace = true [dependencies] +async-trait = { workspace = true } envconfig = "0.11.0" jsonrpc-core = "18.0.0" graph = { path = "../../graph" } @@ -15,6 +16,8 @@ tiny-keccak = "1.5.0" hex = "0.4.3" semver = "1.0.27" thiserror = { workspace = true } +tokio = { workspace = true } +tokio-stream = { workspace = true } itertools = "0.14.0" diff --git a/chain/ethereum/src/adapter.rs b/chain/ethereum/src/adapter.rs index 19befd31ca3..efadb95c089 100644 --- a/chain/ethereum/src/adapter.rs +++ b/chain/ethereum/src/adapter.rs @@ -1,4 +1,5 @@ use anyhow::Error; +use async_trait::async_trait; use ethabi::{Error as ABIError, ParamType, Token}; use graph::blockchain::ChainIdentifier; use graph::components::subgraph::MappingError; diff --git a/chain/ethereum/src/buffered_call_cache.rs b/chain/ethereum/src/buffered_call_cache.rs index 8a51bd9a0a4..c6e0040b570 100644 --- a/chain/ethereum/src/buffered_call_cache.rs +++ b/chain/ethereum/src/buffered_call_cache.rs @@ -3,6 +3,7 @@ use std::{ sync::{Arc, Mutex}, }; +use async_trait::async_trait; use graph::{ cheap_clone::CheapClone, components::store::EthereumCallCache, @@ -47,8 +48,9 @@ impl BufferedCallCache { } } +#[async_trait] impl EthereumCallCache for BufferedCallCache { - fn get_call( + async fn get_call( &self, call: &call::Request, block: BlockPtr, @@ -59,7 +61,7 @@ impl EthereumCallCache for BufferedCallCache { return Ok(Some(value)); } - let result = self.call_cache.get_call(&call, block)?; + let result = self.call_cache.get_call(&call, block).await?; let mut buffer = self.buffer.lock().unwrap(); if let Some(call::Response { @@ -73,7 +75,7 @@ impl EthereumCallCache for BufferedCallCache { Ok(result) } - fn get_calls( + async fn get_calls( &self, reqs: &[call::Request], block: BlockPtr, @@ -90,7 +92,7 @@ impl EthereumCallCache for BufferedCallCache { } } - let (stored, calls) = self.call_cache.get_calls(&missing, block)?; + let (stored, calls) = self.call_cache.get_calls(&missing, block).await?; { let mut buffer = self.buffer.lock().unwrap(); @@ -103,15 +105,15 @@ impl EthereumCallCache for BufferedCallCache { Ok((resps, calls)) } - fn get_calls_in_block( + async fn get_calls_in_block( &self, block: BlockPtr, ) -> Result, graph::prelude::Error> { - self.call_cache.get_calls_in_block(block) + self.call_cache.get_calls_in_block(block).await } - fn set_call( - &self, + async fn set_call( + self: Arc, logger: &Logger, call: call::Request, block: BlockPtr, @@ -130,15 +132,14 @@ impl EthereumCallCache for BufferedCallCache { let cache = self.call_cache.cheap_clone(); let logger = logger.cheap_clone(); - let _ = graph::spawn_blocking_allow_panic(move || { - cache - .set_call(&logger, call.cheap_clone(), block, return_value) - .map_err(|e| { - error!(logger, "BufferedCallCache: call cache set error"; + if let Err(e) = cache + .set_call(&logger, call.cheap_clone(), block, return_value) + .await + { + 
error!(logger, "BufferedCallCache: call cache set error"; "contract_address" => format!("{:?}", call.address), "error" => e.to_string()) - }) - }); + } Ok(()) } diff --git a/chain/ethereum/src/chain.rs b/chain/ethereum/src/chain.rs index 35c155b9c0f..11ca025e0e2 100644 --- a/chain/ethereum/src/chain.rs +++ b/chain/ethereum/src/chain.rs @@ -1,5 +1,6 @@ use anyhow::{anyhow, bail, Result}; use anyhow::{Context, Error}; +use async_trait::async_trait; use graph::blockchain::client::ChainClient; use graph::blockchain::firehose_block_ingestor::{FirehoseBlockIngestor, Transforms}; use graph::blockchain::{ @@ -32,8 +33,8 @@ use graph::{ components::store::DeploymentLocator, firehose, prelude::{ - async_trait, o, serde_json as json, BlockNumber, ChainStore, EthereumBlockWithCalls, - Logger, LoggerFactory, + o, serde_json as json, BlockNumber, ChainStore, EthereumBlockWithCalls, Logger, + LoggerFactory, }, }; use prost::Message; @@ -551,9 +552,11 @@ impl Blockchain for Chain { self.block_refetcher.get_block(self, logger, cursor).await } - fn runtime(&self) -> anyhow::Result<(Arc<dyn RuntimeAdapterTrait<Self>>, Self::DecoderHook)> { + async fn runtime( + &self, + ) -> anyhow::Result<(Arc<dyn RuntimeAdapterTrait<Self>>, Self::DecoderHook)> { let call_cache = Arc::new(BufferedCallCache::new(self.call_cache.cheap_clone())); - let chain_ident = self.chain_store.chain_identifier()?; + let chain_ident = self.chain_store.chain_identifier().await?; let builder = self.runtime_adapter_builder.build( self.eth_adapters.cheap_clone(), @@ -1266,7 +1269,7 @@ impl FirehoseMapperTrait for FirehoseMapper { #[cfg(test)] mod tests { use graph::blockchain::mock::MockChainStore; - use graph::{slog, tokio}; + use graph::slog; use super::*; use std::sync::Arc; @@ -1283,7 +1286,7 @@ mod tests { } } - #[tokio::test] + #[graph::test] async fn test_fetch_unique_blocks_single_block() { let logger = Logger::root(slog::Discard, o!()); let mut chain_store = MockChainStore::default(); @@ -1302,7 +1305,7 @@ mod tests { assert!(missing.is_empty()); } - #[tokio::test] + #[graph::test] async fn test_fetch_unique_blocks_duplicate_blocks() { let logger = Logger::root(slog::Discard, o!()); let mut chain_store = MockChainStore::default(); @@ -1325,7 +1328,7 @@ mod tests { assert_eq!(missing[0], 1); } - #[tokio::test] + #[graph::test] async fn test_fetch_unique_blocks_missing_blocks() { let logger = Logger::root(slog::Discard, o!()); let mut chain_store = MockChainStore::default(); @@ -1344,7 +1347,7 @@ mod tests { assert_eq!(missing, vec![2]); } - #[tokio::test] + #[graph::test] async fn test_fetch_unique_blocks_multiple_valid_blocks() { let logger = Logger::root(slog::Discard, o!()); let mut chain_store = MockChainStore::default(); @@ -1366,7 +1369,7 @@ mod tests { assert!(missing.is_empty()); } - #[tokio::test] + #[graph::test] async fn test_fetch_unique_blocks_mixed_scenario() { let logger = Logger::root(slog::Discard, o!()); let mut chain_store = MockChainStore::default(); diff --git a/chain/ethereum/src/data_source.rs b/chain/ethereum/src/data_source.rs index 68a6f2371b9..e314b5a158f 100644 --- a/chain/ethereum/src/data_source.rs +++ b/chain/ethereum/src/data_source.rs @@ -1,5 +1,6 @@ use anyhow::{anyhow, Error}; use anyhow::{ensure, Context}; +use async_trait::async_trait; use graph::blockchain::{BlockPtr, TriggerWithHandler}; use graph::components::link_resolver::LinkResolverContext; use graph::components::metrics::subgraph::SubgraphInstanceMetrics; @@ -33,7 +34,6 @@ use tiny_keccak::{keccak256, Keccak}; use graph::{ blockchain::{self, Blockchain}, prelude::{ - async_trait, ethabi::{Address, 
Event, Function, LogParam, ParamType, RawLog}, serde_json, warn, web3::types::{Log, Transaction, H256}, diff --git a/chain/ethereum/src/ethereum_adapter.rs b/chain/ethereum/src/ethereum_adapter.rs index 3ca046f359b..c4dd377fa58 100644 --- a/chain/ethereum/src/ethereum_adapter.rs +++ b/chain/ethereum/src/ethereum_adapter.rs @@ -1,4 +1,8 @@ +use async_trait::async_trait; use futures03::{future::BoxFuture, stream::FuturesUnordered}; +use tokio::sync::RwLock; +use tokio::time::timeout; + use graph::blockchain::client::ChainClient; use graph::blockchain::BlockHash; use graph::blockchain::ChainIdentifier; @@ -22,14 +26,11 @@ use graph::prelude::ethabi::Token; use graph::prelude::tokio::try_join; use graph::prelude::web3::types::U256; use graph::slog::o; -use graph::tokio::sync::RwLock; -use graph::tokio::time::timeout; use graph::{ blockchain::{block_stream::BlockWithTriggers, BlockPtr, IngestorError}, prelude::{ anyhow::{self, anyhow, bail, ensure, Context}, - async_trait, debug, error, ethabi, hex, info, retry, serde_json as json, tiny_keccak, - trace, warn, + debug, error, ethabi, hex, info, retry, serde_json as json, tiny_keccak, trace, warn, web3::{ self, types::{ @@ -733,18 +734,19 @@ impl EthereumAdapter { call.gas, ) .await?; - let _ = cache + if let Err(e) = cache .set_call( &logger, req.cheap_clone(), call.block_ptr.cheap_clone(), result.clone(), ) - .map_err(|e| { - error!(logger, "EthereumAdapter: call cache set error"; + .await + { + error!(logger, "EthereumAdapter: call cache set error"; "contract_address" => format!("{:?}", req.address), - "error" => e.to_string()) - }); + "error" => e.to_string()); + } Ok(req.response(result, call::Source::Rpc)) } @@ -1653,6 +1655,7 @@ impl EthereumAdapterTrait for EthereumAdapter { let (mut resps, missing) = cache .get_calls(&reqs, block_ptr) + .await .map_err(|e| error!(logger, "call cache get error"; "error" => e.to_string())) .unwrap_or_else(|_| (Vec::new(), reqs)); @@ -1729,7 +1732,7 @@ impl EthereumAdapterTrait for EthereumAdapter { .iter() .map(|block| block as &dyn graph::blockchain::Block) .collect(); - if let Err(e) = chain_store.upsert_light_blocks(block_refs.as_slice()) { + if let Err(e) = chain_store.upsert_light_blocks(block_refs.as_slice()).await { error!(logger, "Error writing to block cache {}", e); } blocks.extend(new_blocks); @@ -2367,7 +2370,7 @@ async fn fetch_individual_receipts_with_retry( } // Use a stream to fetch receipts individually - let hash_stream = graph::tokio_stream::iter(hashes); + let hash_stream = tokio_stream::iter(hashes); let receipt_stream = hash_stream .map(move |tx_hash| { fetch_transaction_receipt_with_retry( @@ -2379,7 +2382,7 @@ async fn fetch_individual_receipts_with_retry( }) .buffered(ENV_VARS.block_ingestor_max_concurrent_json_rpc_calls); - graph::tokio_stream::StreamExt::collect::<Result<Vec<Arc<TransactionReceipt>>, IngestorError>>( + tokio_stream::StreamExt::collect::<Result<Vec<Arc<TransactionReceipt>>, IngestorError>>( receipt_stream, ) .await @@ -2672,7 +2675,6 @@ mod tests { }; use graph::blockchain::BlockPtr; use graph::prelude::ethabi::ethereum_types::U64; - use graph::prelude::tokio::{self}; use graph::prelude::web3::transports::test::TestTransport; use graph::prelude::web3::types::{Address, Block, Bytes, H256}; use graph::prelude::web3::Web3; @@ -2720,7 +2722,7 @@ mod tests { ); } - #[tokio::test] + #[graph::test] async fn test_check_block_receipts_support() { let mut transport = TestTransport::default(); diff --git a/chain/ethereum/src/ingestor.rs b/chain/ethereum/src/ingestor.rs index 935cb525936..47cae0b93c5 100644 --- a/chain/ethereum/src/ingestor.rs 
+++ b/chain/ethereum/src/ingestor.rs @@ -1,5 +1,6 @@ use crate::{chain::BlockFinality, ENV_VARS}; use crate::{EthereumAdapter, EthereumAdapterTrait as _}; +use async_trait::async_trait; use graph::blockchain::client::ChainClient; use graph::blockchain::BlockchainKind; use graph::components::network_provider::ChainName; @@ -9,8 +10,8 @@ use graph::{ blockchain::{BlockHash, BlockIngestor, BlockPtr, IngestorError}, cheap_clone::CheapClone, prelude::{ - async_trait, error, ethabi::ethereum_types::H256, info, tokio, trace, warn, ChainStore, - Error, EthereumBlockWithCalls, LogCode, Logger, + error, ethabi::ethereum_types::H256, info, tokio, trace, warn, ChainStore, Error, + EthereumBlockWithCalls, LogCode, Logger, }, }; use std::{sync::Arc, time::Duration}; @@ -43,8 +44,12 @@ impl PollingBlockIngestor { }) } - fn cleanup_cached_blocks(&self) { - match self.chain_store.cleanup_cached_blocks(self.ancestor_count) { + async fn cleanup_cached_blocks(&self) { + match self + .chain_store + .cleanup_cached_blocks(self.ancestor_count) + .await + { Ok(Some((min_block, count))) => { if count > 0 { info!( @@ -256,7 +261,7 @@ impl BlockIngestor for PollingBlockIngestor { } if ENV_VARS.cleanup_blocks { - self.cleanup_cached_blocks() + self.cleanup_cached_blocks().await } tokio::time::sleep(self.polling_interval).await; diff --git a/chain/ethereum/src/network.rs b/chain/ethereum/src/network.rs index 59a698ab20b..ca45411cdc2 100644 --- a/chain/ethereum/src/network.rs +++ b/chain/ethereum/src/network.rs @@ -1,4 +1,5 @@ use anyhow::{anyhow, bail}; +use async_trait::async_trait; use graph::blockchain::ChainIdentifier; use graph::components::network_provider::ChainName; use graph::components::network_provider::NetworkDetails; @@ -12,7 +13,7 @@ use itertools::Itertools; use std::sync::Arc; pub use graph::impl_slog_value; -use graph::prelude::{async_trait, Error}; +use graph::prelude::Error; use crate::adapter::EthereumAdapter as _; use crate::capabilities::NodeCapabilities; @@ -319,7 +320,6 @@ mod tests { firehose::SubgraphLimit, prelude::MetricsRegistry, slog::{o, Discard, Logger}, - tokio, url::Url, }; use std::sync::Arc; @@ -383,7 +383,7 @@ mod tests { assert_eq!(true, &full_traces >= &full_traces); } - #[tokio::test] + #[graph::test] async fn adapter_selector_selects_eth_call() { let metrics = Arc::new(EndpointMetrics::mock()); let logger = graph::log::logger(true); @@ -489,7 +489,7 @@ mod tests { } } - #[tokio::test] + #[graph::test] async fn adapter_selector_unlimited() { let metrics = Arc::new(EndpointMetrics::mock()); let logger = graph::log::logger(true); @@ -560,7 +560,7 @@ mod tests { assert_eq!(keep.iter().any(|a| !a.is_call_only()), false); } - #[tokio::test] + #[graph::test] async fn adapter_selector_disable_call_only_fallback() { let metrics = Arc::new(EndpointMetrics::mock()); let logger = graph::log::logger(true); @@ -627,7 +627,7 @@ mod tests { ); } - #[tokio::test] + #[graph::test] async fn adapter_selector_no_call_only_fallback() { let metrics = Arc::new(EndpointMetrics::mock()); let logger = graph::log::logger(true); @@ -673,7 +673,7 @@ mod tests { ); } - #[tokio::test] + #[graph::test] async fn eth_adapter_selection_multiple_adapters() { let logger = Logger::root(Discard, o!()); let unavailable_provider = "unavailable-provider"; @@ -786,7 +786,7 @@ mod tests { ); } - #[tokio::test] + #[graph::test] async fn eth_adapter_selection_single_adapter() { let logger = Logger::root(Discard, o!()); let unavailable_provider = "unavailable-provider"; diff --git a/chain/ethereum/src/polling_block_stream.rs 
b/chain/ethereum/src/polling_block_stream.rs index a215f775685..9802f7b7d5d 100644 --- a/chain/ethereum/src/polling_block_stream.rs +++ b/chain/ethereum/src/polling_block_stream.rs @@ -1,11 +1,11 @@ use anyhow::{anyhow, Error}; -use graph::tokio; use std::cmp; use std::collections::VecDeque; use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; use std::time::Duration; +use tokio; use graph::blockchain::block_stream::{ BlockStream, BlockStreamError, BlockStreamEvent, BlockWithTriggers, ChainHeadUpdateStream, diff --git a/chain/ethereum/src/runtime/abi.rs b/chain/ethereum/src/runtime/abi.rs index a88e482bc0c..a716c4ea3a8 100644 --- a/chain/ethereum/src/runtime/abi.rs +++ b/chain/ethereum/src/runtime/abi.rs @@ -2,9 +2,10 @@ use super::runtime_adapter::UnresolvedContractCall; use crate::trigger::{ EthereumBlockData, EthereumCallData, EthereumEventData, EthereumTransactionData, }; +use async_trait::async_trait; use graph::{ prelude::{ - async_trait, ethabi, + ethabi, web3::{ self, types::{Log, TransactionReceipt, H256}, diff --git a/chain/ethereum/src/trigger.rs b/chain/ethereum/src/trigger.rs index 6acd326f76e..bbbaa69a8d2 100644 --- a/chain/ethereum/src/trigger.rs +++ b/chain/ethereum/src/trigger.rs @@ -1,10 +1,10 @@ +use async_trait::async_trait; use graph::blockchain::MappingTriggerTrait; use graph::blockchain::TriggerData; use graph::data::subgraph::API_VERSION_0_0_2; use graph::data::subgraph::API_VERSION_0_0_6; use graph::data::subgraph::API_VERSION_0_0_7; use graph::data_source::common::DeclaredCall; -use graph::prelude::async_trait; use graph::prelude::ethabi::ethereum_types::H160; use graph::prelude::ethabi::ethereum_types::H256; use graph::prelude::ethabi::ethereum_types::U128; diff --git a/chain/near/Cargo.toml b/chain/near/Cargo.toml index 708d137921d..6984c831cd8 100644 --- a/chain/near/Cargo.toml +++ b/chain/near/Cargo.toml @@ -7,6 +7,7 @@ edition.workspace = true tonic-build = { workspace = true } [dependencies] +async-trait = { workspace = true } graph = { path = "../../graph" } prost = { workspace = true } prost-types = { workspace = true } @@ -19,3 +20,4 @@ graph-runtime-derive = { path = "../../runtime/derive" } [dev-dependencies] diesel = { workspace = true } trigger-filters.path = "../../substreams/trigger-filters" +tokio = { workspace = true } \ No newline at end of file diff --git a/chain/near/src/chain.rs b/chain/near/src/chain.rs index 58b0e23ac2d..7bf2b50a6a8 100644 --- a/chain/near/src/chain.rs +++ b/chain/near/src/chain.rs @@ -1,4 +1,5 @@ use anyhow::anyhow; +use async_trait::async_trait; use graph::blockchain::client::ChainClient; use graph::blockchain::firehose_block_ingestor::FirehoseBlockIngestor; use graph::blockchain::substreams_block_stream::SubstreamsBlockStream; @@ -29,7 +30,7 @@ use graph::{ }, components::store::DeploymentLocator, firehose::{self as firehose, ForkStep}, - prelude::{async_trait, o, BlockNumber, Error, Logger, LoggerFactory}, + prelude::{o, BlockNumber, Error, Logger, LoggerFactory}, }; use prost::Message; use std::collections::BTreeSet; @@ -292,7 +293,9 @@ impl Blockchain for Chain { .await } - fn runtime(&self) -> anyhow::Result<(Arc<dyn RuntimeAdapter<Self>>, Self::DecoderHook)> { + async fn runtime( + &self, + ) -> anyhow::Result<(Arc<dyn RuntimeAdapter<Self>>, Self::DecoderHook)> { Ok((Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook)) } @@ -590,7 +593,7 @@ mod test { use graph::{ blockchain::{block_stream::BlockWithTriggers, DataSource as _, TriggersAdapter as _}, data::subgraph::LATEST_VERSION, - prelude::{tokio, Link}, + prelude::Link, semver::Version, 
slog::{self, o, Logger}, }; @@ -914,7 +917,7 @@ mod test { } } - #[tokio::test] + #[graph::test] async fn test_trigger_filter_empty() { let account1: String = "account1".into(); @@ -932,7 +935,7 @@ mod test { assert_eq!(block_with_triggers.trigger_count(), 0); } - #[tokio::test] + #[graph::test] async fn test_trigger_filter_every_block() { let account1: String = "account1".into(); @@ -958,7 +961,7 @@ mod test { assert_eq!(height, vec![1]); } - #[tokio::test] + #[graph::test] async fn test_trigger_filter_every_receipt() { let account1: String = "account1".into(); diff --git a/chain/near/src/data_source.rs b/chain/near/src/data_source.rs index 6eac3e2d92d..e64197eeb0d 100644 --- a/chain/near/src/data_source.rs +++ b/chain/near/src/data_source.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use graph::anyhow::Context; use graph::blockchain::{Block, TriggerWithHandler}; use graph::components::link_resolver::LinkResolverContext; @@ -8,7 +9,7 @@ use graph::prelude::SubgraphManifestValidationError; use graph::{ anyhow::{anyhow, Error}, blockchain::{self, Blockchain}, - prelude::{async_trait, BlockNumber, CheapClone, Deserialize, Link, LinkResolver, Logger}, + prelude::{BlockNumber, CheapClone, Deserialize, Link, LinkResolver, Logger}, semver, }; use std::collections::HashSet; diff --git a/chain/near/src/runtime/abi.rs b/chain/near/src/runtime/abi.rs index 7b6da023c95..87e224dc4d0 100644 --- a/chain/near/src/runtime/abi.rs +++ b/chain/near/src/runtime/abi.rs @@ -1,7 +1,7 @@ use crate::codec; use crate::trigger::ReceiptWithOutcome; +use async_trait::async_trait; use graph::anyhow::anyhow; -use graph::prelude::async_trait; use graph::runtime::gas::GasCounter; use graph::runtime::{asc_new, AscHeap, AscPtr, DeterministicHostError, HostExportError, ToAscObj}; use graph_runtime_wasm::asc_abi::class::{Array, AscEnum, EnumPayload, Uint8Array}; diff --git a/chain/near/src/trigger.rs b/chain/near/src/trigger.rs index a05ea7d4d22..d604f97bc14 100644 --- a/chain/near/src/trigger.rs +++ b/chain/near/src/trigger.rs @@ -1,8 +1,8 @@ +use async_trait::async_trait; use graph::blockchain::Block; use graph::blockchain::MappingTriggerTrait; use graph::blockchain::TriggerData; use graph::derive::CheapClone; -use graph::prelude::async_trait; use graph::prelude::hex; use graph::prelude::web3::types::H256; use graph::prelude::BlockNumber; @@ -162,11 +162,10 @@ mod tests { data::subgraph::API_VERSION_0_0_5, prelude::{hex, BigInt}, runtime::{gas::GasCounter, DeterministicHostError, HostExportError}, - tokio, util::mem::init_slice, }; - #[tokio::test] + #[graph::test] async fn block_trigger_to_asc_ptr() { let mut heap = BytesHeap::new(API_VERSION_0_0_5); let trigger = NearTrigger::Block(Arc::new(block())); @@ -177,7 +176,7 @@ mod tests { assert!(result.is_ok()); } - #[tokio::test] + #[graph::test] async fn receipt_trigger_to_asc_ptr() { let mut heap = BytesHeap::new(API_VERSION_0_0_5); let trigger = NearTrigger::Receipt(Arc::new(ReceiptWithOutcome { diff --git a/chain/substreams/Cargo.toml b/chain/substreams/Cargo.toml index 80293945879..12cdca8840a 100644 --- a/chain/substreams/Cargo.toml +++ b/chain/substreams/Cargo.toml @@ -7,6 +7,7 @@ edition.workspace = true tonic-build = { workspace = true } [dependencies] +async-trait = { workspace = true } graph = { path = "../../graph" } graph-runtime-wasm = { path = "../../runtime/wasm" } lazy_static = "1.5.0" @@ -17,6 +18,7 @@ anyhow = "1.0" hex = "0.4.3" semver = "1.0.27" base64 = "0.22.1" +tokio-stream = { workspace = true } [dev-dependencies] tokio = { version = "1", features 
= ["full"] } diff --git a/chain/substreams/examples/substreams.rs b/chain/substreams/examples/substreams.rs index a5af2bbe25c..d2277580c37 100644 --- a/chain/substreams/examples/substreams.rs +++ b/chain/substreams/examples/substreams.rs @@ -5,12 +5,12 @@ use graph::blockchain::substreams_block_stream::SubstreamsBlockStream; use graph::endpoint::EndpointMetrics; use graph::firehose::{FirehoseEndpoints, SubgraphLimit}; use graph::prelude::{info, tokio, DeploymentHash, MetricsRegistry, Registry}; -use graph::tokio_stream::StreamExt; use graph::{env::env_var, firehose::FirehoseEndpoint, log::logger, substreams}; use graph_chain_substreams::mapper::Mapper; use prost::Message; use std::env; use std::sync::Arc; +use tokio_stream::StreamExt; #[tokio::main] async fn main() -> Result<(), Error> { diff --git a/chain/substreams/src/block_ingestor.rs b/chain/substreams/src/block_ingestor.rs index f176f549647..46966e9e4eb 100644 --- a/chain/substreams/src/block_ingestor.rs +++ b/chain/substreams/src/block_ingestor.rs @@ -2,6 +2,7 @@ use std::{sync::Arc, time::Duration}; use crate::mapper::Mapper; use anyhow::{Context, Error}; +use async_trait::async_trait; use graph::blockchain::block_stream::{BlockStreamError, FirehoseCursor}; use graph::blockchain::BlockchainKind; use graph::blockchain::{ @@ -12,14 +13,14 @@ use graph::components::store::ChainHeadStore; use graph::prelude::MetricsRegistry; use graph::slog::trace; use graph::substreams::Package; -use graph::tokio_stream::StreamExt; use graph::{ blockchain::block_stream::BlockStreamEvent, cheap_clone::CheapClone, - prelude::{async_trait, error, info, DeploymentHash, Logger}, + prelude::{error, info, DeploymentHash, Logger}, util::backoff::ExponentialBackoff, }; use prost::Message; +use tokio_stream::StreamExt; const SUBSTREAMS_HEAD_TRACKER_BYTES: &[u8; 89935] = include_bytes!( "../../../substreams/substreams-head-tracker/substreams-head-tracker-v1.0.0.spkg" @@ -54,7 +55,7 @@ impl SubstreamsBlockIngestor { let mut backoff = ExponentialBackoff::new(Duration::from_millis(250), Duration::from_secs(30)); loop { - match self.chain_store.clone().chain_head_cursor() { + match self.chain_store.clone().chain_head_cursor().await { Ok(cursor) => return cursor.unwrap_or_default(), Err(e) => { error!(self.logger, "Fetching chain head cursor failed: {:#}", e); diff --git a/chain/substreams/src/block_stream.rs b/chain/substreams/src/block_stream.rs index 8008694f66b..daad94bae20 100644 --- a/chain/substreams/src/block_stream.rs +++ b/chain/substreams/src/block_stream.rs @@ -1,4 +1,5 @@ use anyhow::Result; +use async_trait::async_trait; use std::sync::Arc; use graph::{ @@ -11,7 +12,7 @@ use graph::{ }, components::store::{DeploymentLocator, SourceableStore}, data::subgraph::UnifiedMappingApiVersion, - prelude::{async_trait, BlockNumber, BlockPtr}, + prelude::{BlockNumber, BlockPtr}, schema::InputSchema, slog::o, }; diff --git a/chain/substreams/src/chain.rs b/chain/substreams/src/chain.rs index 1c44d77bde1..0213d01a39e 100644 --- a/chain/substreams/src/chain.rs +++ b/chain/substreams/src/chain.rs @@ -1,6 +1,7 @@ use crate::block_ingestor::SubstreamsBlockIngestor; use crate::{data_source::*, EntityChanges, TriggerData, TriggerFilter, TriggersAdapter}; use anyhow::Error; +use async_trait::async_trait; use graph::blockchain::client::ChainClient; use graph::blockchain::{ BasicBlockchainBuilder, BlockIngestor, BlockTime, EmptyNodeCapabilities, NoopDecoderHook, @@ -19,7 +20,7 @@ use graph::{ }, components::store::DeploymentLocator, data::subgraph::UnifiedMappingApiVersion, - 
prelude::{async_trait, BlockNumber}, + prelude::BlockNumber, slog::Logger, }; @@ -185,7 +186,9 @@ impl Blockchain for Chain { number, }) } - fn runtime(&self) -> anyhow::Result<(Arc<dyn RuntimeAdapter<Self>>, Self::DecoderHook)> { + async fn runtime( + &self, + ) -> anyhow::Result<(Arc<dyn RuntimeAdapter<Self>>, Self::DecoderHook)> { Ok((Arc::new(NoopRuntimeAdapter::default()), NoopDecoderHook)) } diff --git a/chain/substreams/src/data_source.rs b/chain/substreams/src/data_source.rs index a85f9a8d6cf..a30d92173c5 100644 --- a/chain/substreams/src/data_source.rs +++ b/chain/substreams/src/data_source.rs @@ -1,6 +1,7 @@ use std::{collections::HashSet, sync::Arc}; use anyhow::{anyhow, Context, Error}; +use async_trait::async_trait; use graph::{ blockchain, cheap_clone::CheapClone, @@ -9,7 +10,7 @@ use graph::{ subgraph::InstanceDSTemplateInfo, }, data::subgraph::DeploymentHash, - prelude::{async_trait, BlockNumber, Link}, + prelude::{BlockNumber, Link}, slog::Logger, }; @@ -340,11 +341,12 @@ mod test { use std::{str::FromStr, sync::Arc}; use anyhow::Error; + use async_trait::async_trait; use graph::{ blockchain::{DataSource as _, UnresolvedDataSource as _}, components::link_resolver::{LinkResolver, LinkResolverContext}, data::subgraph::{DeploymentHash, LATEST_VERSION, SPEC_VERSION_1_2_0}, - prelude::{async_trait, serde_yaml, JsonValueStream, Link}, + prelude::{serde_yaml, JsonValueStream, Link}, slog::{o, Discard, Logger}, substreams::{ module::{ @@ -441,7 +443,7 @@ mod test { assert_eq!(ds, expected); } - #[tokio::test] + #[graph::test] async fn data_source_conversion() { let ds: UnresolvedDataSource = serde_yaml::from_str(TEMPLATE_DATA_SOURCE).unwrap(); let link_resolver: Arc<dyn LinkResolver> = Arc::new(NoopLinkResolver {}); @@ -475,7 +477,7 @@ mod test { assert_eq!(ds, expected); } - #[tokio::test] + #[graph::test] async fn data_source_conversion_override_params() { let mut package = gen_package(); let mut modules = package.modules.unwrap(); diff --git a/chain/substreams/src/mapper.rs b/chain/substreams/src/mapper.rs index bd7a30053c1..78788186795 100644 --- a/chain/substreams/src/mapper.rs +++ b/chain/substreams/src/mapper.rs @@ -3,6 +3,7 @@ use std::str::FromStr; use crate::codec::{entity_change, EntityChanges}; use anyhow::{anyhow, Error}; +use async_trait::async_trait; use graph::blockchain::block_stream::{ BlockStreamError, BlockStreamEvent, BlockStreamMapper, BlockWithTriggers, FirehoseCursor, SubstreamsError, }; @@ -12,8 +13,8 @@ use graph::data::store::scalar::{Bytes, Timestamp}; use graph::data::store::IdType; use graph::data::value::Word; use graph::data_source::CausalityRegion; -use graph::prelude::{async_trait, BigInt, BlockHash, BlockNumber, Logger, Value}; use graph::prelude::{BigDecimal, BlockPtr}; +use graph::prelude::{BigInt, BlockHash, BlockNumber, Logger, Value}; use graph::schema::InputSchema; use graph::slog::error; use graph::substreams::Clock; diff --git a/chain/substreams/src/trigger.rs b/chain/substreams/src/trigger.rs index 0d9a8c7898f..6593a079970 100644 --- a/chain/substreams/src/trigger.rs +++ b/chain/substreams/src/trigger.rs @@ -1,4 +1,5 @@ use anyhow::Error; +use async_trait::async_trait; use graph::{ blockchain::{ self, block_stream::BlockWithTriggers, BlockPtr, EmptyNodeCapabilities, MappingTriggerTrait, @@ -8,9 +9,7 @@ use graph::{ subgraph::{MappingError, ProofOfIndexingEvent, SharedProofOfIndexing}, trigger_processor::HostedTrigger, }, - prelude::{ - anyhow, async_trait, BlockHash, BlockNumber, BlockState, CheapClone, RuntimeHostBuilder, - }, + prelude::{anyhow, BlockHash, BlockNumber, BlockState, CheapClone, 
RuntimeHostBuilder}, slog::Logger, substreams::Modules, }; @@ -224,12 +223,15 @@ where logger, ); - state.entity_cache.set( - key, - entity, - block.number, - Some(&mut state.write_capacity_remaining), - )?; + state + .entity_cache + .set( + key, + entity, + block.number, + Some(&mut state.write_capacity_remaining), + ) + .await?; } ParsedChanges::Delete(entity_key) => { let entity_type = entity_key.entity_type.cheap_clone(); diff --git a/core/Cargo.toml b/core/Cargo.toml index 0a5440b2b30..c914d0de935 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -4,7 +4,7 @@ version.workspace = true edition.workspace = true [dependencies] -async-trait = "0.1.50" +async-trait = { workspace = true } atomic_refcell = "0.1.13" bytes = "1.0" graph = { path = "../graph" } @@ -13,6 +13,8 @@ graph-chain-near = { path = "../chain/near" } graph-chain-substreams = { path = "../chain/substreams" } graph-runtime-wasm = { path = "../runtime/wasm" } serde_yaml = { workspace = true } +tokio = { workspace = true } +tokio-retry = { workspace = true } # Switch to crates.io once tower 0.5 is released tower = { git = "https://github.com/tower-rs/tower.git", features = ["full"] } thiserror = { workspace = true } diff --git a/core/graphman/Cargo.toml b/core/graphman/Cargo.toml index 001a683f4aa..858cc1b9012 100644 --- a/core/graphman/Cargo.toml +++ b/core/graphman/Cargo.toml @@ -6,6 +6,7 @@ edition.workspace = true [dependencies] anyhow = { workspace = true } diesel = { workspace = true } +diesel-async = { workspace = true } graph = { workspace = true } graph-store-postgres = { workspace = true } graphman-store = { workspace = true } diff --git a/core/graphman/src/commands/deployment/info.rs b/core/graphman/src/commands/deployment/info.rs index f4087b3a5e0..7cf0e87c758 100644 --- a/core/graphman/src/commands/deployment/info.rs +++ b/core/graphman/src/commands/deployment/info.rs @@ -26,17 +26,17 @@ pub struct DeploymentStatus { pub chain_head_block: Option<BlockPtr>, } -pub fn load_deployments( +pub async fn load_deployments( primary_pool: ConnectionPool, deployment: &DeploymentSelector, version: &DeploymentVersionSelector, ) -> Result<Vec<Deployment>, GraphmanError> { - let mut primary_conn = primary_pool.get()?; + let mut primary_conn = primary_pool.get().await?; - crate::deployment::load_deployments(&mut primary_conn, &deployment, &version) + crate::deployment::load_deployments(&mut primary_conn, &deployment, &version).await } -pub fn load_deployment_statuses( +pub async fn load_deployment_statuses( store: Arc<Store>, deployments: &[Deployment], ) -> Result<HashMap<i32, DeploymentStatus>, GraphmanError> { @@ -48,7 +48,8 @@ pub fn load_deployment_statuses( .collect_vec(); let deployment_statuses = store - .status(Filter::DeploymentIds(deployment_ids))? + .status(Filter::DeploymentIds(deployment_ids)) + .await? 
.into_iter() .map(|status| { let id = status.id.0; diff --git a/core/graphman/src/commands/deployment/pause.rs b/core/graphman/src/commands/deployment/pause.rs index d7197d42fb3..9b2e78102ed 100644 --- a/core/graphman/src/commands/deployment/pause.rs +++ b/core/graphman/src/commands/deployment/pause.rs @@ -33,22 +33,24 @@ impl ActiveDeployment { } } -pub fn load_active_deployment( +pub async fn load_active_deployment( primary_pool: ConnectionPool, deployment: &DeploymentSelector, ) -> Result<ActiveDeployment, PauseDeploymentError> { - let mut primary_conn = primary_pool.get().map_err(GraphmanError::from)?; + let mut primary_conn = primary_pool.get().await.map_err(GraphmanError::from)?; let locator = crate::deployment::load_deployment_locator( &mut primary_conn, deployment, &DeploymentVersionSelector::All, - )?; + ) + .await?; let mut catalog_conn = catalog::Connection::new(primary_conn); let site = catalog_conn .locate_site(locator.clone()) + .await .map_err(GraphmanError::from)? .ok_or_else(|| { GraphmanError::Store(anyhow!("deployment site not found for '{locator}'")) @@ -56,6 +58,7 @@ pub fn load_active_deployment( let (_, is_paused) = catalog_conn .assignment_status(&site) + .await .map_err(GraphmanError::from)? .ok_or_else(|| { GraphmanError::Store(anyhow!("assignment status not found for '{locator}'")) @@ -68,16 +71,18 @@ pub fn load_active_deployment( Ok(ActiveDeployment { locator, site }) } -pub fn pause_active_deployment( +pub async fn pause_active_deployment( primary_pool: ConnectionPool, notification_sender: Arc<NotificationSender>, active_deployment: ActiveDeployment, ) -> Result<(), GraphmanError> { - let primary_conn = primary_pool.get()?; + let primary_conn = primary_pool.get().await?; let mut catalog_conn = catalog::Connection::new(primary_conn); - let changes = catalog_conn.pause_subgraph(&active_deployment.site)?; - catalog_conn.send_store_event(&notification_sender, &StoreEvent::new(changes))?; + let changes = catalog_conn.pause_subgraph(&active_deployment.site).await?; + catalog_conn + .send_store_event(&notification_sender, &StoreEvent::new(changes)) + .await?; Ok(()) } diff --git a/core/graphman/src/commands/deployment/reassign.rs b/core/graphman/src/commands/deployment/reassign.rs index 9ca1f66d83c..d3979dadba7 100644 --- a/core/graphman/src/commands/deployment/reassign.rs +++ b/core/graphman/src/commands/deployment/reassign.rs @@ -25,14 +25,15 @@ impl Deployment { &self.locator } - pub fn assigned_node( + pub async fn assigned_node( &self, primary_pool: ConnectionPool, ) -> Result<Option<NodeId>, GraphmanError> { - let primary_conn = primary_pool.get().map_err(GraphmanError::from)?; + let primary_conn = primary_pool.get().await.map_err(GraphmanError::from)?; let mut catalog_conn = catalog::Connection::new(primary_conn); let node = catalog_conn .assigned_node(&self.site) + .await .map_err(GraphmanError::from)?; Ok(node) } @@ -53,22 +54,24 @@ pub enum ReassignResult { CompletedWithWarnings(Vec<String>), } -pub fn load_deployment( +pub async fn load_deployment( primary_pool: ConnectionPool, deployment: &DeploymentSelector, ) -> Result<Deployment, ReassignDeploymentError> { - let mut primary_conn = primary_pool.get().map_err(GraphmanError::from)?; + let mut primary_conn = primary_pool.get().await.map_err(GraphmanError::from)?; let locator = crate::deployment::load_deployment_locator( &mut primary_conn, deployment, &DeploymentVersionSelector::All, - )?; + ) + .await?; let mut catalog_conn = catalog::Connection::new(primary_conn); let site = catalog_conn .locate_site(locator.clone()) + .await .map_err(GraphmanError::from)? 
.ok_or_else(|| { GraphmanError::Store(anyhow!("deployment site not found for '{locator}'")) @@ -77,14 +80,14 @@ pub fn load_deployment( Ok(Deployment { locator, site }) } -pub fn reassign_deployment( +pub async fn reassign_deployment( primary_pool: ConnectionPool, notification_sender: Arc<NotificationSender>, deployment: &Deployment, node: &NodeId, curr_node: Option<NodeId>, ) -> Result<ReassignResult, ReassignDeploymentError> { - let primary_conn = primary_pool.get().map_err(GraphmanError::from)?; + let primary_conn = primary_pool.get().await.map_err(GraphmanError::from)?; let mut catalog_conn = catalog::Connection::new(primary_conn); let changes: Vec<AssignmentChange> = match &curr_node { Some(curr) => { @@ -93,11 +96,13 @@ pub fn reassign_deployment( } else { catalog_conn .reassign_subgraph(&deployment.site, &node) + .await .map_err(GraphmanError::from)? } } None => catalog_conn .assign_subgraph(&deployment.site, &node) + .await .map_err(GraphmanError::from)?, }; @@ -110,11 +115,13 @@ pub fn reassign_deployment( catalog_conn .send_store_event(&notification_sender, &StoreEvent::new(changes)) + .await .map_err(GraphmanError::from)?; let mirror = catalog::Mirror::primary_only(primary_pool); let count = mirror .assignments(&node) + .await .map_err(GraphmanError::from)? .len(); if count == 1 { diff --git a/core/graphman/src/commands/deployment/resume.rs b/core/graphman/src/commands/deployment/resume.rs index ab394ef4791..0e91a997c01 100644 --- a/core/graphman/src/commands/deployment/resume.rs +++ b/core/graphman/src/commands/deployment/resume.rs @@ -33,22 +33,24 @@ impl PausedDeployment { } } -pub fn load_paused_deployment( +pub async fn load_paused_deployment( primary_pool: ConnectionPool, deployment: &DeploymentSelector, ) -> Result<PausedDeployment, ResumeDeploymentError> { - let mut primary_conn = primary_pool.get().map_err(GraphmanError::from)?; + let mut primary_conn = primary_pool.get().await.map_err(GraphmanError::from)?; let locator = crate::deployment::load_deployment_locator( &mut primary_conn, deployment, &DeploymentVersionSelector::All, - )?; + ) + .await?; let mut catalog_conn = catalog::Connection::new(primary_conn); let site = catalog_conn .locate_site(locator.clone()) + .await .map_err(GraphmanError::from)? .ok_or_else(|| { GraphmanError::Store(anyhow!("deployment site not found for '{locator}'")) @@ -56,6 +58,7 @@ pub fn load_paused_deployment( let (_, is_paused) = catalog_conn .assignment_status(&site) + .await .map_err(GraphmanError::from)? 
.ok_or_else(|| { GraphmanError::Store(anyhow!("assignment status not found for '{locator}'")) @@ -68,16 +71,20 @@ pub fn load_paused_deployment( Ok(PausedDeployment { locator, site }) } -pub fn resume_paused_deployment( +pub async fn resume_paused_deployment( primary_pool: ConnectionPool, notification_sender: Arc<NotificationSender>, paused_deployment: PausedDeployment, ) -> Result<(), GraphmanError> { - let primary_conn = primary_pool.get()?; + let primary_conn = primary_pool.get().await?; let mut catalog_conn = catalog::Connection::new(primary_conn); - let changes = catalog_conn.resume_subgraph(&paused_deployment.site)?; - catalog_conn.send_store_event(&notification_sender, &StoreEvent::new(changes))?; + let changes = catalog_conn + .resume_subgraph(&paused_deployment.site) + .await?; + catalog_conn + .send_store_event(&notification_sender, &StoreEvent::new(changes)) + .await?; Ok(()) } diff --git a/core/graphman/src/commands/deployment/unassign.rs b/core/graphman/src/commands/deployment/unassign.rs index 0061fac49b6..ee738de40a6 100644 --- a/core/graphman/src/commands/deployment/unassign.rs +++ b/core/graphman/src/commands/deployment/unassign.rs @@ -33,22 +33,24 @@ pub enum UnassignDeploymentError { Common(#[from] GraphmanError), } -pub fn load_assigned_deployment( +pub async fn load_assigned_deployment( primary_pool: ConnectionPool, deployment: &DeploymentSelector, ) -> Result<AssignedDeployment, UnassignDeploymentError> { - let mut primary_conn = primary_pool.get().map_err(GraphmanError::from)?; + let mut primary_conn = primary_pool.get().await.map_err(GraphmanError::from)?; let locator = crate::deployment::load_deployment_locator( &mut primary_conn, deployment, &DeploymentVersionSelector::All, - )?; + ) + .await?; let mut catalog_conn = catalog::Connection::new(primary_conn); let site = catalog_conn .locate_site(locator.clone()) + .await .map_err(GraphmanError::from)? .ok_or_else(|| { GraphmanError::Store(anyhow!("deployment site not found for '{locator}'")) @@ -56,6 +58,7 @@ pub fn load_assigned_deployment( match catalog_conn .assigned_node(&site) + .await .map_err(GraphmanError::from)? 
{ Some(_) => Ok(AssignedDeployment { locator, site }), @@ -65,16 +68,18 @@ pub fn load_assigned_deployment( } } -pub fn unassign_deployment( +pub async fn unassign_deployment( primary_pool: ConnectionPool, notification_sender: Arc<NotificationSender>, deployment: AssignedDeployment, ) -> Result<(), GraphmanError> { - let primary_conn = primary_pool.get()?; + let primary_conn = primary_pool.get().await?; let mut catalog_conn = catalog::Connection::new(primary_conn); - let changes = catalog_conn.unassign_subgraph(&deployment.site)?; - catalog_conn.send_store_event(&notification_sender, &StoreEvent::new(changes))?; + let changes = catalog_conn.unassign_subgraph(&deployment.site).await?; + catalog_conn + .send_store_event(&notification_sender, &StoreEvent::new(changes)) + .await?; Ok(()) } diff --git a/core/graphman/src/deployment.rs b/core/graphman/src/deployment.rs index 1d749af54bb..0a38f175586 100644 --- a/core/graphman/src/deployment.rs +++ b/core/graphman/src/deployment.rs @@ -1,11 +1,19 @@ use anyhow::anyhow; use diesel::dsl::sql; -use diesel::prelude::*; use diesel::sql_types::Text; +use diesel::BoolExpressionMethods; +use diesel::ExpressionMethods; +use diesel::JoinOnDsl; +use diesel::NullableExpressionMethods; +use diesel::PgTextExpressionMethods; +use diesel::QueryDsl; +use diesel::Queryable; +use diesel_async::RunQueryDsl; use graph::components::store::DeploymentId; use graph::components::store::DeploymentLocator; use graph::data::subgraph::DeploymentHash; use graph_store_postgres::command_support::catalog; +use graph_store_postgres::AsyncPgConnection; use itertools::Itertools; use crate::GraphmanError; @@ -48,8 +56,8 @@ impl Deployment { } } -pub(crate) fn load_deployments( - primary_conn: &mut PgConnection, +pub(crate) async fn load_deployments( + primary_conn: &mut AsyncPgConnection, deployment: &DeploymentSelector, version: &DeploymentVersionSelector, ) -> Result<Vec<Deployment>, GraphmanError> { @@ -124,15 +132,16 @@ pub(crate) fn load_deployments( } } - query.load(primary_conn).map_err(Into::into) + query.load(primary_conn).await.map_err(Into::into) } -pub(crate) fn load_deployment_locator( - primary_conn: &mut PgConnection, +pub(crate) async fn load_deployment_locator( + primary_conn: &mut AsyncPgConnection, deployment: &DeploymentSelector, version: &DeploymentVersionSelector, ) -> Result<DeploymentLocator, GraphmanError> { - let deployment_locator = load_deployments(primary_conn, deployment, version)? + let deployment_locator = load_deployments(primary_conn, deployment, version) + .await? .into_iter() .map(|deployment| deployment.locator()) .unique() diff --git a/core/graphman/src/execution_tracker.rs b/core/graphman/src/execution_tracker.rs index 96471d7c4a0..806d78defed 100644 --- a/core/graphman/src/execution_tracker.rs +++ b/core/graphman/src/execution_tracker.rs @@ -41,7 +41,7 @@ where let store = self.store.clone(); graph::spawn(async move { - store.mark_execution_as_running(id).unwrap(); + store.mark_execution_as_running(id).await.unwrap(); let stop_heartbeat = heartbeat_stopper.notified(); tokio::pin!(stop_heartbeat); @@ -55,7 +55,7 @@ where }, _ = tokio::time::sleep(DEFAULT_HEARTBEAT_INTERVAL) => { - store.mark_execution_as_running(id).unwrap(); + store.mark_execution_as_running(id).await.unwrap(); }, } } } /// Completes the execution with an error. 
- pub fn track_failure(self, error_message: String) -> Result<()> { + pub async fn track_failure(self, error_message: String) -> Result<()> { self.heartbeat_stopper.notify_one(); - self.store.mark_execution_as_failed(self.id, error_message) + self.store + .mark_execution_as_failed(self.id, error_message) + .await } /// Completes the execution with a success. - pub fn track_success(self) -> Result<()> { + pub async fn track_success(self) -> Result<()> { self.heartbeat_stopper.notify_one(); - self.store.mark_execution_as_succeeded(self.id) + self.store.mark_execution_as_succeeded(self.id).await } } diff --git a/core/graphman_store/Cargo.toml b/core/graphman_store/Cargo.toml index 59705f944e2..fee9daff663 100644 --- a/core/graphman_store/Cargo.toml +++ b/core/graphman_store/Cargo.toml @@ -5,6 +5,7 @@ edition.workspace = true [dependencies] anyhow = { workspace = true } +async-trait = { workspace = true } chrono = { workspace = true } diesel = { workspace = true } strum = { workspace = true } diff --git a/core/graphman_store/src/lib.rs b/core/graphman_store/src/lib.rs index b44cbca8a91..f986d6484c0 100644 --- a/core/graphman_store/src/lib.rs +++ b/core/graphman_store/src/lib.rs @@ -4,6 +4,7 @@ //! commands and store implementations. use anyhow::Result; +use async_trait::async_trait; use chrono::DateTime; use chrono::Utc; use diesel::deserialize::FromSql; @@ -23,15 +24,16 @@ use strum::IntoStaticStr; /// Describes all the capabilities that graphman commands need from a persistent storage. /// /// The primary use case for this is background execution of commands. +#[async_trait] pub trait GraphmanStore { /// Creates a new pending execution of the specified type. /// The implementation is expected to manage execution IDs and return unique IDs on each call. /// /// Creating a new execution does not mean that a command is actually running or will run. - fn new_execution(&self, kind: CommandKind) -> Result<ExecutionId>; + async fn new_execution(&self, kind: CommandKind) -> Result<ExecutionId>; /// Returns all stored execution data. - fn load_execution(&self, id: ExecutionId) -> Result<Execution>; + async fn load_execution(&self, id: ExecutionId) -> Result<Execution>; /// When an execution begins to make progress, this method is used to update its status. /// /// This method is also used for sending heartbeats and can be called multiple times /// to show that the execution is still making progress. /// /// The implementation is expected to not allow updating the status of completed executions. - fn mark_execution_as_running(&self, id: ExecutionId) -> Result<()>; + async fn mark_execution_as_running(&self, id: ExecutionId) -> Result<()>; /// This is a finalizing operation and is expected to be called only once, /// when an execution fails. /// /// The implementation is not expected to prevent overriding the final state of an execution. - fn mark_execution_as_failed(&self, id: ExecutionId, error_message: String) -> Result<()>; + async fn mark_execution_as_failed(&self, id: ExecutionId, error_message: String) -> Result<()>; /// This is a finalizing operation and is expected to be called only once, /// when an execution succeeds. /// /// The implementation is not expected to prevent overriding the final state of an execution. - fn mark_execution_as_succeeded(&self, id: ExecutionId) -> Result<()>; + async fn mark_execution_as_succeeded(&self, id: ExecutionId) -> Result<()>; } /// Data stored about a command execution. 
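The `GraphmanStore` hunk above shows the `#[async_trait]` pattern this PR applies to every store-facing trait: each method becomes `async fn`, and both the trait and every `impl` block carry the attribute. A minimal, self-contained sketch of just the mechanics — the toy `JobStore` trait and `MemStore` type below are illustrative, not part of this PR:

```rust
use std::collections::HashMap;

use async_trait::async_trait;
use tokio::sync::Mutex;

// Like `GraphmanStore`, every method becomes `async fn`; `#[async_trait]`
// rewrites each one to return a boxed future so the trait stays object-safe.
#[async_trait]
pub trait JobStore {
    async fn new_job(&self) -> u64;
    async fn mark_done(&self, id: u64);
}

pub struct MemStore {
    jobs: Mutex<HashMap<u64, bool>>,
}

// The implementation must repeat the attribute; `&self` borrows still work
// because the generated future captures the receiver's lifetime.
#[async_trait]
impl JobStore for MemStore {
    async fn new_job(&self) -> u64 {
        let mut jobs = self.jobs.lock().await;
        let id = jobs.len() as u64 + 1;
        jobs.insert(id, false);
        id
    }

    async fn mark_done(&self, id: u64) {
        self.jobs.lock().await.insert(id, true);
    }
}
```

Callers go through a trait object (e.g. `Arc<dyn JobStore>`) and must `.await` each call, which is why call sites throughout this diff gain `.await` and why the tests move from `#[tokio::test]` to the runtime-providing `#[graph::test]` macro.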
diff --git a/core/src/polling_monitor/ipfs_service.rs b/core/src/polling_monitor/ipfs_service.rs index b02578c0ed5..e35fcf30d30 100644 --- a/core/src/polling_monitor/ipfs_service.rs +++ b/core/src/polling_monitor/ipfs_service.rs @@ -109,7 +109,6 @@ mod test { use graph::ipfs::test_utils::add_files_to_local_ipfs_node_for_testing; use graph::ipfs::{IpfsContext, IpfsMetrics, IpfsRpcClient, ServerAddress}; use graph::log::discard; - use graph::tokio; use tower::ServiceExt; use wiremock::matchers as m; use wiremock::Mock; @@ -118,7 +117,7 @@ mod test { use super::*; - #[tokio::test] + #[graph::test] async fn cat_file_in_folder() { let random_bytes = "One morning, when Gregor Samsa woke \ from troubled dreams, he found himself transformed in his bed \ @@ -155,7 +154,7 @@ mod test { assert_eq!(content.to_vec(), random_bytes); } - #[tokio::test] + #[graph::test] async fn arweave_get() { const ID: &str = "8APeQ5lW0-csTcBaGdPBDLAL2ci2AT9pTn2tppGPU_8"; @@ -169,7 +168,7 @@ mod test { assert_eq!(expected, body); } - #[tokio::test] + #[graph::test] async fn no_client_retries_to_allow_polling_monitor_to_handle_retries_internally() { const CID: &str = "QmUNLLsPACCz1vLxQVkXqqLX5R1X345qqfHbsf67hvA3Nn"; diff --git a/core/src/polling_monitor/mod.rs b/core/src/polling_monitor/mod.rs index 7bf4726e7c3..cc76c83d9ce 100644 --- a/core/src/polling_monitor/mod.rs +++ b/core/src/polling_monitor/mod.rs @@ -281,7 +281,7 @@ mod tests { (handle, monitor, rx) } - #[tokio::test] + #[graph::test] async fn polling_monitor_shared_svc() { let (svc, mut handle) = mock::pair(); let shared_svc = tower::buffer::Buffer::new(tower::limit::ConcurrencyLimit::new(svc, 1), 1); @@ -303,7 +303,7 @@ mod tests { assert_eq!(rx1.recv().await, Some(("req-0", "res-0"))); } - #[tokio::test] + #[graph::test] async fn polling_monitor_simple() { let (mut handle, monitor, mut rx) = setup(); @@ -313,7 +313,7 @@ mod tests { assert_eq!(rx.recv().await, Some(("req-0", "res-0"))); } - #[tokio::test] + #[graph::test] async fn polling_monitor_unordered() { let (mut handle, monitor, mut rx) = setup(); @@ -330,7 +330,7 @@ mod tests { assert_eq!(rx.recv().await, Some(("req-1", "res-1"))); } - #[tokio::test] + #[graph::test] async fn polling_monitor_failed_push_to_back() { let (mut handle, monitor, mut rx) = setup(); @@ -354,7 +354,7 @@ mod tests { assert_eq!(rx.recv().await, Some(("req-1", "res-1"))); } - #[tokio::test] + #[graph::test] async fn polling_monitor_cancelation() { // Cancelation on receiver drop, no pending request. 
let (mut handle, _monitor, rx) = setup(); diff --git a/core/src/subgraph/context/mod.rs b/core/src/subgraph/context/mod.rs index 78a3c1d83c3..846e0d6fefb 100644 --- a/core/src/subgraph/context/mod.rs +++ b/core/src/subgraph/context/mod.rs @@ -25,10 +25,10 @@ use graph::{ TriggerProcessor, }, slog::Logger, - tokio::sync::mpsc, }; use std::sync::{Arc, RwLock}; use std::{collections::HashMap, time::Instant}; +use tokio::sync::mpsc; use self::instance::SubgraphInstance; use super::Decoder; @@ -281,7 +281,7 @@ impl OffchainMonitor { } pub fn ready_offchain_events(&mut self) -> Result, Error> { - use graph::tokio::sync::mpsc::error::TryRecvError; + use tokio::sync::mpsc::error::TryRecvError; let mut triggers = vec![]; loop { diff --git a/core/src/subgraph/instance_manager.rs b/core/src/subgraph/instance_manager.rs index 81c1a3ccd1a..5a70697c7f1 100644 --- a/core/src/subgraph/instance_manager.rs +++ b/core/src/subgraph/instance_manager.rs @@ -9,6 +9,7 @@ use crate::subgraph::Decoder; use std::collections::BTreeSet; use crate::subgraph::runner::SubgraphRunner; +use async_trait::async_trait; use graph::blockchain::block_stream::{BlockStreamMetrics, TriggersAdapterWrapper}; use graph::blockchain::{Blockchain, BlockchainKind, DataSource, NodeCapabilities}; use graph::components::link_resolver::LinkResolverContext; @@ -229,7 +230,8 @@ impl SubgraphInstanceManager { for hash in hashes { let loc = subgraph_store - .active_locator(&hash)? + .active_locator(&hash) + .await? .ok_or_else(|| anyhow!("no active deployment for hash {}", hash))?; let sourceable_store = subgraph_store.clone().sourceable(loc.id.clone()).await?; @@ -300,7 +302,7 @@ impl SubgraphInstanceManager { .set_manifest_raw_yaml(&deployment.hash, raw_yaml) .await?; if let Some(graft) = &manifest.graft { - if self.subgraph_store.is_deployed(&graft.base)? { + if self.subgraph_store.is_deployed(&graft.base).await? 
{ let file_bytes = self .link_resolver .cat( @@ -362,7 +364,8 @@ impl SubgraphInstanceManager { // Write it to the database let deployment_features = manifest.deployment_features(); self.subgraph_store - .create_subgraph_features(deployment_features)?; + .create_subgraph_features(deployment_features) + .await?; // Start the subgraph deployment before reading dynamic data // sources; if the subgraph is a graft or a copy, starting it will @@ -437,7 +440,8 @@ impl SubgraphInstanceManager { // Obtain the debug fork from the subgraph store let debug_fork = self .subgraph_store - .debug_fork(&deployment.hash, logger.clone())?; + .debug_fork(&deployment.hash, logger.clone()) + .await?; // Create a subgraph instance from the manifest; this moves // ownership of the manifest and host builder into the new instance @@ -493,7 +497,7 @@ impl SubgraphInstanceManager { let deployment_head = store.block_ptr().map(|ptr| ptr.number).unwrap_or(0) as f64; block_stream_metrics.deployment_head.set(deployment_head); - let (runtime_adapter, decoder_hook) = chain.runtime()?; + let (runtime_adapter, decoder_hook) = chain.runtime().await?; let host_builder = graph_runtime_wasm::RuntimeHostBuilder::new( runtime_adapter, self.link_resolver.cheap_clone(), @@ -511,7 +515,7 @@ impl SubgraphInstanceManager { let causality_region_seq = CausalityRegionSeq::from_current(store.causality_region_curr_val().await?); - let instrument = self.subgraph_store.instrument(&deployment)?; + let instrument = self.subgraph_store.instrument(&deployment).await?; let decoder = Box::new(Decoder::new(decoder_hook)); diff --git a/core/src/subgraph/registrar.rs b/core/src/subgraph/registrar.rs index b05ccdf4e33..928e2158f93 100644 --- a/core/src/subgraph/registrar.rs +++ b/core/src/subgraph/registrar.rs @@ -18,9 +18,9 @@ use graph::prelude::{ CreateSubgraphResult, SubgraphAssignmentProvider as SubgraphAssignmentProviderTrait, SubgraphRegistrar as SubgraphRegistrarTrait, *, }; -use graph::tokio_retry::Retry; use graph::util::futures::retry_strategy; use graph::util::futures::RETRY_DEFAULT_LIMIT; +use tokio_retry::Retry; pub struct SubgraphRegistrar { logger: Logger, @@ -233,7 +233,7 @@ where &self, name: SubgraphName, ) -> Result { - let id = self.store.create_subgraph(name.clone())?; + let id = self.store.create_subgraph(name.clone()).await?; debug!(self.logger, "Created subgraph"; "subgraph_name" => name.to_string()); @@ -365,7 +365,7 @@ where } async fn remove_subgraph(&self, name: SubgraphName) -> Result<(), SubgraphRegistrarError> { - self.store.clone().remove_subgraph(name.clone())?; + self.store.clone().remove_subgraph(name.clone()).await?; debug!(self.logger, "Removed subgraph"; "subgraph_name" => name.to_string()); @@ -381,31 +381,31 @@ where hash: &DeploymentHash, node_id: &NodeId, ) -> Result<(), SubgraphRegistrarError> { - let locator = self.store.active_locator(hash)?; + let locator = self.store.active_locator(hash).await?; let deployment = locator.ok_or_else(|| SubgraphRegistrarError::DeploymentNotFound(hash.to_string()))?; - self.store.reassign_subgraph(&deployment, node_id)?; + self.store.reassign_subgraph(&deployment, node_id).await?; Ok(()) } async fn pause_subgraph(&self, hash: &DeploymentHash) -> Result<(), SubgraphRegistrarError> { - let locator = self.store.active_locator(hash)?; + let locator = self.store.active_locator(hash).await?; let deployment = locator.ok_or_else(|| SubgraphRegistrarError::DeploymentNotFound(hash.to_string()))?; - self.store.pause_subgraph(&deployment)?; + self.store.pause_subgraph(&deployment).await?; 
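The pause/resume/reassign handlers in the registrar now share the same async shape: resolve the active locator, then await the store action. A condensed sketch of the common lookup step, under the assumption that `SubgraphStore` here is the async trait from this diff and that the error conversion via `SubgraphRegistrarError::StoreError` works as in the surrounding code (the helper name is illustrative):

```rust
// Sketch: the lookup shared by pause_subgraph/resume_subgraph/reassign_subgraph.
async fn active_deployment<S: SubgraphStore>(
    store: &S,
    hash: &DeploymentHash,
) -> Result<DeploymentLocator, SubgraphRegistrarError> {
    let locator = store
        .active_locator(hash)
        .await
        .map_err(SubgraphRegistrarError::StoreError)?;
    // No active deployment for this hash is reported as DeploymentNotFound.
    locator.ok_or_else(|| SubgraphRegistrarError::DeploymentNotFound(hash.to_string()))
}
```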
Ok(()) } async fn resume_subgraph(&self, hash: &DeploymentHash) -> Result<(), SubgraphRegistrarError> { - let locator = self.store.active_locator(hash)?; + let locator = self.store.active_locator(hash).await?; let deployment = locator.ok_or_else(|| SubgraphRegistrarError::DeploymentNotFound(hash.to_string()))?; - self.store.resume_subgraph(&deployment)?; + self.store.resume_subgraph(&deployment).await?; Ok(()) } @@ -491,7 +491,7 @@ async fn create_subgraph_version( // Validate the graft_base if there is a pending graft, ensuring its presence. // If the subgraph is new (indicated by DeploymentNotFound), the graft_base should be validated. // If the subgraph already exists and there is no pending graft, graft_base validation is not required. - let should_validate = match store.graft_pending(&deployment) { + let should_validate = match store.graft_pending(&deployment).await { Ok(graft_pending) => graft_pending, Err(StoreError::DeploymentNotFound(_)) => true, Err(e) => return Err(SubgraphRegistrarError::StoreError(e)), @@ -512,7 +512,7 @@ async fn create_subgraph_version( let store = store.clone(); let deployment_store = store.clone(); - if !store.subgraph_exists(&name)? { + if !store.subgraph_exists(&name).await? { debug!( logger, "Subgraph not found, could not create_subgraph_version"; @@ -587,5 +587,6 @@ async fn create_subgraph_version( network_name.into(), version_switching_mode, ) + .await .map_err(SubgraphRegistrarError::SubgraphDeploymentError) } diff --git a/core/src/subgraph/runner.rs b/core/src/subgraph/runner.rs index 237b4cb472e..cca0e59e22b 100644 --- a/core/src/subgraph/runner.rs +++ b/core/src/subgraph/runner.rs @@ -426,7 +426,10 @@ where modifications: mut mods, entity_lfu_cache: cache, evict_stats, - } = entity_cache.as_modifications(block_ptr.number).classify()?; + } = entity_cache + .as_modifications(block_ptr.number) + .await + .classify()?; section.end(); log_evict_stats(&self.logger, &evict_stats); @@ -495,6 +498,9 @@ where // In this scenario the only entity that is stored/transacted is the PoI, // all of the others are discarded. if has_errors && self.inputs.errors_are_fatal() { + if let Err(e) = self.inputs.store.flush().await { + error!(logger, "Failed to flush store after fatal errors"; "error" => format!("{:#}", e)); + } // Only the first error is reported. return Err(ProcessingError::Deterministic(Box::new( first_error.unwrap(), @@ -512,7 +518,7 @@ where .non_deterministic()?; if has_errors { - self.maybe_cancel()?; + self.maybe_cancel().await?; } Ok(()) @@ -520,13 +526,14 @@ where /// Cancel the subgraph if `disable_fail_fast` is not set and it is not /// synced - fn maybe_cancel(&self) -> Result<(), ProcessingError> { + async fn maybe_cancel(&self) -> Result<(), ProcessingError> { // To prevent a buggy pending version from replacing a current version, if errors are // present the subgraph will be unassigned. let store = &self.inputs.store; if !ENV_VARS.disable_fail_fast && !store.is_deployment_synced() { store .pause_subgraph() + .await .map_err(|e| ProcessingError::Unknown(e.into()))?; // Use `Canceled` to avoid setting the subgraph health to failed, an error was @@ -990,7 +997,8 @@ where let outcome = self .inputs .store - .unfail_non_deterministic_error(&block_ptr)?; + .unfail_non_deterministic_error(&block_ptr) + .await?; // Stop trying to unfail. self.state.should_try_unfail_non_deterministic = false; @@ -1286,7 +1294,8 @@ where mods.extend( block_state .entity_cache - .as_modifications(block.number())? + .as_modifications(block.number()) + .await?
.modifications, ); processed_data_sources.extend(block_state.processed_data_sources); @@ -1566,7 +1575,7 @@ async fn update_proof_of_indexing( entity_cache: &mut EntityCache, ) -> Result<(), Error> { // Helper to store the digest as a PoI entity in the cache - fn store_poi_entity( + async fn store_poi_entity( entity_cache: &mut EntityCache, key: EntityKey, digest: Bytes, @@ -1586,7 +1595,7 @@ async fn update_proof_of_indexing( data.push((entity_cache.schema.poi_block_time(), block_time)); } let poi = entity_cache.make_entity(data)?; - entity_cache.set(key, poi, block, None) + entity_cache.set(key, poi, block, None).await } let _section_guard = stopwatch.start_section("update_proof_of_indexing"); @@ -1610,6 +1619,7 @@ async fn update_proof_of_indexing( let poi_digest = entity_cache.schema.poi_digest().clone(); let prev_poi = entity_cache .get(&entity_key, GetScope::Store) + .await .map_err(Error::from)? .map(|entity| match entity.get(poi_digest.as_str()) { Some(Value::Bytes(b)) => b.clone(), @@ -1628,7 +1638,8 @@ async fn update_proof_of_indexing( updated_proof_of_indexing, block_time, block_number, - )?; + ) + .await?; } Ok(()) diff --git a/gnd/src/main.rs b/gnd/src/main.rs index 4c34a59317e..f8136acaf89 100644 --- a/gnd/src/main.rs +++ b/gnd/src/main.rs @@ -9,11 +9,11 @@ use graph::{ log::logger, prelude::{CheapClone, DeploymentHash, LinkResolver, SubgraphName}, slog::{error, info, Logger}, - tokio::{self, sync::mpsc}, }; use graph_core::polling_monitor::ipfs_service; use graph_node::{launcher, opt::Opt}; use lazy_static::lazy_static; +use tokio::{self, sync::mpsc}; use gnd::watcher::{deploy_all_subgraphs, parse_manifest_args, watch_subgraphs}; diff --git a/gnd/src/watcher.rs b/gnd/src/watcher.rs index 743b45f0391..3171f240128 100644 --- a/gnd/src/watcher.rs +++ b/gnd/src/watcher.rs @@ -2,12 +2,12 @@ use anyhow::{anyhow, Context, Result}; use globset::{Glob, GlobSet, GlobSetBuilder}; use graph::prelude::{DeploymentHash, SubgraphName}; use graph::slog::{self, error, info, Logger}; -use graph::tokio::sync::mpsc::Sender; use notify::{recommended_watcher, Event, RecursiveMode, Watcher}; use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::sync::mpsc; use std::time::Duration; +use tokio::sync::mpsc::Sender; const WATCH_DELAY: Duration = Duration::from_secs(5); const DEFAULT_BUILD_DIR: &str = "build"; diff --git a/graph/Cargo.toml b/graph/Cargo.toml index 44e004be00c..775a77d2eb6 100644 --- a/graph/Cargo.toml +++ b/graph/Cargo.toml @@ -6,7 +6,7 @@ edition.workspace = true [dependencies] base64 = "=0.21.7" anyhow = "1.0" -async-trait = "0.1.74" +async-trait = { workspace = true } async-stream = "0.3" atomic_refcell = "0.1.13" # We require this precise version of bigdecimal. 
Updating to later versions @@ -68,16 +68,9 @@ slog-envlogger = "2.1.0" slog-term = "2.7.0" petgraph = "0.8.2" tiny-keccak = "1.5.0" -tokio = { version = "1.45.1", features = [ - "time", - "sync", - "macros", - "test-util", - "rt-multi-thread", - "parking_lot", -] } -tokio-stream = { version = "0.1.15", features = ["sync"] } -tokio-retry = "0.3.0" +tokio = { workspace = true} +tokio-stream = { workspace = true } +tokio-retry = { workspace = true } toml = "0.9.7" url = "2.5.7" prometheus = "0.14.0" diff --git a/graph/derive/src/lib.rs b/graph/derive/src/lib.rs index a722b90d819..ce13fc9faf9 100644 --- a/graph/derive/src/lib.rs +++ b/graph/derive/src/lib.rs @@ -3,7 +3,9 @@ use proc_macro::TokenStream; use proc_macro2::{Span, TokenStream as TokenStream2}; use quote::quote; -use syn::{parse_macro_input, Data, DeriveInput, Fields, Generics, Ident, Index, TypeParamBound}; +use syn::{ + parse_macro_input, Data, DeriveInput, Fields, Generics, Ident, Index, ItemFn, TypeParamBound, +}; #[proc_macro_derive(CheapClone)] pub fn derive_cheap_clone(input: TokenStream) -> TokenStream { @@ -235,6 +237,68 @@ pub fn derive_cache_weight(input: TokenStream) -> TokenStream { TokenStream::from(expanded) } +/// A proc macro attribute similar to `tokio::test` but uses the +/// `TEST_RUNTIME` instead of creating a new runtime for each test. +/// +/// # Example +/// +/// ```ignore +/// use graph::prelude::*; +/// +/// #[graph::test] +/// async fn my_test() { +/// // Test code here +/// } +/// ``` +/// +/// The macro transforms the async test function to use +/// `TEST_RUNTIME.block_on()`. +/// +/// Note that for tests in the `graph` crate itself, the macro must be used +/// as `#[crate::test]` +#[proc_macro_attribute] +pub fn test(args: TokenStream, item: TokenStream) -> TokenStream { + let input = parse_macro_input!(item as ItemFn); + + if !args.is_empty() { + let msg = "the `#[graph::test]` attribute does not take any arguments"; + return syn::Error::new(Span::call_site(), msg) + .to_compile_error() + .into(); + } + + let ret = &input.sig.output; + let name = &input.sig.ident; + let body = &input.block; + let attrs = &input.attrs; + let vis = &input.vis; + + if input.sig.asyncness.is_none() { + let msg = "the `async` keyword is missing from the function declaration"; + return syn::Error::new_spanned(&input.sig.fn_token, msg) + .to_compile_error() + .into(); + } + + let crate_name = std::env::var("CARGO_CRATE_NAME").unwrap(); + let pkg_name = std::env::var("CARGO_PKG_NAME").unwrap(); + let runtime = if crate_name == "graph" && pkg_name == "graph" { + quote! { crate::tokio::TEST_RUNTIME } + } else { + quote! { graph::TEST_RUNTIME } + }; + + let expanded = quote! 
{ + #[::core::prelude::v1::test] + #(#attrs)* + #vis fn #name() #ret { + #runtime.block_on(async #body) + } + }; + + TokenStream::from(expanded) +} + #[cfg(test)] mod tests { use proc_macro_utils::assert_expansion; diff --git a/graph/src/blockchain/block_stream.rs b/graph/src/blockchain/block_stream.rs index 86f196ac99c..778474a7f6e 100644 --- a/graph/src/blockchain/block_stream.rs +++ b/graph/src/blockchain/block_stream.rs @@ -5,6 +5,7 @@ use crate::substreams_rpc::response::Message as SubstreamsMessage; use crate::substreams_rpc::BlockScopedData; use anyhow::Error; use async_stream::stream; +use async_trait::async_trait; use futures03::Stream; use prost_types::Any; use std::collections::{BTreeMap, BTreeSet, HashMap}; @@ -498,7 +499,9 @@ async fn get_entities_for_range( .iter() .map(|name| schema.entity_type(name)) .collect(); - Ok(store.get_range(entity_types?, CausalityRegion::ONCHAIN, from..to)?) + Ok(store + .get_range(entity_types?, CausalityRegion::ONCHAIN, from..to) + .await?) } impl TriggersAdapterWrapper { @@ -983,7 +986,7 @@ mod test { } } - #[tokio::test] + #[crate::test] async fn consume_stream() { let initial_block = 100; let buffer_size = 5; diff --git a/graph/src/blockchain/firehose_block_ingestor.rs b/graph/src/blockchain/firehose_block_ingestor.rs index fbe35eab3a7..774785fa2f5 100644 --- a/graph/src/blockchain/firehose_block_ingestor.rs +++ b/graph/src/blockchain/firehose_block_ingestor.rs @@ -78,7 +78,7 @@ where let mut backoff = ExponentialBackoff::new(Duration::from_millis(250), Duration::from_secs(30)); loop { - match self.chain_head_store.clone().chain_head_cursor() { + match self.chain_head_store.clone().chain_head_cursor().await { Ok(cursor) => return cursor.unwrap_or_default(), Err(e) => { error!(self.logger, "Fetching chain head cursor failed: {:#}", e); diff --git a/graph/src/blockchain/mock.rs b/graph/src/blockchain/mock.rs index b2d9bf71df2..577b5fbc816 100644 --- a/graph/src/blockchain/mock.rs +++ b/graph/src/blockchain/mock.rs @@ -458,7 +458,7 @@ impl Blockchain for MockBlockchain { todo!() } - fn runtime( + async fn runtime( &self, ) -> anyhow::Result<(std::sync::Arc>, Self::DecoderHook)> { bail!("mock has no runtime adapter") @@ -484,7 +484,7 @@ impl ChainHeadStore for MockChainStore { async fn chain_head_ptr(self: Arc) -> Result, Error> { unimplemented!() } - fn chain_head_cursor(&self) -> Result, Error> { + async fn chain_head_cursor(&self) -> Result, Error> { unimplemented!() } async fn set_chain_head( @@ -512,13 +512,13 @@ impl ChainStore for MockChainStore { } // Implement other required methods with minimal implementations - fn genesis_block_ptr(&self) -> Result { + async fn genesis_block_ptr(&self) -> Result { unimplemented!() } async fn upsert_block(&self, _block: Arc) -> Result<(), Error> { unimplemented!() } - fn upsert_light_blocks(&self, _blocks: &[&dyn Block]) -> Result<(), Error> { + async fn upsert_light_blocks(&self, _blocks: &[&dyn Block]) -> Result<(), Error> { unimplemented!() } async fn attempt_chain_head_update( @@ -538,16 +538,23 @@ impl ChainStore for MockChainStore { ) -> Result, Error> { unimplemented!() } - fn cleanup_cached_blocks( + async fn cleanup_cached_blocks( &self, _ancestor_count: BlockNumber, ) -> Result, Error> { unimplemented!() } - fn block_hashes_by_block_number(&self, _number: BlockNumber) -> Result, Error> { + async fn block_hashes_by_block_number( + &self, + _number: BlockNumber, + ) -> Result, Error> { unimplemented!() } - fn confirm_block_hash(&self, _number: BlockNumber, _hash: &BlockHash) -> Result { + async 
fn confirm_block_hash( + &self, + _number: BlockNumber, + _hash: &BlockHash, + ) -> Result { unimplemented!() } async fn block_number( @@ -578,7 +585,7 @@ impl ChainStore for MockChainStore { ) -> Result<(), Error> { unimplemented!() } - fn chain_identifier(&self) -> Result { + async fn chain_identifier(&self) -> Result { unimplemented!() } fn as_head_store(self: Arc) -> Arc { @@ -586,11 +593,12 @@ impl ChainStore for MockChainStore { } } +#[async_trait] impl ChainIdStore for MockChainStore { - fn chain_identifier(&self, _name: &ChainName) -> Result { + async fn chain_identifier(&self, _name: &ChainName) -> Result { unimplemented!() } - fn set_chain_identifier( + async fn set_chain_identifier( &self, _name: &ChainName, _ident: &ChainIdentifier, diff --git a/graph/src/blockchain/mod.rs b/graph/src/blockchain/mod.rs index 7768ea7f6e9..593faca3d32 100644 --- a/graph/src/blockchain/mod.rs +++ b/graph/src/blockchain/mod.rs @@ -213,7 +213,7 @@ pub trait Blockchain: Debug + Sized + Send + Sync + Unpin + 'static { fn is_refetch_block_required(&self) -> bool; - fn runtime(&self) -> anyhow::Result<(Arc>, Self::DecoderHook)>; + async fn runtime(&self) -> anyhow::Result<(Arc>, Self::DecoderHook)>; fn chain_client(&self) -> Arc>; diff --git a/graph/src/components/link_resolver/arweave.rs b/graph/src/components/link_resolver/arweave.rs index b58dd1c61e2..abd3d80a503 100644 --- a/graph/src/components/link_resolver/arweave.rs +++ b/graph/src/components/link_resolver/arweave.rs @@ -128,7 +128,7 @@ mod test { // This test ensures that passing txid/filename works when the txid refers to manifest. // the actual data seems to have some binary header and footer so these ranges were found // by inspecting the data with hexdump. - #[tokio::test] + #[crate::test] async fn fetch_bundler_url() { let url = Base64::from("Rtdn3QWEzM88MPC2dpWyV5waO7Vuz3VwPl_usS2WoHM/DriveManifest.json"); #[derive(Deserialize, Debug, PartialEq)] diff --git a/graph/src/components/link_resolver/file.rs b/graph/src/components/link_resolver/file.rs index f743efae1d2..14b7438642e 100644 --- a/graph/src/components/link_resolver/file.rs +++ b/graph/src/components/link_resolver/file.rs @@ -165,7 +165,7 @@ mod tests { use std::fs; use std::io::Write; - #[tokio::test] + #[crate::test] async fn test_file_resolver_absolute() { // Test the resolver without a base directory (absolute paths only) @@ -207,7 +207,7 @@ mod tests { let _ = fs::remove_dir(temp_dir); } - #[tokio::test] + #[crate::test] async fn test_file_resolver_with_base_dir() { // Test the resolver with a base directory @@ -256,7 +256,7 @@ mod tests { let _ = fs::remove_dir(temp_dir); } - #[tokio::test] + #[crate::test] async fn test_file_resolver_with_aliases() { // Create a temporary directory for test files let temp_dir = env::temp_dir().join("file_resolver_test_aliases"); diff --git a/graph/src/components/link_resolver/ipfs.rs b/graph/src/components/link_resolver/ipfs.rs index bd609247458..59a9f8027d7 100644 --- a/graph/src/components/link_resolver/ipfs.rs +++ b/graph/src/components/link_resolver/ipfs.rs @@ -246,7 +246,7 @@ mod tests { use crate::ipfs::test_utils::add_files_to_local_ipfs_node_for_testing; use crate::ipfs::{IpfsMetrics, IpfsRpcClient, ServerAddress}; - #[tokio::test] + #[crate::test] async fn max_file_size() { let mut env_vars = EnvVars::default(); env_vars.mappings.max_ipfs_file_bytes = 200; @@ -302,7 +302,7 @@ mod tests { stream.map_ok(|sv| sv.value).try_collect().await } - #[tokio::test] + #[crate::test] async fn read_json_stream() { let values = 
json_round_trip("\"with newline\"\n", EnvVars::default()).await; assert_eq!(vec![json!("with newline")], values.unwrap()); @@ -324,7 +324,7 @@ mod tests { ); } - #[tokio::test] + #[crate::test] async fn ipfs_map_file_size() { let file = "\"small test string that trips the size restriction\""; let mut env_vars = EnvVars::default(); diff --git a/graph/src/components/network_provider/chain_identifier_validator.rs b/graph/src/components/network_provider/chain_identifier_validator.rs index 2b784b55a45..d64eb0a401d 100644 --- a/graph/src/components/network_provider/chain_identifier_validator.rs +++ b/graph/src/components/network_provider/chain_identifier_validator.rs @@ -1,5 +1,6 @@ use std::sync::Arc; +use async_trait::async_trait; use thiserror::Error; use crate::blockchain::BlockHash; @@ -8,12 +9,13 @@ use crate::components::network_provider::ChainName; use crate::components::store::ChainIdStore; /// Additional requirements for stores that are necessary for provider checks. +#[async_trait] pub trait ChainIdentifierValidator: Send + Sync + 'static { /// Verifies that the chain identifier returned by the network provider /// matches the previously stored value. /// /// Fails if the identifiers do not match or if something goes wrong. - fn validate_identifier( + async fn validate_identifier( &self, chain_name: &ChainName, chain_identifier: &ChainIdentifier, @@ -21,7 +23,7 @@ pub trait ChainIdentifierValidator: Send + Sync + 'static { /// Saves the provided identifier that will be used as the source of truth /// for future validations. - fn update_identifier( + async fn update_identifier( &self, chain_name: &ChainName, chain_identifier: &ChainIdentifier, @@ -51,22 +53,23 @@ pub enum ChainIdentifierValidationError { Store(#[source] anyhow::Error), } -pub fn chain_id_validator(store: Arc) -> Arc { +pub fn chain_id_validator(store: Box) -> Arc { Arc::new(ChainIdentifierStore::new(store)) } pub(crate) struct ChainIdentifierStore { - store: Arc, + store: Box, } impl ChainIdentifierStore { - pub fn new(store: Arc) -> Self { + pub fn new(store: Box) -> Self { Self { store } } } +#[async_trait] impl ChainIdentifierValidator for ChainIdentifierStore { - fn validate_identifier( + async fn validate_identifier( &self, chain_name: &ChainName, chain_identifier: &ChainIdentifier, @@ -74,6 +77,7 @@ impl ChainIdentifierValidator for ChainIdentifierStore { let store_identifier = self .store .chain_identifier(chain_name) + .await .map_err(|err| ChainIdentifierValidationError::Store(err))?; if store_identifier.is_default() { @@ -108,13 +112,14 @@ impl ChainIdentifierValidator for ChainIdentifierStore { Ok(()) } - fn update_identifier( + async fn update_identifier( &self, chain_name: &ChainName, chain_identifier: &ChainIdentifier, ) -> Result<(), ChainIdentifierValidationError> { self.store .set_chain_identifier(chain_name, chain_identifier) + .await .map_err(|err| ChainIdentifierValidationError::Store(err)) } } diff --git a/graph/src/components/network_provider/extended_blocks_check.rs b/graph/src/components/network_provider/extended_blocks_check.rs index 059cc43fa08..f4d412795bc 100644 --- a/graph/src/components/network_provider/extended_blocks_check.rs +++ b/graph/src/components/network_provider/extended_blocks_check.rs @@ -128,7 +128,7 @@ mod tests { } } - #[tokio::test] + #[crate::test] async fn check_valid_when_disabled_for_chain() { let check = ExtendedBlocksCheck::new(["chain-1".into()]); let adapter = TestAdapter::default(); @@ -145,7 +145,7 @@ mod tests { assert_eq!(status, ProviderCheckStatus::Valid); } - 
#[tokio::test] + #[crate::test] async fn check_valid_when_disabled_for_multiple_chains() { let check = ExtendedBlocksCheck::new(["chain-1".into(), "chain-2".into()]); let adapter = TestAdapter::default(); @@ -173,7 +173,7 @@ mod tests { assert_eq!(status, ProviderCheckStatus::Valid); } - #[tokio::test] + #[crate::test] async fn check_valid_when_extended_blocks_are_supported() { let check = ExtendedBlocksCheck::new([]); @@ -192,7 +192,7 @@ mod tests { assert_eq!(status, ProviderCheckStatus::Valid); } - #[tokio::test] + #[crate::test] async fn check_fails_when_extended_blocks_are_not_supported() { let check = ExtendedBlocksCheck::new([]); @@ -211,7 +211,7 @@ mod tests { assert!(matches!(status, ProviderCheckStatus::Failed { .. })); } - #[tokio::test] + #[crate::test] async fn check_temporary_failure_when_provider_request_fails() { let check = ExtendedBlocksCheck::new([]); diff --git a/graph/src/components/network_provider/genesis_hash_check.rs b/graph/src/components/network_provider/genesis_hash_check.rs index 0cfd8c6d1b0..26c8f91bab1 100644 --- a/graph/src/components/network_provider/genesis_hash_check.rs +++ b/graph/src/components/network_provider/genesis_hash_check.rs @@ -29,7 +29,7 @@ impl GenesisHashCheck { } } - pub fn from_id_store(id_store: Arc) -> Self { + pub fn from_id_store(id_store: Box) -> Self { Self { chain_identifier_store: chain_id_validator(id_store), } @@ -68,7 +68,8 @@ impl ProviderCheck for GenesisHashCheck { let check_result = self .chain_identifier_store - .validate_identifier(chain_name, &chain_identifier); + .validate_identifier(chain_name, &chain_identifier) + .await; use ChainIdentifierValidationError::*; @@ -77,7 +78,8 @@ impl ProviderCheck for GenesisHashCheck { Err(IdentifierNotSet(_)) => { let update_result = self .chain_identifier_store - .update_identifier(chain_name, &chain_identifier); + .update_identifier(chain_name, &chain_identifier) + .await; if let Err(err) = update_result { let message = format!( @@ -190,7 +192,7 @@ mod tests { #[async_trait] impl ChainIdentifierValidator for TestChainIdentifierStore { - fn validate_identifier( + async fn validate_identifier( &self, _chain_name: &ChainName, _chain_identifier: &ChainIdentifier, @@ -198,7 +200,7 @@ mod tests { self.validate_identifier_calls.lock().unwrap().remove(0) } - fn update_identifier( + async fn update_identifier( &self, _chain_name: &ChainName, _chain_identifier: &ChainIdentifier, @@ -243,7 +245,7 @@ mod tests { } } - #[tokio::test] + #[crate::test] async fn check_temporary_failure_when_network_provider_request_fails() { let store = Arc::new(TestChainIdentifierStore::default()); let check = GenesisHashCheck::new(store); @@ -266,7 +268,7 @@ mod tests { )); } - #[tokio::test] + #[crate::test] async fn check_valid_when_store_successfully_validates_chain_identifier() { let store = Arc::new(TestChainIdentifierStore::default()); store.validate_identifier_call(Ok(())); @@ -293,7 +295,7 @@ mod tests { assert_eq!(status, ProviderCheckStatus::Valid); } - #[tokio::test] + #[crate::test] async fn check_temporary_failure_on_initial_chain_identifier_update_error() { let store = Arc::new(TestChainIdentifierStore::default()); store.validate_identifier_call(Err(ChainIdentifierValidationError::IdentifierNotSet( @@ -326,7 +328,7 @@ mod tests { )); } - #[tokio::test] + #[crate::test] async fn check_valid_on_initial_chain_identifier_update() { let store = Arc::new(TestChainIdentifierStore::default()); store.validate_identifier_call(Err(ChainIdentifierValidationError::IdentifierNotSet( @@ -356,7 +358,7 @@ mod tests 
{ assert_eq!(status, ProviderCheckStatus::Valid); } - #[tokio::test] + #[crate::test] async fn check_valid_when_stored_identifier_network_version_is_zero() { let store = Arc::new(TestChainIdentifierStore::default()); store.validate_identifier_call(Err(ChainIdentifierValidationError::NetVersionMismatch { @@ -387,7 +389,7 @@ mod tests { assert_eq!(status, ProviderCheckStatus::Valid); } - #[tokio::test] + #[crate::test] async fn check_fails_on_identifier_network_version_mismatch() { let store = Arc::new(TestChainIdentifierStore::default()); store.validate_identifier_call(Err(ChainIdentifierValidationError::NetVersionMismatch { @@ -418,7 +420,7 @@ mod tests { assert!(matches!(status, ProviderCheckStatus::Failed { .. })); } - #[tokio::test] + #[crate::test] async fn check_fails_on_identifier_genesis_hash_mismatch() { let store = Arc::new(TestChainIdentifierStore::default()); store.validate_identifier_call(Err( @@ -451,7 +453,7 @@ mod tests { assert!(matches!(status, ProviderCheckStatus::Failed { .. })); } - #[tokio::test] + #[crate::test] async fn check_temporary_failure_on_store_errors() { let store = Arc::new(TestChainIdentifierStore::default()); store diff --git a/graph/src/components/network_provider/provider_manager.rs b/graph/src/components/network_provider/provider_manager.rs index 300d85118b6..93136c516bc 100644 --- a/graph/src/components/network_provider/provider_manager.rs +++ b/graph/src/components/network_provider/provider_manager.rs @@ -494,7 +494,7 @@ mod tests { adapters.map(|adapter| adapter.id).collect() } - #[tokio::test] + #[crate::test] async fn no_providers() { let manager: ProviderManager> = ProviderManager::new(discard(), [], ProviderCheckStrategy::MarkAsValid); @@ -504,7 +504,7 @@ mod tests { assert_eq!(manager.providers(&chain_name()).await.unwrap().count(), 0); } - #[tokio::test] + #[crate::test] async fn no_providers_for_chain() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -533,7 +533,7 @@ mod tests { ); } - #[tokio::test] + #[crate::test] async fn multiple_providers() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -557,7 +557,7 @@ mod tests { ); } - #[tokio::test] + #[crate::test] async fn providers_unchecked_skips_provider_checks() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -573,7 +573,7 @@ mod tests { assert_eq!(ids(manager.providers_unchecked(&chain_name())), vec![1]); } - #[tokio::test] + #[crate::test] async fn successful_provider_check() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -599,7 +599,7 @@ mod tests { ); } - #[tokio::test] + #[crate::test] async fn multiple_successful_provider_checks() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -628,7 +628,7 @@ mod tests { ); } - #[tokio::test] + #[crate::test] async fn multiple_successful_provider_checks_on_multiple_adapters() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -662,7 +662,7 @@ mod tests { ); } - #[tokio::test] + #[crate::test] async fn successful_provider_check_for_a_pool_of_adapters_for_a_provider() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -691,7 +691,7 @@ mod tests { ); } - #[tokio::test] + #[crate::test] async fn multiple_successful_provider_checks_for_a_pool_of_adapters_for_a_provider() { let 
adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -723,7 +723,7 @@ mod tests { ); } - #[tokio::test] + #[crate::test] async fn provider_validation_timeout() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -753,7 +753,7 @@ mod tests { }; } - #[tokio::test] + #[crate::test] async fn no_providers_available() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -781,7 +781,7 @@ mod tests { }; } - #[tokio::test] + #[crate::test] async fn all_providers_failed() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -808,7 +808,7 @@ mod tests { }; } - #[tokio::test] + #[crate::test] async fn temporary_provider_check_failures_are_retried() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -838,7 +838,7 @@ mod tests { ); } - #[tokio::test] + #[crate::test] async fn final_provider_check_failures_are_not_retried() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -863,7 +863,7 @@ mod tests { assert!(manager.providers(&chain_name()).await.is_err()); } - #[tokio::test] + #[crate::test] async fn mix_valid_and_invalid_providers() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -899,7 +899,7 @@ mod tests { ); } - #[tokio::test] + #[crate::test] async fn one_provider_check_failure_is_enough_to_mark_an_provider_as_invalid() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); @@ -923,7 +923,7 @@ mod tests { assert!(manager.providers(&chain_name()).await.is_err()); } - #[tokio::test(flavor = "multi_thread")] + #[crate::test] async fn concurrent_providers_access_does_not_trigger_multiple_validations() { let adapter_1 = Arc::new(TestAdapter::new(1)); adapter_1.provider_name_call("provider_1".into()); diff --git a/graph/src/components/server/server.rs b/graph/src/components/server/server.rs index 28f760b5c70..b746848c86b 100644 --- a/graph/src/components/server/server.rs +++ b/graph/src/components/server/server.rs @@ -5,15 +5,15 @@ use std::sync::Arc; use hyper::body::Incoming; use hyper::Request; +use tokio::net::TcpListener; +use tokio::task::JoinHandle; +use crate::anyhow; use crate::cheap_clone::CheapClone; use crate::hyper::server::conn::http1; use crate::hyper::service::service_fn; use crate::hyper_util::rt::TokioIo; use crate::slog::error; -use crate::tokio::net::TcpListener; -use crate::tokio::task::JoinHandle; -use crate::{anyhow, tokio}; use crate::prelude::Logger; diff --git a/graph/src/components/store/entity_cache.rs b/graph/src/components/store/entity_cache.rs index 062dd67dfc2..11748415444 100644 --- a/graph/src/components/store/entity_cache.rs +++ b/graph/src/components/store/entity_cache.rs @@ -187,7 +187,7 @@ impl EntityCache { self.handler_updates.clear(); } - pub fn get( + pub async fn get( &mut self, key: &EntityKey, scope: GetScope, @@ -197,7 +197,7 @@ impl EntityCache { let mut entity: Option> = match scope { GetScope::Store => { if !self.current.contains_key(key) { - let entity = self.store.get(key)?; + let entity = self.store.get(key).await?; self.current.insert(key.clone(), entity.map(Arc::new)); } // Unwrap: we just inserted the entity @@ -213,7 +213,7 @@ impl EntityCache { // always creates it in a new style. 
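The `EntityCache` reads below are now awaited; on a `GetScope::Store` miss the cache performs one store lookup and memoizes the result in `self.current`, so later gets for the same key are served from memory. A minimal usage sketch; the helper is illustrative and the exact return and error types are assumed to match the surrounding module:

```rust
use std::sync::Arc;

// Sketch: read-through lookup against the now-async EntityCache.
async fn load_entity(
    cache: &mut EntityCache,
    key: &EntityKey,
) -> Result<Option<Arc<Entity>>, StoreError> {
    // The first call for `key` may await the backing store; repeats hit the cache.
    cache.get(key, GetScope::Store).await
}
```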
debug_assert!(match scope { GetScope::Store => { - entity == self.store.get(key).unwrap().map(Arc::new) + entity == self.store.get(key).await.unwrap().map(Arc::new) } GetScope::InBlock => true, }); @@ -233,7 +233,7 @@ impl EntityCache { Ok(entity) } - pub fn load_related( + pub async fn load_related( &mut self, eref: &LoadRelatedRequest, ) -> Result, anyhow::Error> { @@ -246,7 +246,7 @@ impl EntityCache { causality_region: eref.causality_region, }; - let mut entity_map = self.store.get_derived(&query)?; + let mut entity_map = self.store.get_derived(&query).await?; for (key, entity) in entity_map.iter() { // Only insert to the cache if it's not already there @@ -364,7 +364,7 @@ impl EntityCache { /// with existing data. The entity will be validated against the /// subgraph schema, and any errors will result in an `Err` being /// returned. - pub fn set( + pub async fn set( &mut self, key: EntityKey, entity: Entity, @@ -407,7 +407,7 @@ impl EntityCache { // lookup in the database and check again with an entity that merges // the existing entity with the changes if !is_valid { - let entity = self.get(&key, GetScope::Store)?.ok_or_else(|| { + let entity = self.get(&key, GetScope::Store).await?.ok_or_else(|| { anyhow!( "Failed to read entity {}[{}] back from cache", key.entity_type, @@ -471,7 +471,7 @@ impl EntityCache { /// to the current state is actually needed. /// /// Also returns the updated `LfuCache`. - pub fn as_modifications( + pub async fn as_modifications( mut self, block: BlockNumber, ) -> Result { @@ -493,7 +493,7 @@ impl EntityCache { // violation in the database, ensuring correctness let missing = missing.filter(|key| !key.entity_type.is_immutable()); - for (entity_key, entity) in self.store.get_many(missing.cloned().collect())? { + for (entity_key, entity) in self.store.get_many(missing.cloned().collect()).await? { self.current.insert(entity_key, Some(Arc::new(entity))); } diff --git a/graph/src/components/store/mod.rs b/graph/src/components/store/mod.rs index f3872b16580..818718a5f74 100644 --- a/graph/src/components/store/mod.rs +++ b/graph/src/components/store/mod.rs @@ -28,6 +28,8 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, RwLock}; use std::time::Duration; +use async_trait::async_trait; + use crate::blockchain::{Block, BlockHash, BlockPtr}; use crate::cheap_clone::CheapClone; use crate::components::store::write::EntityModification; @@ -867,16 +869,20 @@ impl EmptyStore { } } +#[async_trait] impl ReadStore for EmptyStore { - fn get(&self, _key: &EntityKey) -> Result, StoreError> { + async fn get(&self, _key: &EntityKey) -> Result, StoreError> { Ok(None) } - fn get_many(&self, _: BTreeSet) -> Result, StoreError> { + async fn get_many( + &self, + _: BTreeSet, + ) -> Result, StoreError> { Ok(BTreeMap::new()) } - fn get_derived( + async fn get_derived( &self, _query: &DerivedEntityQuery, ) -> Result, StoreError> { diff --git a/graph/src/components/store/traits.rs b/graph/src/components/store/traits.rs index 2d115aeff07..86f8d595f10 100644 --- a/graph/src/components/store/traits.rs +++ b/graph/src/components/store/traits.rs @@ -31,30 +31,32 @@ pub trait SubscriptionManager: Send + Sync + 'static { /// Subgraph forking is the process of lazily fetching entities /// from another subgraph's store (usually a remote one). 
+#[async_trait] pub trait SubgraphFork: Send + Sync + 'static { - fn fetch(&self, entity_type: String, id: String) -> Result, StoreError>; + async fn fetch(&self, entity_type: String, id: String) -> Result, StoreError>; } /// A special trait to handle looking up ENS names from special rainbow /// tables that need to be manually loaded into the system +#[async_trait] pub trait EnsLookup: Send + Sync + 'static { /// Find the reverse of keccak256 for `hash` through looking it up in the /// rainbow table. - fn find_name(&self, hash: &str) -> Result, StoreError>; + async fn find_name(&self, hash: &str) -> Result, StoreError>; // Check if the rainbow table is filled. - fn is_table_empty(&self) -> Result; + async fn is_table_empty(&self) -> Result; } /// An entry point for all operations that require access to the node's storage /// layer. It provides access to a [`BlockStore`] and a [`SubgraphStore`]. pub trait Store: Clone + StatusStore + Send + Sync + 'static { /// The [`BlockStore`] implementor used by this [`Store`]. - type BlockStore: BlockStore; + type BlockStore: BlockStore + CheapClone; /// The [`SubgraphStore`] implementor used by this [`Store`]. type SubgraphStore: SubgraphStore; - fn block_store(&self) -> Arc; + fn block_store(&self) -> Self::BlockStore; fn subgraph_store(&self) -> Arc; } @@ -67,7 +69,7 @@ pub trait SubgraphStore: Send + Sync + 'static { /// Check if the store is accepting queries for the specified subgraph. /// May return true even if the specified subgraph is not currently assigned to an indexing /// node, as the store will still accept queries. - fn is_deployed(&self, id: &DeploymentHash) -> Result; + async fn is_deployed(&self, id: &DeploymentHash) -> Result; async fn subgraph_features( &self, @@ -78,7 +80,7 @@ pub trait SubgraphStore: Send + Sync + 'static { /// already exists (as identified by the `schema.id`), reuse that, otherwise /// create a new deployment, and point the current or pending version of /// `name` at it, depending on the `mode` - fn create_subgraph_deployment( + async fn create_subgraph_deployment( &self, name: SubgraphName, schema: &InputSchema, @@ -89,33 +91,39 @@ pub trait SubgraphStore: Send + Sync + 'static { ) -> Result; /// Create a subgraph_feature record in the database - fn create_subgraph_features(&self, features: DeploymentFeatures) -> Result<(), StoreError>; + async fn create_subgraph_features( + &self, + features: DeploymentFeatures, + ) -> Result<(), StoreError>; /// Create a new subgraph with the given name. If one already exists, use /// the existing one. Return the `id` of the newly created or existing /// subgraph - fn create_subgraph(&self, name: SubgraphName) -> Result; + async fn create_subgraph(&self, name: SubgraphName) -> Result; /// Remove a subgraph and all its versions; if deployments that were used /// by this subgraph do not need to be indexed anymore, also remove /// their assignment, but keep the deployments themselves around - fn remove_subgraph(&self, name: SubgraphName) -> Result<(), StoreError>; + async fn remove_subgraph(&self, name: SubgraphName) -> Result<(), StoreError>; /// Assign the subgraph with `id` to the node `node_id`. If there is no /// assignment for the given deployment, report an error. 
- fn reassign_subgraph( + async fn reassign_subgraph( &self, deployment: &DeploymentLocator, node_id: &NodeId, ) -> Result<(), StoreError>; - fn unassign_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError>; + async fn unassign_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError>; - fn pause_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError>; + async fn pause_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError>; - fn resume_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError>; + async fn resume_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError>; - fn assigned_node(&self, deployment: &DeploymentLocator) -> Result<Option<NodeId>, StoreError>; + async fn assigned_node( + &self, + deployment: &DeploymentLocator, + ) -> Result<Option<NodeId>, StoreError>; /// Returns Option<(node_id,is_paused)> where `node_id` is the node that /// the subgraph is assigned to, and `is_paused` is true if the @@ -126,35 +134,38 @@ deployment: &DeploymentLocator, ) -> Result<Option<(NodeId, bool)>, StoreError>; - fn assignments(&self, node: &NodeId) -> Result<Vec<DeploymentLocator>, StoreError>; - /// Returns assignments that are not paused async fn active_assignments(&self, node: &NodeId) -> Result<Vec<DeploymentLocator>, StoreError>; /// Return `true` if a subgraph `name` exists, regardless of whether the /// subgraph has any deployments attached to it - fn subgraph_exists(&self, name: &SubgraphName) -> Result<bool, StoreError>; + async fn subgraph_exists(&self, name: &SubgraphName) -> Result<bool, StoreError>; /// Returns a collection of all [`EntityModification`] items in relation to /// the given [`BlockNumber`]. No distinction is made between inserts and /// updates, which may be returned as either [`EntityModification::Insert`] /// or [`EntityModification::Overwrite`]. - fn entity_changes_in_block( + async fn entity_changes_in_block( &self, subgraph_id: &DeploymentHash, block_number: BlockNumber, ) -> Result<Vec<EntityOperation>, StoreError>; /// Return the GraphQL schema supplied by the user - fn input_schema(&self, subgraph_id: &DeploymentHash) -> Result<InputSchema, StoreError>; + async fn input_schema(&self, subgraph_id: &DeploymentHash) -> Result<InputSchema, StoreError>; /// Return a bool representing whether there is a pending graft for the subgraph - fn graft_pending(&self, id: &DeploymentHash) -> Result<bool, StoreError>; + async fn graft_pending(&self, id: &DeploymentHash) -> Result<bool, StoreError>; /// Return the GraphQL schema that was derived from the user's schema by /// adding a root query type etc. to it - fn api_schema( + /// + /// We use the store as a cache for the `ApiSchema`, so that we do not + /// have to rebuild the `ApiSchema` from `InputSchema.api_schema()` for + /// every query. That's a bit clumsy and it might be better to make the + /// `InputSchema` the cache instead. + async fn api_schema( &self, subgraph_id: &DeploymentHash, api_version: &ApiVersion, @@ -162,7 +173,7 @@ /// Return a `SubgraphFork`, derived from the user's `debug-fork` deployment argument, /// that is used for debugging purposes only. - fn debug_fork( + async fn debug_fork( &self, subgraph_id: &DeploymentHash, logger: Logger, @@ -205,11 +216,11 @@ async fn is_healthy(&self, id: &DeploymentHash) -> Result<bool, StoreError>; /// Find all deployment locators for the subgraph with the given hash. - fn locators(&self, hash: &str) -> Result<Vec<DeploymentLocator>, StoreError>; + async fn locators(&self, hash: &str) -> Result<Vec<DeploymentLocator>, StoreError>; /// Find the deployment locator for the active deployment with the given /// hash.
Returns `None` if there is no deployment with that hash - fn active_locator(&self, hash: &str) -> Result, StoreError>; + async fn active_locator(&self, hash: &str) -> Result, StoreError>; /// This migrates subgraphs that existed before the raw_yaml column was added. async fn set_manifest_raw_yaml( @@ -221,21 +232,22 @@ pub trait SubgraphStore: Send + Sync + 'static { /// Return `true` if the `instrument` flag for the deployment is set. /// When this flag is set, indexing of the deployment should log /// additional diagnostic information - fn instrument(&self, deployment: &DeploymentLocator) -> Result; + async fn instrument(&self, deployment: &DeploymentLocator) -> Result; } +#[async_trait] pub trait ReadStore: Send + Sync + 'static { /// Looks up an entity using the given store key at the latest block. - fn get(&self, key: &EntityKey) -> Result, StoreError>; + async fn get(&self, key: &EntityKey) -> Result, StoreError>; /// Look up multiple entities as of the latest block. - fn get_many( + async fn get_many( &self, keys: BTreeSet, ) -> Result, StoreError>; /// Reverse lookup - fn get_derived( + async fn get_derived( &self, query_derived: &DerivedEntityQuery, ) -> Result, StoreError>; @@ -244,23 +256,24 @@ pub trait ReadStore: Send + Sync + 'static { } // This silly impl is needed until https://github.com/rust-lang/rust/issues/65991 is stable. +#[async_trait] impl ReadStore for Arc { - fn get(&self, key: &EntityKey) -> Result, StoreError> { - (**self).get(key) + async fn get(&self, key: &EntityKey) -> Result, StoreError> { + (**self).get(key).await } - fn get_many( + async fn get_many( &self, keys: BTreeSet, ) -> Result, StoreError> { - (**self).get_many(keys) + (**self).get_many(keys).await } - fn get_derived( + async fn get_derived( &self, entity_derived: &DerivedEntityQuery, ) -> Result, StoreError> { - (**self).get_derived(entity_derived) + (**self).get_derived(entity_derived).await } fn input_schema(&self) -> InputSchema { @@ -298,7 +311,7 @@ impl DeploymentCursorTracker for Arc { pub trait SourceableStore: Sync + Send + 'static { /// Returns all versions of entities of the given entity_type that were /// changed in the given block_range. - fn get_range( + async fn get_range( &self, entity_types: Vec, causality_region: CausalityRegion, @@ -314,13 +327,15 @@ pub trait SourceableStore: Sync + Send + 'static { // This silly impl is needed until https://github.com/rust-lang/rust/issues/65991 is stable. #[async_trait] impl SourceableStore for Arc { - fn get_range( + async fn get_range( &self, entity_types: Vec, causality_region: CausalityRegion, block_range: Range, ) -> Result>, StoreError> { - (**self).get_range(entity_types, causality_region, block_range) + (**self) + .get_range(entity_types, causality_region, block_range) + .await } fn input_schema(&self) -> InputSchema { @@ -361,7 +376,7 @@ pub trait WritableStore: ReadStore + DeploymentCursorTracker { /// If a non-deterministic error happened and the current deployment head is past the error /// block range, this function unfails the subgraph and deletes the error. - fn unfail_non_deterministic_error( + async fn unfail_non_deterministic_error( &self, current_ptr: &BlockPtr, ) -> Result; @@ -393,13 +408,13 @@ pub trait WritableStore: ReadStore + DeploymentCursorTracker { ) -> Result<(), StoreError>; /// Force synced status, used for testing. 
- fn deployment_synced(&self, block_ptr: BlockPtr) -> Result<(), StoreError>; + async fn deployment_synced(&self, block_ptr: BlockPtr) -> Result<(), StoreError>; /// Return true if the deployment with the given id is fully synced, and return false otherwise. /// Cheap, cached operation. fn is_deployment_synced(&self) -> bool; - fn pause_subgraph(&self) -> Result<(), StoreError>; + async fn pause_subgraph(&self) -> Result<(), StoreError>; /// Load the dynamic data sources for the given deployment async fn load_dynamic_data_sources( @@ -447,10 +462,11 @@ pub trait QueryStoreManager: Send + Sync + 'static { ) -> Result, QueryExecutionError>; } +#[async_trait] pub trait BlockStore: ChainIdStore + Send + Sync + 'static { type ChainStore: ChainStore; - fn chain_store(&self, network: &str) -> Option>; + async fn chain_store(&self, network: &str) -> Option>; } /// An interface for tracking the chain head in the store used by most chain @@ -467,7 +483,7 @@ pub trait ChainHeadStore: Send + Sync { /// Get the current head block cursor for this chain. /// /// The head block cursor will be None on initial set up. - fn chain_head_cursor(&self) -> Result, Error>; + async fn chain_head_cursor(&self) -> Result, Error>; /// This method does actually three operations: /// - Upserts received block into blocks table @@ -483,10 +499,10 @@ pub trait ChainHeadStore: Send + Sync { #[async_trait] pub trait ChainIdStore: Send + Sync + 'static { /// Return the chain identifier for this store. - fn chain_identifier(&self, chain_name: &ChainName) -> Result; + async fn chain_identifier(&self, chain_name: &ChainName) -> Result; /// Update the chain identifier for this store. - fn set_chain_identifier( + async fn set_chain_identifier( &self, chain_name: &ChainName, ident: &ChainIdentifier, @@ -497,12 +513,12 @@ pub trait ChainIdStore: Send + Sync + 'static { #[async_trait] pub trait ChainStore: ChainHeadStore { /// Get a pointer to this blockchain's genesis block. - fn genesis_block_ptr(&self) -> Result; + async fn genesis_block_ptr(&self) -> Result; /// Insert a block into the store (or update if they are already present). async fn upsert_block(&self, block: Arc) -> Result<(), Error>; - fn upsert_light_blocks(&self, blocks: &[&dyn Block]) -> Result<(), Error>; + async fn upsert_light_blocks(&self, blocks: &[&dyn Block]) -> Result<(), Error>; /// Try to update the head block pointer to the block with the highest block number. /// @@ -563,17 +579,24 @@ pub trait ChainStore: ChainHeadStore { /// and the number of blocks deleted. /// We will never remove blocks that are within `ancestor_count` of /// the chain head. - fn cleanup_cached_blocks( + async fn cleanup_cached_blocks( &self, ancestor_count: BlockNumber, ) -> Result, Error>; /// Return the hashes of all blocks with the given number - fn block_hashes_by_block_number(&self, number: BlockNumber) -> Result, Error>; + async fn block_hashes_by_block_number( + &self, + number: BlockNumber, + ) -> Result, Error>; /// Confirm that block number `number` has hash `hash` and that the store /// may purge any other blocks with that number - fn confirm_block_hash(&self, number: BlockNumber, hash: &BlockHash) -> Result; + async fn confirm_block_hash( + &self, + number: BlockNumber, + hash: &BlockHash, + ) -> Result; /// Find the block with `block_hash` and return the network name, number, timestamp and parentHash if present. /// Currently, the timestamp is only returned if it's present in the top level block. 
This format is @@ -607,18 +630,19 @@ pub trait ChainStore: ChainHeadStore { ) -> Result<(), Error>; /// Return the chain identifier for this store. - fn chain_identifier(&self) -> Result; + async fn chain_identifier(&self) -> Result; /// Workaround for Rust issue #65991 that keeps us from using an /// `Arc` as an `Arc` fn as_head_store(self: Arc) -> Arc; } +#[async_trait] pub trait EthereumCallCache: Send + Sync + 'static { /// Returns the return value of the provided Ethereum call, if present /// in the cache. A return of `None` indicates that we know nothing /// about the call. - fn get_call( + async fn get_call( &self, call: &call::Request, block: BlockPtr, @@ -627,7 +651,7 @@ pub trait EthereumCallCache: Send + Sync + 'static { /// Get the return values of many Ethereum calls. For the ones found in /// the cache, return a `Response`; the ones that were not found are /// returned as the original `Request` - fn get_calls( + async fn get_calls( &self, reqs: &[call::Request], block: BlockPtr, @@ -635,11 +659,11 @@ pub trait EthereumCallCache: Send + Sync + 'static { /// Returns all cached calls for a given `block`. This method does *not* /// update the last access time of the returned cached calls. - fn get_calls_in_block(&self, block: BlockPtr) -> Result, Error>; + async fn get_calls_in_block(&self, block: BlockPtr) -> Result, Error>; /// Stores the provided Ethereum call in the cache. - fn set_call( - &self, + async fn set_call( + self: Arc, logger: &Logger, call: call::Request, block: BlockPtr, @@ -655,7 +679,7 @@ pub struct QueryPermit { /// Store operations used when serving queries for a specific deployment #[async_trait] pub trait QueryStore: Send + Sync { - fn find_query_values( + async fn find_query_values( &self, query: EntityQuery, ) -> Result<(Vec, Trace), QueryExecutionError>; @@ -686,9 +710,9 @@ pub trait QueryStore: Send + Sync { /// return details about it needed for executing queries async fn deployment_state(&self) -> Result; - fn api_schema(&self) -> Result, QueryExecutionError>; + async fn api_schema(&self) -> Result, QueryExecutionError>; - fn input_schema(&self) -> Result; + async fn input_schema(&self) -> Result; fn network_name(&self) -> &str; @@ -710,23 +734,23 @@ pub trait StatusStore: Send + Sync + 'static { /// A permit should be acquired before starting query execution. async fn query_permit(&self) -> QueryPermit; - fn status(&self, filter: status::Filter) -> Result, StoreError>; + async fn status(&self, filter: status::Filter) -> Result, StoreError>; /// Support for the explorer-specific API - fn version_info(&self, version_id: &str) -> Result; + async fn version_info(&self, version_id: &str) -> Result; /// Support for the explorer-specific API; note that `subgraph_id` must be /// the id of an entry in `subgraphs.subgraph`, not that of a deployment. /// The return values are the ids of the `subgraphs.subgraph_version` for /// the current and pending versions of the subgraph - fn versions_for_subgraph_id( + async fn versions_for_subgraph_id( &self, subgraph_id: &str, ) -> Result<(Option, Option), StoreError>; /// Support for the explorer-specific API. Returns a vector of (name, version) of all /// subgraphs for a given deployment hash. 
- fn subgraphs_for_deployment_hash( + async fn subgraphs_for_deployment_hash( &self, deployment_hash: &str, ) -> Result, StoreError>; diff --git a/graph/src/endpoint.rs b/graph/src/endpoint.rs index bdff8dc8135..a9fdd99a98c 100644 --- a/graph/src/endpoint.rs +++ b/graph/src/endpoint.rs @@ -178,7 +178,7 @@ mod test { endpoint::{EndpointMetrics, ProviderName}, }; - #[tokio::test] + #[crate::test] async fn should_increment_and_reset() { let (a, b, c): (ProviderName, ProviderName, ProviderName) = ("a".into(), "b".into(), "c".into()); diff --git a/graph/src/firehose/endpoints.rs b/graph/src/firehose/endpoints.rs index 448eb845496..b05390154ed 100644 --- a/graph/src/firehose/endpoints.rs +++ b/graph/src/firehose/endpoints.rs @@ -797,7 +797,7 @@ mod test { use crate::endpoint::EndpointMetrics; use crate::firehose::SubgraphLimit; - #[tokio::test] + #[crate::test] async fn firehose_endpoint_errors() { let endpoint = vec![Arc::new(FirehoseEndpoint::new( String::new(), @@ -830,7 +830,7 @@ mod test { assert!(err.to_string().contains("unable to get a connection")); } - #[tokio::test] + #[crate::test] async fn firehose_endpoint_with_limit() { let endpoint = vec![Arc::new(FirehoseEndpoint::new( String::new(), @@ -858,7 +858,7 @@ mod test { endpoints.endpoint().await.unwrap(); } - #[tokio::test] + #[crate::test] async fn firehose_endpoint_no_traffic() { let endpoint = vec![Arc::new(FirehoseEndpoint::new( String::new(), @@ -878,7 +878,7 @@ mod test { assert!(err.to_string().contains("conn_pool_size")); } - #[tokio::test] + #[crate::test] async fn firehose_endpoint_selection() { let logger = Logger::root(Discard, o!()); let endpoint_metrics = Arc::new(EndpointMetrics::new( diff --git a/graph/src/ipfs/gateway_client.rs b/graph/src/ipfs/gateway_client.rs index 5c2da25daff..862a46656af 100644 --- a/graph/src/ipfs/gateway_client.rs +++ b/graph/src/ipfs/gateway_client.rs @@ -217,7 +217,7 @@ mod tests { Duration::from_millis(millis) } - #[tokio::test] + #[crate::test] async fn new_fails_to_create_the_client_if_gateway_is_not_accessible() { let server = mock_server().await; @@ -226,7 +226,7 @@ mod tests { .unwrap_err(); } - #[tokio::test] + #[crate::test] async fn new_creates_the_client_if_it_can_check_the_gateway() { let server = mock_server().await; @@ -252,7 +252,7 @@ mod tests { .unwrap(); } - #[tokio::test] + #[crate::test] async fn new_retries_gateway_check_on_non_deterministic_errors() { let server = mock_server().await; @@ -272,14 +272,14 @@ mod tests { .unwrap(); } - #[tokio::test] + #[crate::test] async fn new_unchecked_creates_the_client_without_checking_the_gateway() { let server = mock_server().await; IpfsGatewayClient::new_unchecked(server.uri(), IpfsMetrics::test(), &discard()).unwrap(); } - #[tokio::test] + #[crate::test] async fn cat_stream_returns_the_content() { let (server, client) = make_client().await; @@ -304,7 +304,7 @@ mod tests { assert_eq!(bytes.as_ref(), b"some data") } - #[tokio::test] + #[crate::test] async fn cat_stream_fails_on_timeout() { let (server, client) = make_client().await; @@ -326,7 +326,7 @@ mod tests { assert!(matches!(result, Err(_))); } - #[tokio::test] + #[crate::test] async fn cat_stream_retries_the_request_on_non_deterministic_errors() { let (server, client) = make_client().await; @@ -354,7 +354,7 @@ mod tests { .unwrap(); } - #[tokio::test] + #[crate::test] async fn cat_returns_the_content() { let (server, client) = make_client().await; @@ -378,7 +378,7 @@ mod tests { assert_eq!(bytes.as_ref(), b"some data"); } - #[tokio::test] + #[crate::test] async fn 
cat_returns_the_content_if_max_size_is_equal_to_the_content_size() { let (server, client) = make_client().await; @@ -404,7 +404,7 @@ mod tests { assert_eq!(bytes.as_ref(), data); } - #[tokio::test] + #[crate::test] async fn cat_fails_if_content_is_too_large() { let (server, client) = make_client().await; @@ -428,7 +428,7 @@ mod tests { .unwrap_err(); } - #[tokio::test] + #[crate::test] async fn cat_fails_on_timeout() { let (server, client) = make_client().await; @@ -450,7 +450,7 @@ mod tests { .unwrap_err(); } - #[tokio::test] + #[crate::test] async fn cat_retries_the_request_on_non_deterministic_errors() { let (server, client) = make_client().await; @@ -481,7 +481,7 @@ mod tests { assert_eq!(bytes.as_ref(), b"some data"); } - #[tokio::test] + #[crate::test] async fn get_block_returns_the_block_content() { let (server, client) = make_client().await; @@ -499,7 +499,7 @@ mod tests { assert_eq!(bytes.as_ref(), b"some data"); } - #[tokio::test] + #[crate::test] async fn get_block_fails_on_timeout() { let (server, client) = make_client().await; @@ -520,7 +520,7 @@ mod tests { .unwrap_err(); } - #[tokio::test] + #[crate::test] async fn get_block_retries_the_request_on_non_deterministic_errors() { let (server, client) = make_client().await; @@ -550,7 +550,7 @@ mod tests { assert_eq!(bytes.as_ref(), b"some data"); } - #[tokio::test] + #[crate::test] async fn operation_names_include_cid_for_debugging() { use slog::{o, Drain, Logger, Record}; use std::sync::{Arc, Mutex}; diff --git a/graph/src/ipfs/pool.rs b/graph/src/ipfs/pool.rs index dab1191ccce..bf8943036c1 100644 --- a/graph/src/ipfs/pool.rs +++ b/graph/src/ipfs/pool.rs @@ -101,7 +101,7 @@ mod tests { Duration::from_millis(millis) } - #[tokio::test] + #[crate::test] async fn cat_stream_streams_the_response_from_the_fastest_client() { let (server_1, client_1) = make_client().await; let (server_2, client_2) = make_client().await; @@ -154,7 +154,7 @@ mod tests { assert_eq!(bytes.as_ref(), b"server_3"); } - #[tokio::test] + #[crate::test] async fn cat_streams_the_response_from_the_fastest_client() { let (server_1, client_1) = make_client().await; let (server_2, client_2) = make_client().await; @@ -207,7 +207,7 @@ mod tests { assert_eq!(bytes.as_ref(), b"server_3") } - #[tokio::test] + #[crate::test] async fn get_block_streams_the_response_from_the_fastest_client() { let (server_1, client_1) = make_client().await; let (server_2, client_2) = make_client().await; diff --git a/graph/src/ipfs/retry_policy.rs b/graph/src/ipfs/retry_policy.rs index 2e80c5e9c5d..783f506d568 100644 --- a/graph/src/ipfs/retry_policy.rs +++ b/graph/src/ipfs/retry_policy.rs @@ -57,7 +57,7 @@ mod tests { ContentPath::new(CID).unwrap() } - #[tokio::test] + #[crate::test] async fn retry_policy_none_disables_retries() { let counter = Arc::new(AtomicU64::new(0)); @@ -81,7 +81,7 @@ mod tests { assert!(matches!(err, IpfsError::RequestTimeout { .. })); } - #[tokio::test] + #[crate::test] async fn retry_policy_networking_retries_only_network_related_errors() { let counter = Arc::new(AtomicU64::new(0)); @@ -116,7 +116,7 @@ mod tests { assert!(matches!(err, IpfsError::RequestTimeout { .. 
})); } - #[tokio::test] + #[crate::test] async fn retry_policy_networking_stops_on_success() { let counter = Arc::new(AtomicU64::new(0)); @@ -150,7 +150,7 @@ mod tests { assert_eq!(counter.load(Ordering::SeqCst), 10); } - #[tokio::test] + #[crate::test] async fn retry_policy_non_deterministic_retries_all_non_deterministic_errors() { let counter = Arc::new(AtomicU64::new(0)); @@ -182,7 +182,7 @@ mod tests { assert!(matches!(err, IpfsError::ContentTooLarge { .. })); } - #[tokio::test] + #[crate::test] async fn retry_policy_non_deterministic_stops_on_success() { let counter = Arc::new(AtomicU64::new(0)); diff --git a/graph/src/ipfs/rpc_client.rs b/graph/src/ipfs/rpc_client.rs index 8d5d6fe643d..e5efcc122d0 100644 --- a/graph/src/ipfs/rpc_client.rs +++ b/graph/src/ipfs/rpc_client.rs @@ -186,7 +186,7 @@ mod tests { Duration::from_millis(millis) } - #[tokio::test] + #[crate::test] async fn new_fails_to_create_the_client_if_rpc_api_is_not_accessible() { let server = mock_server().await; @@ -195,7 +195,7 @@ mod tests { .unwrap_err(); } - #[tokio::test] + #[crate::test] async fn new_creates_the_client_if_it_can_check_the_rpc_api() { let server = mock_server().await; @@ -210,7 +210,7 @@ mod tests { .unwrap(); } - #[tokio::test] + #[crate::test] async fn new_retries_rpc_api_check_on_non_deterministic_errors() { let server = mock_server().await; @@ -232,14 +232,14 @@ mod tests { .unwrap(); } - #[tokio::test] + #[crate::test] async fn new_unchecked_creates_the_client_without_checking_the_rpc_api() { let server = mock_server().await; IpfsRpcClient::new_unchecked(server.uri(), IpfsMetrics::test(), &discard()).unwrap(); } - #[tokio::test] + #[crate::test] async fn cat_stream_returns_the_content() { let (server, client) = make_client().await; @@ -264,7 +264,7 @@ mod tests { assert_eq!(bytes.as_ref(), b"some data"); } - #[tokio::test] + #[crate::test] async fn cat_stream_fails_on_timeout() { let (server, client) = make_client().await; @@ -286,7 +286,7 @@ mod tests { assert!(matches!(result, Err(_))); } - #[tokio::test] + #[crate::test] async fn cat_stream_retries_the_request_on_non_deterministic_errors() { let (server, client) = make_client().await; @@ -314,7 +314,7 @@ mod tests { .unwrap(); } - #[tokio::test] + #[crate::test] async fn cat_returns_the_content() { let (server, client) = make_client().await; @@ -338,7 +338,7 @@ mod tests { assert_eq!(bytes.as_ref(), b"some data"); } - #[tokio::test] + #[crate::test] async fn cat_returns_the_content_if_max_size_is_equal_to_the_content_size() { let (server, client) = make_client().await; @@ -364,7 +364,7 @@ mod tests { assert_eq!(bytes.as_ref(), data); } - #[tokio::test] + #[crate::test] async fn cat_fails_if_content_is_too_large() { let (server, client) = make_client().await; @@ -388,7 +388,7 @@ mod tests { .unwrap_err(); } - #[tokio::test] + #[crate::test] async fn cat_fails_on_timeout() { let (server, client) = make_client().await; @@ -410,7 +410,7 @@ mod tests { .unwrap_err(); } - #[tokio::test] + #[crate::test] async fn cat_retries_the_request_on_non_deterministic_errors() { let (server, client) = make_client().await; @@ -441,7 +441,7 @@ mod tests { assert_eq!(bytes.as_ref(), b"some data"); } - #[tokio::test] + #[crate::test] async fn get_block_returns_the_block_content() { let (server, client) = make_client().await; @@ -459,7 +459,7 @@ mod tests { assert_eq!(bytes.as_ref(), b"some data"); } - #[tokio::test] + #[crate::test] async fn get_block_fails_on_timeout() { let (server, client) = make_client().await; @@ -480,7 +480,7 @@ mod tests { .unwrap_err(); } 
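All of these `#[tokio::test]` → `#[crate::test]` swaps route tests onto the single shared `TEST_RUNTIME` that this diff introduces in `graph/src/tokio.rs` (see below). A hand-written sketch of what the attribute has to amount to — the real expansion lives in `graph_derive` and may differ in detail:

use tokio::runtime::{Builder, Runtime};

lazy_static::lazy_static! {
    // One process-wide runtime shared by every test in the crate.
    static ref TEST_RUNTIME: Runtime =
        Builder::new_multi_thread().enable_all().build().unwrap();
}

// `#[crate::test] async fn cat_returns_the_content() { .. }` plausibly
// expands to an ordinary sync test that drives the async body on the
// shared runtime instead of building a fresh runtime per test:
#[test]
fn cat_returns_the_content() {
    TEST_RUNTIME.block_on(async {
        assert_eq!(1 + 1, 2); // stand-in for the real test body
    });
}

Sharing one runtime matters because diesel-async drives each database connection from a background task; if that task lands on a per-test runtime that is later dropped, subsequent use of the connection fails with `Connection closed` — the rationale spelled out in the `TEST_RUNTIME` doc comment further down.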
- #[tokio::test] + #[crate::test] async fn get_block_retries_the_request_on_non_deterministic_errors() { let (server, client) = make_client().await; diff --git a/graph/src/lib.rs b/graph/src/lib.rs index 05407603f48..3166db5971b 100644 --- a/graph/src/lib.rs +++ b/graph/src/lib.rs @@ -38,8 +38,10 @@ pub mod env; pub mod ipfs; /// Wrapper for spawning tasks that abort on panic, which is our default. -mod task_spawn; -pub use task_spawn::{ +mod tokio; +#[cfg(debug_assertions)] +pub use tokio::TEST_RUNTIME; +pub use tokio::{ block_on, spawn, spawn_allow_panic, spawn_blocking, spawn_blocking_allow_panic, spawn_thread, }; @@ -48,6 +50,7 @@ pub use bytes; pub use futures01; pub use futures03; pub use graph_derive as derive; +pub use graph_derive::test; pub use http; pub use http0; pub use http_body_util; @@ -62,9 +65,6 @@ pub use slog; pub use sqlparser; pub use stable_hash; pub use stable_hash_legacy; -pub use tokio; -pub use tokio_retry; -pub use tokio_stream; pub use url; /// A prelude that makes all system component traits and data types available. @@ -77,7 +77,6 @@ pub use url; pub mod prelude { pub use ::anyhow; pub use anyhow::{anyhow, Context as _, Error}; - pub use async_trait::async_trait; pub use atty; pub use chrono; pub use diesel; diff --git a/graph/src/log/elastic.rs b/graph/src/log/elastic.rs index 777fbb0a84d..eb285d3d6e6 100644 --- a/graph/src/log/elastic.rs +++ b/graph/src/log/elastic.rs @@ -202,7 +202,7 @@ impl ElasticDrain { let mut interval = tokio::time::interval(self.config.flush_interval); let max_retries = self.config.max_retries; - crate::task_spawn::spawn(async move { + crate::tokio::spawn(async move { loop { interval.tick().await; diff --git a/graph/src/runtime/asc_heap.rs b/graph/src/runtime/asc_heap.rs index 6de4cc46a06..4f2f5c41a87 100644 --- a/graph/src/runtime/asc_heap.rs +++ b/graph/src/runtime/asc_heap.rs @@ -1,12 +1,12 @@ use std::mem::MaybeUninit; +use async_trait::async_trait; use semver::Version; use super::{ gas::GasCounter, AscIndexId, AscPtr, AscType, DeterministicHostError, HostExportError, IndexForAscTypeId, }; -use crate::prelude::async_trait; // A 128 limit is plenty for any subgraph, while the `fn recursion_limit` test ensures it is not // large enough to cause stack overflows. diff --git a/graph/src/runtime/mod.rs b/graph/src/runtime/mod.rs index cba8a69b0cc..7958b991598 100644 --- a/graph/src/runtime/mod.rs +++ b/graph/src/runtime/mod.rs @@ -14,15 +14,15 @@ pub use asc_heap::{ pub use asc_ptr::AscPtr; use anyhow::Error; +use async_trait::async_trait; use semver::Version; + use std::convert::TryInto; use std::fmt; use std::mem::size_of; use self::gas::GasCounter; -use crate::prelude::async_trait; - /// Marker trait for AssemblyScript types that the id should /// be in the header. pub trait AscIndexId { diff --git a/graph/src/task_spawn.rs b/graph/src/tokio.rs similarity index 56% rename from graph/src/task_spawn.rs rename to graph/src/tokio.rs index dd1477bb1c8..1f7cb05f1cd 100644 --- a/graph/src/task_spawn.rs +++ b/graph/src/tokio.rs @@ -1,20 +1,49 @@ -//! The functions in this module should be used to execute futures, serving as a facade to the -//! underlying executor implementation which currently is tokio. This serves a few purposes: -//! - Avoid depending directly on tokio APIs, making upgrades or a potential switch easier. -//! - Reflect our chosen default semantics of aborting on task panic, offering `*_allow_panic` -//! functions to opt out of that. -//! 
- Reflect that historically we've used blocking futures due to making DB calls directly within
-//! futures. This point should go away once https://github.com/graphprotocol/graph-node/issues/905
-//! is resolved. Then the blocking flavors should no longer accept futures but closures.
+//! Helpers for dealing with certain aspects of tokio.
 //!
-//! These should not be called from within executors other than tokio, particularly the blocking
-//! functions will panic in that case. We should generally avoid mixing executors whenever possible.
+//!
+//! This module sets up a runtime on which all tests should run, as well as
+//! providing some functions for spawning tasks with our desired semantics.
+//!
+//! The functions in this module should be used to execute futures, serving
+//! as a facade to the underlying executor implementation which currently is
+//! tokio. This serves a few purposes:
+//! - Avoid depending directly on tokio APIs, making upgrades or a potential
+//!   switch easier.
+//! - Reflect our chosen default semantics of aborting on task panic,
+//!   offering `*_allow_panic` functions to opt out of that.
+//! - Reflect that historically we've used blocking futures due to making DB
+//!   calls directly within futures. This point should go away once
+//!   https://github.com/graphprotocol/graph-node/issues/905 is resolved.
+//!   Then the blocking flavors should no longer accept futures but
+//!   closures.
+//!
+//! These should not be called from within executors other than tokio,
+//! particularly the blocking functions will panic in that case. We should
+//! generally avoid mixing executors whenever possible.
 
 use futures03::future::{FutureExt, TryFutureExt};
 use std::future::Future as Future03;
 use std::panic::AssertUnwindSafe;
 use tokio::task::JoinHandle;
 
+#[cfg(debug_assertions)]
+use tokio::runtime::{Builder, Runtime};
+
+#[cfg(debug_assertions)]
+lazy_static::lazy_static! {
+    /// The one true runtime for all tests. Tests should use the
+    /// `graph::test` macro to make sure they are using this runtime, the
+    /// same way they would use `#[tokio::test]`.
+    ///
+    /// We need to make sure we use a single runtime because if there are
+    /// multiple runtimes involved, the task that diesel_async spawns to
+    /// drive database connections (see `drive_connection` in the
+    /// `diesel_async` crate) may end up on a different runtime than the one
+    /// the test is using, leading to that task getting dropped, and the
+    /// test using a connection receiving a `Connection closed` error.
+ pub static ref TEST_RUNTIME: Runtime = + Builder::new_multi_thread().enable_all().build().unwrap(); +} + fn abort_on_panic( f: impl Future03 + Send + 'static, ) -> impl Future03 { diff --git a/graph/src/util/backoff.rs b/graph/src/util/backoff.rs index 6e6361e0d67..2f739a96ea9 100644 --- a/graph/src/util/backoff.rs +++ b/graph/src/util/backoff.rs @@ -132,7 +132,7 @@ mod tests { assert_eq!(backoff.next_attempt(), Duration::from_secs(45)); } - #[tokio::test] + #[crate::test] async fn test_sleep_async() { let mut backoff = ExponentialBackoff::new(Duration::from_secs_f32(0.1), Duration::from_secs_f32(0.2)); diff --git a/graph/src/util/futures.rs b/graph/src/util/futures.rs index a5726b4d9d8..b4da90c8a1c 100644 --- a/graph/src/util/futures.rs +++ b/graph/src/util/futures.rs @@ -485,7 +485,7 @@ mod tests { use slog::o; use std::sync::Mutex; - #[tokio::test] + #[crate::test] async fn test() { let logger = Logger::root(::slog::Discard, o!()); @@ -510,7 +510,7 @@ mod tests { assert_eq!(result, Ok(10)); } - #[tokio::test] + #[crate::test] async fn limit_reached() { let logger = Logger::root(::slog::Discard, o!()); @@ -535,7 +535,7 @@ mod tests { assert_eq!(result, Err(5)); } - #[tokio::test] + #[crate::test] async fn limit_not_reached() { let logger = Logger::root(::slog::Discard, o!()); @@ -560,7 +560,7 @@ mod tests { assert_eq!(result, Ok(10)); } - #[tokio::test] + #[crate::test] async fn custom_when() { let logger = Logger::root(::slog::Discard, o!()); let c = Mutex::new(0); diff --git a/graph/src/util/jobs.rs b/graph/src/util/jobs.rs index fdda7d365b4..4abed5e2a56 100644 --- a/graph/src/util/jobs.rs +++ b/graph/src/util/jobs.rs @@ -123,7 +123,7 @@ mod tests { } } - #[tokio::test(flavor = "multi_thread")] + #[crate::test] async fn jobs_run() { let count = Arc::new(Mutex::new(0)); let job = CounterJob { diff --git a/graph/tests/subgraph_datasource_tests.rs b/graph/tests/subgraph_datasource_tests.rs index 2c357bf37cd..366c7bc8d26 100644 --- a/graph/tests/subgraph_datasource_tests.rs +++ b/graph/tests/subgraph_datasource_tests.rs @@ -70,7 +70,7 @@ impl MockSourcableStore { #[async_trait] impl SourceableStore for MockSourcableStore { - fn get_range( + async fn get_range( &self, entity_types: Vec, _causality_region: CausalityRegion, @@ -100,7 +100,7 @@ impl SourceableStore for MockSourcableStore { } } -#[tokio::test] +#[graph::test] async fn test_triggers_adapter_with_entities() { let id = DeploymentHash::new("test_deployment").unwrap(); let schema = InputSchema::parse_latest( diff --git a/graphql/Cargo.toml b/graphql/Cargo.toml index b4795cd8e8e..d9cb14684f6 100644 --- a/graphql/Cargo.toml +++ b/graphql/Cargo.toml @@ -4,6 +4,7 @@ version.workspace = true edition.workspace = true [dependencies] +async-trait = { workspace = true } crossbeam = "0.8" graph = { path = "../graph" } graphql-tools = "0.4.0" diff --git a/graphql/src/execution/execution.rs b/graphql/src/execution/execution.rs index 7b1da1a3e95..8173f00f2bf 100644 --- a/graphql/src/execution/execution.rs +++ b/graphql/src/execution/execution.rs @@ -296,7 +296,7 @@ pub(crate) async fn execute_root_selection_set_uncached( let (mut values, trace) = if data_set.is_empty() && meta_items.is_empty() { (Object::default(), Trace::None) } else { - let (initial_data, trace) = ctx.resolver.prefetch(ctx, &data_set)?; + let (initial_data, trace) = ctx.resolver.prefetch(ctx, &data_set).await?; data_set.push_fields(meta_items)?; ( execute_selection_set_to_map(ctx, &data_set, root_type, initial_data).await?, diff --git a/graphql/src/execution/resolver.rs 
b/graphql/src/execution/resolver.rs index 0074eb124d8..6568b7538f8 100644 --- a/graphql/src/execution/resolver.rs +++ b/graphql/src/execution/resolver.rs @@ -1,8 +1,10 @@ use std::time::Duration; +use async_trait::async_trait; + use graph::components::store::QueryPermit; use graph::data::query::{CacheStatus, Trace}; -use graph::prelude::{async_trait, s, Error, QueryExecutionError}; +use graph::prelude::{s, Error, QueryExecutionError}; use graph::schema::ApiSchema; use graph::{ data::graphql::ObjectOrInterface, @@ -21,7 +23,7 @@ pub trait Resolver: Sized + Send + Sync + 'static { async fn query_permit(&self) -> QueryPermit; /// Prepare for executing a query by prefetching as much data as possible - fn prefetch( + async fn prefetch( &self, ctx: &ExecutionContext, selection_set: &a::SelectionSet, diff --git a/graphql/src/introspection/resolver.rs b/graphql/src/introspection/resolver.rs index 765b0399695..a44e0294c92 100644 --- a/graphql/src/introspection/resolver.rs +++ b/graphql/src/introspection/resolver.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use graph::components::store::QueryPermit; use graph::data::graphql::ext::{FieldExt, TypeDefinitionExt}; use graph::data::query::Trace; @@ -360,7 +361,7 @@ impl Resolver for IntrospectionResolver { unreachable!() } - fn prefetch( + async fn prefetch( &self, _: &ExecutionContext, _: &a::SelectionSet, diff --git a/graphql/src/runner.rs b/graphql/src/runner.rs index d2f0bc9c96c..0132b27c090 100644 --- a/graphql/src/runner.rs +++ b/graphql/src/runner.rs @@ -1,13 +1,15 @@ use std::sync::Arc; use std::time::Instant; +use async_trait::async_trait; + use crate::metrics::GraphQLMetrics; use crate::prelude::{QueryExecutionOptions, StoreResolver}; use crate::query::execute_query; use graph::futures03::future; use graph::prelude::MetricsRegistry; use graph::prelude::{ - async_trait, o, CheapClone, DeploymentState, GraphQLMetrics as GraphQLMetricsTrait, + o, CheapClone, DeploymentState, GraphQLMetrics as GraphQLMetricsTrait, GraphQlRunner as GraphQlRunnerTrait, Logger, Query, QueryExecutionError, ENV_VARS, }; use graph::{data::graphql::load_manager::LoadManager, prelude::QueryStoreManager}; @@ -106,7 +108,7 @@ where let store = self.store.query_store(target.clone()).await?; let state = store.deployment_state().await?; let network = Some(store.network_name().to_string()); - let schema = store.api_schema()?; + let schema = store.api_schema().await?; let latest_block = match store.block_ptr().await.ok().flatten() { Some(block) => Some(LatestBlockInfo { diff --git a/graphql/src/store/prefetch.rs b/graphql/src/store/prefetch.rs index 95f51d51944..ce5722bfb78 100644 --- a/graphql/src/store/prefetch.rs +++ b/graphql/src/store/prefetch.rs @@ -15,7 +15,7 @@ use graph::schema::Field; use graph::slog::warn; use graph::util::cache_weight; use std::collections::{BTreeMap, HashMap}; -use std::rc::Rc; +use std::sync::Arc; use std::time::Instant; use graph::data::graphql::TypeExt; @@ -89,7 +89,7 @@ struct Node { /// copies to the point where we need to convert to `q::Value`, and it /// would be desirable to base the data structure that GraphQL execution /// uses on a DAG rather than a tree, but that's a good amount of work - children: BTreeMap>>, + children: BTreeMap>>, } impl From for Node { @@ -111,11 +111,11 @@ impl CacheWeight for Node { /// Convert a list of nodes into a `q::Value::List` where each node has also /// been converted to a `q::Value` -fn node_list_as_value(nodes: Vec>) -> r::Value { +fn node_list_as_value(nodes: Vec>) -> r::Value { r::Value::List( 
nodes .into_iter() - .map(|node| Rc::try_unwrap(node).unwrap_or_else(|rc| rc.as_ref().clone())) + .map(|node| Arc::try_unwrap(node).unwrap_or_else(|arc| arc.as_ref().clone())) .map(Into::into) .collect(), ) @@ -211,9 +211,9 @@ impl Node { .expect("__typename must be a string") } - fn set_children(&mut self, response_key: String, nodes: Vec>) { - fn nodes_weight(nodes: &Vec>) -> usize { - let vec_weight = nodes.capacity() * std::mem::size_of::>(); + fn set_children(&mut self, response_key: String, nodes: Vec>) { + fn nodes_weight(nodes: &Vec>) -> usize { + let vec_weight = nodes.capacity() * std::mem::size_of::>(); let children_weight = nodes.iter().map(|node| node.weight()).sum::(); vec_weight + children_weight } @@ -483,7 +483,7 @@ fn add_children( children: Vec, response_key: &str, ) -> Result<(), QueryExecutionError> { - let children: Vec<_> = children.into_iter().map(Rc::new).collect(); + let children: Vec<_> = children.into_iter().map(Arc::new).collect(); if parents.len() == 1 { let parent = parents.first_mut().expect("we just checked"); @@ -495,7 +495,7 @@ fn add_children( // children to their parent. This relies on the fact that interfaces // make sure that id's are distinct across all implementations of the // interface. - let mut grouped: HashMap<&Id, Vec>> = HashMap::default(); + let mut grouped: HashMap<&Id, Vec>> = HashMap::default(); for child in children.iter() { let parent = child.parent.as_ref().ok_or_else(|| { QueryExecutionError::Panic(format!( @@ -546,7 +546,7 @@ fn add_children( /// cases where the store contains data that violates the data model by having /// multiple values for what should be a relationship to a single object in /// @derivedFrom fields -pub fn run( +pub async fn run( resolver: &StoreResolver, ctx: &ExecutionContext, selection_set: &a::SelectionSet, @@ -557,8 +557,9 @@ pub fn run( let trace = Trace::block(resolver.block_number(), ctx.trace); // Execute the root selection set against the root query type. - let (nodes, trace) = - loader.execute_selection_set(make_root_node(), trace, selection_set, None)?; + let (nodes, trace) = loader + .execute_selection_set(make_root_node(), trace, selection_set, None) + .await?; graphql_metrics.observe_query_result_size(nodes.weight()); let obj = Object::from_iter(nodes.into_iter().flat_map(|node| { @@ -583,14 +584,14 @@ impl<'a> Loader<'a> { Loader { resolver, ctx } } - fn execute_selection_set( + async fn execute_selection_set( &self, mut parents: Vec, mut parent_trace: Trace, selection_set: &a::SelectionSet, parent_interval: Option, ) -> Result<(Vec, Trace), Vec> { - let input_schema = self.resolver.store.input_schema()?; + let input_schema = self.resolver.store.input_schema().await?; let mut errors: Vec = Vec::new(); let at_root = is_root_node(parents.iter()); @@ -651,14 +652,15 @@ impl<'a> Loader<'a> { )) }; - match self.fetch(&parents, &join, field) { + match self.fetch(&parents, &join, field).await { Ok((children, trace)) => { - match self.execute_selection_set( + let exec_fut = Box::pin(self.execute_selection_set( children, trace, &field.selection_set, child_interval, - ) { + )); + match exec_fut.await { Ok((children, trace)) => { add_children( &input_schema, @@ -689,13 +691,13 @@ impl<'a> Loader<'a> { /// Query child entities for `parents` from the store. The `join` indicates /// in which child field to look for the parent's id/join field. When /// `is_single` is `true`, there is at most one child per parent. 
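(Two details in the `prefetch.rs` hunks above are easy to miss: the child `Node`s move from `Rc` to `Arc` because the now-async traversal must produce `Send` futures, and the recursive `execute_selection_set` call is wrapped in `Box::pin` because an `async fn` that awaits itself would have an infinitely sized future. A self-contained sketch of the boxed-recursion pattern, with illustrative names:

use std::future::Future;
use std::pin::Pin;

// Recursive async work must return a boxed future; the `+ Send` bound
// keeps it usable on multi-threaded executors, which is also why the
// shared nodes in the surrounding code had to become `Arc`.
fn depth_sum(n: u64) -> Pin<Box<dyn Future<Output = u64> + Send>> {
    Box::pin(async move {
        if n == 0 {
            0
        } else {
            n + depth_sum(n - 1).await
        }
    })
}

fn main() {
    let rt = tokio::runtime::Runtime::new().unwrap();
    assert_eq!(rt.block_on(depth_sum(3)), 6);
}

End of aside; `fetch` continues below.)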
- fn fetch( + async fn fetch( &self, parents: &[&mut Node], join: &MaybeJoin<'_>, field: &a::Field, ) -> Result<(Vec, Trace), QueryExecutionError> { - let input_schema = self.resolver.store.input_schema()?; + let input_schema = self.resolver.store.input_schema().await?; let child_type = join.child_type(); let mut query = build_query( child_type, @@ -743,6 +745,7 @@ impl<'a> Loader<'a> { self.resolver .store .find_query_values(query) + .await .map(|(values, trace)| (values.into_iter().map(Node::from).collect(), trace)) } diff --git a/graphql/src/store/resolver.rs b/graphql/src/store/resolver.rs index 3fb8059988d..500964ea7a2 100644 --- a/graphql/src/store/resolver.rs +++ b/graphql/src/store/resolver.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use std::sync::Arc; +use async_trait::async_trait; use graph::components::graphql::GraphQLMetrics as _; use graph::components::store::QueryPermit; use graph::data::graphql::load_manager::LoadManager; @@ -259,12 +260,13 @@ impl Resolver for StoreResolver { self.store.query_permit().await } - fn prefetch( + async fn prefetch( &self, ctx: &ExecutionContext, selection_set: &a::SelectionSet, ) -> Result<(Option, Trace), Vec> { super::prefetch::run(self, ctx, selection_set, &self.graphql_metrics) + .await .map(|(value, trace)| (Some(value), trace)) } diff --git a/node/Cargo.toml b/node/Cargo.toml index 5b7f051efe1..c11512888e1 100644 --- a/node/Cargo.toml +++ b/node/Cargo.toml @@ -36,7 +36,9 @@ graphman = { workspace = true } serde = { workspace = true } shellexpand = "3.1.1" termcolor = "1.4.1" +tokio = { workspace = true } diesel = { workspace = true } +diesel-async = { workspace = true } prometheus = { version = "0.14.0", features = ["push"] } json-structural-diff = { version = "0.2", features = ["colorize"] } globset = "0.4.16" diff --git a/node/src/bin/manager.rs b/node/src/bin/manager.rs index 9e67a532a8c..f1a033da8c0 100644 --- a/node/src/bin/manager.rs +++ b/node/src/bin/manager.rs @@ -941,8 +941,8 @@ impl Context { pool } - fn subgraph_store(self) -> Arc { - self.store_and_pools().0.subgraph_store() + async fn subgraph_store(self) -> Arc { + self.store_and_pools().await.0.subgraph_store() } fn subscription_manager(&self) -> Arc { @@ -955,13 +955,13 @@ impl Context { )) } - fn store(&self) -> Arc { - let (store, _) = self.store_and_pools(); + async fn store(&self) -> Arc { + let (store, _) = self.store_and_pools().await; store } - fn pools(self) -> HashMap { - let (_, pools) = self.store_and_pools(); + async fn pools(self) -> HashMap { + let (_, pools) = self.store_and_pools().await; pools } @@ -976,7 +976,7 @@ impl Context { .await } - fn store_and_pools(&self) -> (Arc, HashMap) { + async fn store_and_pools(&self) -> (Arc, HashMap) { let (subgraph_store, pools, _) = StoreBuilder::make_subgraph_store_and_pools( &self.logger, &self.node_id, @@ -996,29 +996,30 @@ impl Context { HashMap::default(), Vec::new(), self.registry.cheap_clone(), - ); + ) + .await; (store, pools) } - fn store_and_primary(self) -> (Arc, ConnectionPool) { - let (store, pools) = self.store_and_pools(); + async fn store_and_primary(self) -> (Arc, ConnectionPool) { + let (store, pools) = self.store_and_pools().await; let primary = pools.get(&*PRIMARY_SHARD).expect("there is a primary pool"); (store, primary.clone()) } - fn block_store_and_primary_pool(self) -> (Arc, ConnectionPool) { - let (store, pools) = self.store_and_pools(); + async fn block_store_and_primary_pool(self) -> (BlockStore, ConnectionPool) { + let (store, pools) = self.store_and_pools().await; let primary = 
pools.get(&*PRIMARY_SHARD).unwrap(); (store.block_store(), primary.clone()) } - fn graphql_runner(self) -> Arc> { + async fn graphql_runner(self) -> Arc> { let logger = self.logger.clone(); let registry = self.registry.clone(); - let store = self.store(); + let store = self.store().await; let load_manager = Arc::new(LoadManager::new(&logger, vec![], vec![], registry.clone())); @@ -1033,11 +1034,13 @@ impl Context { Networks::from_config(logger, &self.config, registry, metrics, &[]).await } - fn chain_store(self, chain_name: &str) -> anyhow::Result> { + async fn chain_store(self, chain_name: &str) -> anyhow::Result> { use graph::components::store::BlockStore; self.store() + .await .block_store() .chain_store(chain_name) + .await .ok_or_else(|| anyhow::anyhow!("Could not find a network named '{}'", chain_name)) } @@ -1058,7 +1061,7 @@ impl Context { ) .await?; - let chain_store = self.chain_store(chain_name)?; + let chain_store = self.chain_store(chain_name).await?; let ethereum_adapter = networks .ethereum_rpcs(chain_name.into()) .cheapest() @@ -1149,7 +1152,7 @@ async fn main() -> anyhow::Result<()> { use Command::*; match opt.cmd { - TxnSpeed { delay } => commands::txn_speed::run(ctx.primary_pool(), delay), + TxnSpeed { delay } => commands::txn_speed::run(ctx.primary_pool(), delay).await, Info { deployment, current, @@ -1160,7 +1163,7 @@ async fn main() -> anyhow::Result<()> { brief, no_name, } => { - let (store, primary_pool) = ctx.store_and_primary(); + let (store, primary_pool) = ctx.store_and_primary().await; let ctx = commands::deployment::info::Context { primary_pool, @@ -1178,18 +1181,18 @@ async fn main() -> anyhow::Result<()> { no_name, }; - commands::deployment::info::run(ctx, args) + commands::deployment::info::run(ctx, args).await } Unused(cmd) => { - let store = ctx.subgraph_store(); + let store = ctx.subgraph_store().await; use UnusedCommand::*; match cmd { List { existing, deployment, - } => commands::unused_deployments::list(store, existing, deployment), - Record => commands::unused_deployments::record(store), + } => commands::unused_deployments::list(store, existing, deployment).await, + Record => commands::unused_deployments::record(store).await, Remove { count, deployment, @@ -1198,6 +1201,7 @@ async fn main() -> anyhow::Result<()> { let count = count.unwrap_or(1_000_000); let older = older.map(|older| chrono::Duration::minutes(older as i64)); commands::unused_deployments::remove(store, count, deployment.as_deref(), older) + .await } } } @@ -1208,7 +1212,7 @@ async fn main() -> anyhow::Result<()> { CheckProviders { timeout_seconds } => { let logger = ctx.logger.clone(); let networks = ctx.networks().await?; - let store = ctx.store().block_store(); + let store = ctx.store().await.block_store(); let timeout = Duration::from_secs(timeout_seconds.unwrap_or(60)); commands::provider_checks::execute(&logger, &networks, store, timeout).await; @@ -1229,13 +1233,14 @@ async fn main() -> anyhow::Result<()> { Setting { name } => commands::config::setting(&name), } } - Remove { name } => commands::remove::run(ctx.subgraph_store(), &name), - Create { name } => commands::create::run(ctx.subgraph_store(), name), + Remove { name } => commands::remove::run(ctx.subgraph_store().await, &name).await, + Create { name } => commands::create::run(ctx.subgraph_store().await, name).await, Unassign { deployment } => { let notifications_sender = ctx.notification_sender(); let primary_pool = ctx.primary_pool(); let deployment = make_deployment_selector(deployment); 
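The `Context` accessors above become `async fn`s because the store layer now sits on diesel-async, where both pool checkout and queries are await points; that is what forces `.await` through every arm of `main`. A sketch of the two underlying patterns — a placeholder database URL and illustrative SQL, with the real wiring living in graph-store-postgres: the deadpool-backed pool, and the scoped transaction closure that the `change_block_cache_shard` hunk further down uses via `ScopedFutureExt`.

use diesel_async::pooled_connection::deadpool::Pool;
use diesel_async::pooled_connection::AsyncDieselConnectionManager;
use diesel_async::scoped_futures::ScopedFutureExt;
use diesel_async::{AsyncConnection, AsyncPgConnection, RunQueryDsl};

async fn pool_demo() -> anyhow::Result<()> {
    let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new(
        "postgres://localhost/graph_node", // placeholder URL
    );
    let pool = Pool::builder(manager).build()?;

    // Checking a connection out of the pool is itself an await point,
    // unlike diesel's synchronous r2d2 pool.
    let mut conn = pool.get().await?;
    diesel::sql_query("select 1").execute(&mut conn).await?;

    // Transactions take a closure returning a future; `scope_boxed()`
    // ties that future's lifetime to the borrowed connection.
    conn.transaction::<_, diesel::result::Error, _>(|conn| {
        async move {
            diesel::sql_query("select 1").execute(conn).await?;
            Ok(())
        }
        .scope_boxed()
    })
    .await?;

    Ok(())
}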
commands::deployment::unassign::run(primary_pool, notifications_sender, deployment) + .await } Reassign { deployment, node } => { let notifications_sender = ctx.notification_sender(); @@ -1248,20 +1253,21 @@ async fn main() -> anyhow::Result<()> { deployment, &node, ) + .await } Pause { deployment } => { let notifications_sender = ctx.notification_sender(); let primary_pool = ctx.primary_pool(); let deployment = make_deployment_selector(deployment); - commands::deployment::pause::run(primary_pool, notifications_sender, deployment) + commands::deployment::pause::run(primary_pool, notifications_sender, deployment).await } Resume { deployment } => { let notifications_sender = ctx.notification_sender(); let primary_pool = ctx.primary_pool(); let deployment = make_deployment_selector(deployment); - commands::deployment::resume::run(primary_pool, notifications_sender, deployment) + commands::deployment::resume::run(primary_pool, notifications_sender, deployment).await } Restart { deployments, sleep } => { let notifications_sender = ctx.notification_sender(); @@ -1275,7 +1281,8 @@ async fn main() -> anyhow::Result<()> { notifications_sender.clone(), deployment, sleep, - )?; + ) + .await?; } Ok(()) @@ -1289,7 +1296,7 @@ async fn main() -> anyhow::Result<()> { start_block, } => { let notification_sender = ctx.notification_sender(); - let (store, primary) = ctx.store_and_primary(); + let (store, primary) = ctx.store_and_primary().await; commands::rewind::run( primary, @@ -1357,17 +1364,17 @@ async fn main() -> anyhow::Result<()> { replace, } => { let shards: Vec<_> = ctx.config.stores.keys().cloned().collect(); - let (store, primary) = ctx.store_and_primary(); + let (store, primary) = ctx.store_and_primary().await; commands::copy::create( store, primary, src, shard, shards, node, offset, activate, replace, ) .await } Activate { deployment, shard } => { - commands::copy::activate(ctx.subgraph_store(), deployment, shard) + commands::copy::activate(ctx.subgraph_store().await, deployment, shard).await } - List => commands::copy::list(ctx.pools()), - Status { dst } => commands::copy::status(ctx.pools(), &dst), + List => commands::copy::list(ctx.pools().await).await, + Status { dst } => commands::copy::status(ctx.pools().await, &dst).await, } } Query { @@ -1376,12 +1383,22 @@ async fn main() -> anyhow::Result<()> { target, query, vars, - } => commands::query::run(ctx.graphql_runner(), target, query, vars, output, trace).await, + } => { + commands::query::run( + ctx.graphql_runner().await, + target, + query, + vars, + output, + trace, + ) + .await + } Chain(cmd) => { use ChainCommand::*; match cmd { List => { - let (block_store, primary) = ctx.block_store_and_primary_pool(); + let (block_store, primary) = ctx.block_store_and_primary_pool().await; commands::chain::list(primary, block_store).await } Info { @@ -1389,21 +1406,22 @@ async fn main() -> anyhow::Result<()> { reorg_threshold, hashes, } => { - let (block_store, primary) = ctx.block_store_and_primary_pool(); + let (block_store, primary) = ctx.block_store_and_primary_pool().await; commands::chain::info(primary, block_store, name, reorg_threshold, hashes).await } Remove { name } => { - let (block_store, primary) = ctx.block_store_and_primary_pool(); - commands::chain::remove(primary, block_store, name) + let (block_store, primary) = ctx.block_store_and_primary_pool().await; + commands::chain::remove(primary, block_store, name).await } ChangeShard { chain_name, shard } => { - let (block_store, primary) = ctx.block_store_and_primary_pool(); + let 
(block_store, primary) = ctx.block_store_and_primary_pool().await; commands::chain::change_block_cache_shard( primary, block_store, chain_name, shard, ) + .await } UpdateGenesis { @@ -1412,14 +1430,14 @@ async fn main() -> anyhow::Result<()> { chain_name, } => { let store_builder = ctx.store_builder().await; - let store = ctx.store().block_store(); + let store = ctx.store().await.block_store(); let networks = ctx.networks().await?; let chain_id = ChainName::from(chain_name); let block_hash = BlockHash::from_str(&block_hash)?; commands::chain::update_chain_genesis( &networks, store_builder.coord.cheap_clone(), - store, + Box::new(store), &logger, chain_id, block_hash, @@ -1470,8 +1488,8 @@ async fn main() -> anyhow::Result<()> { } Truncate { chain_name, force } => { use commands::check_blocks::truncate; - let chain_store = ctx.chain_store(&chain_name)?; - truncate(chain_store, force) + let chain_store = ctx.chain_store(&chain_name).await?; + truncate(chain_store, force).await } CallCache { method, chain_name } => { match method { @@ -1482,7 +1500,7 @@ async fn main() -> anyhow::Result<()> { ttl_days, ttl_max_contracts, } => { - let chain_store = ctx.chain_store(&chain_name)?; + let chain_store = ctx.chain_store(&chain_name).await?; if let Some(ttl_days) = ttl_days { return commands::chain::clear_stale_call_cache( chain_store, @@ -1521,7 +1539,7 @@ async fn main() -> anyhow::Result<()> { deployment, table, } => { - let (store, primary_pool) = ctx.store_and_primary(); + let (store, primary_pool) = ctx.store_and_primary().await; let subgraph_store = store.subgraph_store(); commands::stats::account_like( subgraph_store, @@ -1532,9 +1550,9 @@ async fn main() -> anyhow::Result<()> { ) .await } - Show { deployment } => commands::stats::show(ctx.pools(), &deployment), + Show { deployment } => commands::stats::show(ctx.pools().await, &deployment).await, Analyze { deployment, entity } => { - let (store, primary_pool) = ctx.store_and_primary(); + let (store, primary_pool) = ctx.store_and_primary().await; let subgraph_store = store.subgraph_store(); commands::stats::analyze( subgraph_store, @@ -1542,11 +1560,12 @@ async fn main() -> anyhow::Result<()> { deployment, entity.as_deref(), ) + .await } Target { deployment } => { - let (store, primary_pool) = ctx.store_and_primary(); + let (store, primary_pool) = ctx.store_and_primary().await; let subgraph_store = store.subgraph_store(); - commands::stats::target(subgraph_store, primary_pool, &deployment) + commands::stats::target(subgraph_store, primary_pool, &deployment).await } SetTarget { target, @@ -1556,7 +1575,7 @@ async fn main() -> anyhow::Result<()> { entity, columns, } => { - let (store, primary) = ctx.store_and_primary(); + let (store, primary) = ctx.store_and_primary().await; let store = store.subgraph_store(); let target = if reset { -1 } else { target as i32 }; commands::stats::set_target( @@ -1568,12 +1587,13 @@ async fn main() -> anyhow::Result<()> { target, no_analyze, ) + .await } } } Index(cmd) => { use IndexCommand::*; - let (store, primary_pool) = ctx.store_and_primary(); + let (store, primary_pool) = ctx.store_and_primary().await; let subgraph_store = store.subgraph_store(); match cmd { Create { @@ -1653,7 +1673,7 @@ async fn main() -> anyhow::Result<()> { delete_threshold, once, } => { - let (store, primary_pool) = ctx.store_and_primary(); + let (store, primary_pool) = ctx.store_and_primary().await; let history = history.unwrap_or(ENV_VARS.min_history_blocks.try_into()?); commands::prune::run( store, @@ -1672,7 +1692,7 @@ async fn 
main() -> anyhow::Result<()> { delete_threshold, history, } => { - let (store, primary_pool) = ctx.store_and_primary(); + let (store, primary_pool) = ctx.store_and_primary().await; let history = history.unwrap_or(ENV_VARS.min_history_blocks.try_into()?); commands::prune::set( store, @@ -1685,7 +1705,7 @@ async fn main() -> anyhow::Result<()> { .await } Status { run, deployment } => { - let (store, primary_pool) = ctx.store_and_primary(); + let (store, primary_pool) = ctx.store_and_primary().await; commands::prune::status(store, primary_pool, deployment, run).await } } @@ -1696,7 +1716,7 @@ async fn main() -> anyhow::Result<()> { name, url, } => { - let store = ctx.store(); + let store = ctx.store().await; let subgraph_store = store.subgraph_store(); commands::deploy::run(subgraph_store, deployment, name, url).await diff --git a/node/src/chain.rs b/node/src/chain.rs index 343b783908f..543a0cd5cfb 100644 --- a/node/src/chain.rs +++ b/node/src/chain.rs @@ -26,13 +26,13 @@ use graph::log::factory::LoggerFactory; use graph::prelude::anyhow; use graph::prelude::MetricsRegistry; use graph::slog::{debug, info, o, warn, Logger}; -use graph::tokio::time::timeout; use graph::url::Url; use graph_chain_ethereum::{self as ethereum, Transport}; use graph_store_postgres::{BlockStore, ChainHeadUpdateListener}; use std::cmp::Ordering; use std::collections::BTreeMap; use std::sync::Arc; +use tokio::time::timeout; // The status of a provider that we learned from connecting to it #[derive(PartialEq)] @@ -354,7 +354,7 @@ pub async fn networks_as_chains( blockchain_map: &mut BlockchainMap, logger: &Logger, networks: &Networks, - store: Arc, + store: BlockStore, logger_factory: &LoggerFactory, metrics_registry: Arc, chain_head_update_listener: Arc, @@ -378,7 +378,7 @@ pub async fn networks_as_chains( }); for (chain_id, adapters, kind) in chains.into_iter() { - let chain_store = match store.chain_store(chain_id) { + let chain_store = match store.chain_store(chain_id).await { Some(c) => c, None => { let ident = match timeout( @@ -395,6 +395,7 @@ pub async fn networks_as_chains( }; store .create_chain_store(chain_id, ident) + .await .expect("must be able to create store if one is not yet setup for the chain") } }; @@ -547,11 +548,11 @@ mod test { use graph::components::network_provider::ChainName; use graph::endpoint::EndpointMetrics; use graph::log::logger; - use graph::prelude::{tokio, MetricsRegistry}; + use graph::prelude::MetricsRegistry; use graph_chain_ethereum::NodeCapabilities; use std::sync::Arc; - #[tokio::test] + #[graph::test] async fn correctly_parse_ethereum_networks() { let logger = logger(true); diff --git a/node/src/helpers.rs b/node/src/helpers.rs index c8b7ccd2a24..fd59d6d8d15 100644 --- a/node/src/helpers.rs +++ b/node/src/helpers.rs @@ -5,25 +5,25 @@ use graph::prelude::{ BlockPtr, DeploymentHash, NodeId, SubgraphRegistrarError, SubgraphStore as SubgraphStoreTrait, }; use graph::slog::{error, info, Logger}; -use graph::tokio::sync::mpsc::Receiver; use graph::{ components::store::DeploymentLocator, prelude::{SubgraphName, SubgraphRegistrar}, }; use graph_store_postgres::SubgraphStore; +use tokio::sync::mpsc::Receiver; /// Cleanup a subgraph /// This is used to remove a subgraph before redeploying it when using the watch flag -fn cleanup_dev_subgraph( +async fn cleanup_dev_subgraph( logger: &Logger, subgraph_store: &SubgraphStore, name: &SubgraphName, locator: &DeploymentLocator, ) -> Result<()> { info!(logger, "Removing subgraph"; "name" => name.to_string(), "id" => locator.id.to_string(), 
"hash" => locator.hash.to_string()); - subgraph_store.remove_subgraph(name.clone())?; - subgraph_store.unassign_subgraph(locator)?; - subgraph_store.remove_deployment(locator.id.into())?; + subgraph_store.remove_subgraph(name.clone()).await?; + subgraph_store.unassign_subgraph(locator).await?; + subgraph_store.remove_deployment(locator.id.into()).await?; info!(logger, "Subgraph removed"; "name" => name.to_string(), "id" => locator.id.to_string(), "hash" => locator.hash.to_string()); Ok(()) } @@ -66,9 +66,9 @@ async fn drop_and_recreate_subgraph( node_id: NodeId, hash: DeploymentHash, ) -> Result { - let locator = subgraph_store.active_locator(&hash)?; + let locator = subgraph_store.active_locator(&hash).await?; if let Some(locator) = locator.clone() { - cleanup_dev_subgraph(logger, &subgraph_store, &name, &locator)?; + cleanup_dev_subgraph(logger, &subgraph_store, &name, &locator).await?; } deploy_subgraph( diff --git a/node/src/launcher.rs b/node/src/launcher.rs index 8855ef1a954..13d4de67e81 100644 --- a/node/src/launcher.rs +++ b/node/src/launcher.rs @@ -77,7 +77,7 @@ async fn setup_store( let primary_pool = store_builder.primary_pool(); let subscription_manager = store_builder.subscription_manager(); let chain_head_update_listener = store_builder.chain_head_update_listener(); - let network_store = store_builder.network_store(config.chain_ids()); + let network_store = store_builder.network_store(config.chain_ids()).await; ( primary_pool, @@ -104,7 +104,7 @@ async fn build_blockchain_map( if env_vars.genesis_validation_enabled { provider_checks.push(Arc::new(network_provider::GenesisHashCheck::from_id_store( - block_store.clone(), + Box::new(block_store.cheap_clone()), ))); } @@ -139,7 +139,10 @@ async fn build_blockchain_map( Arc::new(blockchain_map) } -fn cleanup_ethereum_shallow_blocks(blockchain_map: &BlockchainMap, network_store: &Arc) { +async fn cleanup_ethereum_shallow_blocks( + blockchain_map: &BlockchainMap, + network_store: &Arc, +) { match blockchain_map .get_all_by_kind::(BlockchainKind::Ethereum) .ok() @@ -159,6 +162,7 @@ fn cleanup_ethereum_shallow_blocks(blockchain_map: &BlockchainMap, network_store network_store .block_store() .cleanup_ethereum_shallow_blocks(eth_network_names) + .await .unwrap(); } // This code path only happens if the downcast on the blockchain map fails, that @@ -476,7 +480,7 @@ pub async fn run( // see comment on cleanup_ethereum_shallow_blocks if !opt.disable_block_ingestor { - cleanup_ethereum_shallow_blocks(&blockchain_map, &network_store); + cleanup_ethereum_shallow_blocks(&blockchain_map, &network_store).await; } let graphql_server = build_graphql_server( diff --git a/node/src/manager/commands/assign.rs b/node/src/manager/commands/assign.rs index 01260538a74..234b967584f 100644 --- a/node/src/manager/commands/assign.rs +++ b/node/src/manager/commands/assign.rs @@ -1,8 +1,8 @@ use graph::components::store::DeploymentLocator; use graph::prelude::{anyhow::anyhow, Error, NodeId, StoreEvent}; use graph_store_postgres::{command_support::catalog, ConnectionPool, NotificationSender}; -use std::thread; use std::time::Duration; +use tokio; use crate::manager::deployment::DeploymentSearch; @@ -11,59 +11,63 @@ pub async fn unassign( sender: &NotificationSender, search: &DeploymentSearch, ) -> Result<(), Error> { - let locator = search.locate_unique(&primary)?; + let locator = search.locate_unique(&primary).await?; - let pconn = primary.get()?; + let pconn = primary.get().await?; let mut conn = catalog::Connection::new(pconn); let site = conn - 
.locate_site(locator.clone())? + .locate_site(locator.clone()) + .await? .ok_or_else(|| anyhow!("failed to locate site for {locator}"))?; println!("unassigning {locator}"); - let changes = conn.unassign_subgraph(&site)?; - conn.send_store_event(sender, &StoreEvent::new(changes))?; + let changes = conn.unassign_subgraph(&site).await?; + conn.send_store_event(sender, &StoreEvent::new(changes)) + .await?; Ok(()) } -pub fn reassign( +pub async fn reassign( primary: ConnectionPool, sender: &NotificationSender, search: &DeploymentSearch, node: String, ) -> Result<(), Error> { let node = NodeId::new(node.clone()).map_err(|()| anyhow!("illegal node id `{}`", node))?; - let locator = search.locate_unique(&primary)?; + let locator = search.locate_unique(&primary).await?; - let pconn = primary.get()?; + let pconn = primary.get().await?; let mut conn = catalog::Connection::new(pconn); let site = conn - .locate_site(locator.clone())? + .locate_site(locator.clone()) + .await? .ok_or_else(|| anyhow!("failed to locate site for {locator}"))?; - let changes = match conn.assigned_node(&site)? { + let changes = match conn.assigned_node(&site).await? { Some(cur) => { if cur == node { println!("deployment {locator} is already assigned to {cur}"); vec![] } else { println!("reassigning {locator} to {node} (was {cur})"); - conn.reassign_subgraph(&site, &node)? + conn.reassign_subgraph(&site, &node).await? } } None => { println!("assigning {locator} to {node}"); - conn.assign_subgraph(&site, &node)? + conn.assign_subgraph(&site, &node).await? } }; - conn.send_store_event(sender, &StoreEvent::new(changes))?; + conn.send_store_event(sender, &StoreEvent::new(changes)) + .await?; // It's easy to make a typo in the name of the node; if this operation // assigns to a node that wasn't used before, warn the user that they // might have mistyped the node name let mirror = catalog::Mirror::primary_only(primary); - let count = mirror.assignments(&node)?.len(); + let count = mirror.assignments(&node).await?.len(); if count == 1 { println!("warning: this is the only deployment assigned to {node}"); println!(" are you sure it is spelled correctly?"); @@ -71,20 +75,21 @@ pub fn reassign( Ok(()) } -pub fn pause_or_resume( +pub async fn pause_or_resume( primary: ConnectionPool, sender: &NotificationSender, locator: &DeploymentLocator, should_pause: bool, ) -> Result<(), Error> { - let pconn = primary.get()?; + let pconn = primary.get().await?; let mut conn = catalog::Connection::new(pconn); let site = conn - .locate_site(locator.clone())? + .locate_site(locator.clone()) + .await? .ok_or_else(|| anyhow!("failed to locate site for {locator}"))?; - let change = match conn.assignment_status(&site)? { + let change = match conn.assignment_status(&site).await? { Some((_, is_paused)) => { if should_pause { if is_paused { @@ -92,10 +97,10 @@ pub fn pause_or_resume( return Ok(()); } println!("pausing {locator}"); - conn.pause_subgraph(&site)? + conn.pause_subgraph(&site).await? } else { println!("resuming {locator}"); - conn.resume_subgraph(&site)? + conn.resume_subgraph(&site).await? 
} } None => { @@ -104,23 +109,24 @@ pub fn pause_or_resume( } }; println!("Operation completed"); - conn.send_store_event(sender, &StoreEvent::new(change))?; + conn.send_store_event(sender, &StoreEvent::new(change)) + .await?; Ok(()) } -pub fn restart( +pub async fn restart( primary: ConnectionPool, sender: &NotificationSender, locator: &DeploymentLocator, sleep: Duration, ) -> Result<(), Error> { - pause_or_resume(primary.clone(), sender, locator, true)?; + pause_or_resume(primary.clone(), sender, locator, true).await?; println!( "Waiting {}s to make sure pausing was processed", sleep.as_secs() ); - thread::sleep(sleep); - pause_or_resume(primary, sender, locator, false)?; + tokio::time::sleep(sleep).await; + pause_or_resume(primary, sender, locator, false).await?; Ok(()) } diff --git a/node/src/manager/commands/chain.rs b/node/src/manager/commands/chain.rs index 11622dca2da..84d54e145da 100644 --- a/node/src/manager/commands/chain.rs +++ b/node/src/manager/commands/chain.rs @@ -1,8 +1,8 @@ use std::sync::Arc; use diesel::sql_query; -use diesel::Connection; -use diesel::RunQueryDsl; +use diesel_async::AsyncConnection; +use diesel_async::RunQueryDsl; use graph::blockchain::BlockHash; use graph::blockchain::BlockPtr; use graph::blockchain::ChainIdentifier; @@ -29,15 +29,16 @@ use graph_store_postgres::BlockStore; use graph_store_postgres::ChainStatus; use graph_store_postgres::ChainStore; use graph_store_postgres::PoolCoordinator; +use graph_store_postgres::ScopedFutureExt; use graph_store_postgres::Shard; use graph_store_postgres::{command_support::catalog::block_store, ConnectionPool}; use crate::network_setup::Networks; -pub async fn list(primary: ConnectionPool, store: Arc) -> Result<(), Error> { +pub async fn list(primary: ConnectionPool, store: BlockStore) -> Result<(), Error> { let mut chains = { - let mut conn = primary.get()?; - block_store::load_chains(&mut conn)? + let mut conn = primary.get().await?; + block_store::load_chains(&mut conn).await? }; chains.sort_by_key(|chain| chain.name.clone()); @@ -52,7 +53,7 @@ pub async fn list(primary: ConnectionPool, store: Arc) -> Result<(), ); } for chain in chains { - let head_block = match store.chain_store(&chain.name) { + let head_block = match store.chain_store(&chain.name).await { None => "no chain".to_string(), Some(chain_store) => chain_store .chain_head_ptr() @@ -98,7 +99,7 @@ pub async fn clear_stale_call_cache( pub async fn info( primary: ConnectionPool, - store: Arc, + store: BlockStore, name: String, offset: BlockNumber, hashes: bool, @@ -121,13 +122,15 @@ pub async fn info( } } - let mut conn = primary.get()?; + let mut conn = primary.get().await?; - let chain = block_store::find_chain(&mut conn, &name)? + let chain = block_store::find_chain(&mut conn, &name) + .await? .ok_or_else(|| anyhow!("unknown chain: {}", name))?; let chain_store = store .chain_store(&chain.name) + .await .ok_or_else(|| anyhow!("unknown chain: {}", name))?; let head_block = chain_store.cheap_clone().chain_head_ptr().await?; let ancestor = match &head_block { @@ -152,11 +155,11 @@ pub async fn info( Ok(()) } -pub fn remove(primary: ConnectionPool, store: Arc, name: String) -> Result<(), Error> { +pub async fn remove(primary: ConnectionPool, store: BlockStore, name: String) -> Result<(), Error> { let sites = { let mut conn = - graph_store_postgres::command_support::catalog::Connection::new(primary.get()?); - conn.find_sites_for_network(&name)? 
+ graph_store_postgres::command_support::catalog::Connection::new(primary.get().await?); + conn.find_sites_for_network(&name).await? }; if !sites.is_empty() { @@ -171,7 +174,7 @@ pub fn remove(primary: ConnectionPool, store: Arc, name: String) -> bail!("remove all deployments using chain {} first", name); } - store.drop_chain(&name)?; + store.drop_chain(&name).await?; Ok(()) } @@ -179,7 +182,7 @@ pub fn remove(primary: ConnectionPool, store: Arc, name: String) -> pub async fn update_chain_genesis( networks: &Networks, coord: Arc, - store: Arc, + store: Box, logger: &Logger, chain_id: ChainName, genesis_hash: BlockHash, @@ -203,13 +206,15 @@ pub async fn update_chain_genesis( // Update the local shard's genesis, whether or not it is the primary. // The chains table is replicated from the primary and keeps another genesis hash. // To keep those in sync we need to update the primary and then refresh the shard tables. - store.set_chain_identifier( - &chain_id, - &ChainIdentifier { - net_version: ident.net_version.clone(), - genesis_block_hash: genesis_hash, - }, - )?; + store + .set_chain_identifier( + &chain_id, + &ChainIdentifier { + net_version: ident.net_version.clone(), + genesis_block_hash: genesis_hash, + }, + ) + .await?; // Refresh the new values println!("Refresh mappings"); @@ -218,17 +223,18 @@ pub async fn update_chain_genesis( Ok(()) } -pub fn change_block_cache_shard( +pub async fn change_block_cache_shard( primary_store: ConnectionPool, - store: Arc, + store: BlockStore, chain_name: String, shard: String, ) -> Result<(), Error> { println!("Changing block cache shard for {} to {}", chain_name, shard); - let mut conn = primary_store.get()?; + let mut conn = primary_store.get().await?; - let chain = find_chain(&mut conn, &chain_name)? + let chain = find_chain(&mut conn, &chain_name) + .await? 
.ok_or_else(|| anyhow!("unknown chain: {}", chain_name))?; let old_shard = chain.shard; @@ -236,39 +242,41 @@ pub fn change_block_cache_shard( let chain_store = store .chain_store(&chain_name) + .await .ok_or_else(|| anyhow!("unknown chain: {}", &chain_name))?; let new_name = format!("{}-old", &chain_name); - let ident = chain_store.chain_identifier()?; + let ident = chain_store.chain_identifier().await?; - conn.transaction(|conn| -> Result<(), StoreError> { - let shard = Shard::new(shard.to_string())?; + conn.transaction::<(), StoreError, _>(|conn| { + async { + let shard = Shard::new(shard.to_string())?; - let chain = BlockStore::allocate_chain(conn, &chain_name, &shard, &ident)?; + let chain = BlockStore::allocate_chain(conn, &chain_name, &shard, &ident).await?; - store.add_chain_store(&chain,ChainStatus::Ingestible, true)?; + graph::block_on(store.add_chain_store(&chain,ChainStatus::Ingestible, true))?; - // Drop the foreign key constraint on deployment_schemas - sql_query( - "alter table deployment_schemas drop constraint deployment_schemas_network_fkey;", - ) - .execute(conn)?; - - // Update the current chain name to chain-old - update_chain_name(conn, &chain_name, &new_name)?; + // Drop the foreign key constraint on deployment_schemas + sql_query( + "alter table deployment_schemas drop constraint deployment_schemas_network_fkey;", + ) + .execute(conn).await?; + // Update the current chain name to chain-old + update_chain_name(conn, &chain_name, &new_name).await?; - // Create a new chain with the name in the destination shard - let _ = add_chain(conn, &chain_name, &shard, ident)?; + // Create a new chain with the name in the destination shard + let _ = add_chain(conn, &chain_name, &shard, ident).await?; - // Re-add the foreign key constraint - sql_query( - "alter table deployment_schemas add constraint deployment_schemas_network_fkey foreign key (network) references chains(name);", - ) - .execute(conn)?; - Ok(()) - })?; + // Re-add the foreign key constraint + sql_query( + "alter table deployment_schemas add constraint deployment_schemas_network_fkey foreign key (network) references chains(name);", + ) + .execute(conn).await?; + Ok(()) + }.scope_boxed() + }).await?; - chain_store.update_name(&new_name)?; + chain_store.update_name(&new_name).await?; println!( "Changed block cache shard for {} from {} to {}", @@ -296,7 +304,9 @@ pub async fn ingest( let block = Arc::new(BlockFinality::Final(Arc::new(block))); chain_store.upsert_block(block).await?; - let rows = chain_store.confirm_block_hash(ptr.number, &ptr.hash)?; + let rows = chain_store + .confirm_block_hash(ptr.number, &ptr.hash) + .await?; println!("Inserted block {}", ptr); if rows > 0 { diff --git a/node/src/manager/commands/check_blocks.rs b/node/src/manager/commands/check_blocks.rs index 0afa54bd7d3..15314067a49 100644 --- a/node/src/manager/commands/check_blocks.rs +++ b/node/src/manager/commands/check_blocks.rs @@ -30,7 +30,7 @@ pub async fn by_number( logger: &Logger, delete_duplicates: bool, ) -> anyhow::Result<()> { - let block_hashes = steps::resolve_block_hash_from_block_number(number, &chain_store)?; + let block_hashes = steps::resolve_block_hash_from_block_number(number, &chain_store).await?; match &block_hashes.as_slice() { [] => bail!("Could not find a block with number {} in store", number), @@ -54,14 +54,15 @@ pub async fn by_range( let range = ranges::Range::new(range_from, range_to)?; let max = match range.upper_bound { // When we have an open upper bound, we use the chain head's block number - None => 
steps::find_chain_head(&chain_store)?, + None => steps::find_chain_head(&chain_store).await?, Some(x) => x, }; // FIXME: This performs poorly. // TODO: This could be turned into async code for block_number in range.lower_bound..=max { println!("Checking block [{block_number}/{max}]"); - let block_hashes = steps::resolve_block_hash_from_block_number(block_number, &chain_store)?; + let block_hashes = + steps::resolve_block_hash_from_block_number(block_number, &chain_store).await?; match &block_hashes.as_slice() { [] => eprintln!("Found no block hash with number {block_number}"), [block_hash] => { @@ -87,7 +88,7 @@ pub async fn by_range( Ok(()) } -pub fn truncate(chain_store: Arc, skip_confirmation: bool) -> anyhow::Result<()> { +pub async fn truncate(chain_store: Arc, skip_confirmation: bool) -> anyhow::Result<()> { let prompt = format!( "This will delete all cached blocks for {}.\nProceed?", chain_store.chain @@ -99,6 +100,7 @@ pub fn truncate(chain_store: Arc, skip_confirmation: bool) -> anyhow chain_store .truncate_block_cache() + .await .with_context(|| format!("Failed to truncate block cache for {}", chain_store.chain)) } @@ -115,7 +117,7 @@ async fn run( let diff = steps::diff_block_pair(&cached_block, &provider_block); steps::report_difference(diff.as_deref(), block_hash); if diff.is_some() { - steps::delete_block(block_hash, &chain_store)?; + steps::delete_block(block_hash, &chain_store).await?; } Ok(()) } @@ -138,7 +140,7 @@ async fn handle_multiple_block_hashes( if delete_duplicates { println!("Deleting duplicated blocks..."); for hash in block_hashes { - steps::delete_block(hash, chain_store)?; + steps::delete_block(hash, chain_store).await?; } } else { eprintln!( @@ -164,11 +166,11 @@ mod steps { /// Multiple block hashes can be returned as the store does not enforce uniqueness based on /// block numbers. /// Returns an empty vector if no block hash is found. - pub(super) fn resolve_block_hash_from_block_number( + pub(super) async fn resolve_block_hash_from_block_number( number: i32, chain_store: &ChainStore, ) -> anyhow::Result> { - let block_hashes = chain_store.block_hashes_by_block_number(number)?; + let block_hashes = chain_store.block_hashes_by_block_number(number).await?; Ok(block_hashes .into_iter() .map(|x| H256::from_slice(&x.as_slice()[..32])) @@ -245,16 +247,16 @@ mod steps { } /// Attempts to delete a block from the block cache. - pub(super) fn delete_block(hash: &H256, chain_store: &ChainStore) -> anyhow::Result<()> { + pub(super) async fn delete_block(hash: &H256, chain_store: &ChainStore) -> anyhow::Result<()> { println!("Deleting block {hash} from cache."); - chain_store.delete_blocks(&[hash])?; + chain_store.delete_blocks(&[hash]).await?; println!("Done."); Ok(()) } /// Queries the [`ChainStore`] about the chain head. 
- pub(super) fn find_chain_head(chain_store: &ChainStore) -> anyhow::Result { - let chain_head: Option = chain_store.chain_head_block(&chain_store.chain)?; + pub(super) async fn find_chain_head(chain_store: &ChainStore) -> anyhow::Result { + let chain_head: Option = chain_store.chain_head_block(&chain_store.chain).await?; chain_head.ok_or_else(|| anyhow!("Could not find the chain head for {}", chain_store.chain)) } } diff --git a/node/src/manager/commands/copy.rs b/node/src/manager/commands/copy.rs index 57f207b5b98..c3fa4cca993 100644 --- a/node/src/manager/commands/copy.rs +++ b/node/src/manager/commands/copy.rs @@ -1,4 +1,5 @@ -use diesel::{ExpressionMethods, JoinOnDsl, OptionalExtension, QueryDsl, RunQueryDsl}; +use diesel::{ExpressionMethods, JoinOnDsl, OptionalExtension, QueryDsl}; +use diesel_async::RunQueryDsl; use std::{collections::HashMap, sync::Arc}; use graph::{ @@ -55,7 +56,7 @@ struct CopyTableState { } impl CopyState { - fn find( + async fn find( pools: &HashMap, shard: &Shard, dst: i32, @@ -67,18 +68,20 @@ impl CopyState { .get(shard) .ok_or_else(|| anyhow!("can not find pool for shard {}", shard))?; - let mut dconn = dpool.get()?; + let mut dconn = dpool.get().await?; let tables = cts::table .filter(cts::dst.eq(dst)) .order_by(cts::entity_type) - .load::(&mut dconn)?; + .load::(&mut dconn) + .await?; - let on_sync = on_sync(&mut dconn, DeploymentId(dst))?; + let on_sync = on_sync(&mut dconn, DeploymentId(dst)).await?; Ok(cs::table .filter(cs::dst.eq(dst)) .get_result::(&mut dconn) + .await .optional()? .map(|state| (state, tables, on_sync))) } @@ -121,8 +124,9 @@ async fn create_inner( let chain_store = store .block_store() .chain_store(network) + .await .ok_or_else(|| anyhow!("could not find chain store for network {}", network))?; - let mut hashes = chain_store.block_hashes_by_block_number(src_number)?; + let mut hashes = chain_store.block_hashes_by_block_number(src_number).await?; let hash = match hashes.len() { 0 => bail!( "could not find a block with number {} in our cache", @@ -146,7 +150,9 @@ async fn create_inner( let shard = Shard::new(shard)?; let node = NodeId::new(node.clone()).map_err(|()| anyhow!("invalid node id `{}`", node))?; - let dst = subgraph_store.copy_deployment(&src, shard, node, base_ptr, on_sync)?; + let dst = subgraph_store + .copy_deployment(&src, shard, node, base_ptr, on_sync) + .await?; println!("created deployment {} as copy of {}", dst, src); Ok(()) @@ -163,7 +169,7 @@ pub async fn create( activate: bool, replace: bool, ) -> Result<(), Error> { - let src = src.locate_unique(&primary)?; + let src = src.locate_unique(&primary).await?; create_inner( store, &src, @@ -178,12 +184,17 @@ pub async fn create( .map_err(|e| anyhow!("cannot copy {src}: {e}")) } -pub fn activate(store: Arc, deployment: String, shard: String) -> Result<(), Error> { +pub async fn activate( + store: Arc, + deployment: String, + shard: String, +) -> Result<(), Error> { let shard = Shard::new(shard)?; let deployment = DeploymentHash::new(deployment).map_err(|s| anyhow!("illegal deployment hash `{}`", s))?; let deployment = store - .locate_in_shard(&deployment, shard.clone())? + .locate_in_shard(&deployment, shard.clone()) + .await? 
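The `by_range` loop in `check_blocks.rs` above is still sequential, as its `FIXME`/`TODO` notes. Now that the per-block work is async, bounded concurrency is a natural follow-up; a hedged sketch, with a hypothetical `check_block` standing in for the hash resolution and diffing:

```rust
// Hedged sketch: bounded-concurrency version of the sequential range check.
// `check_block` is a hypothetical stand-in for the per-block work.
use futures::stream::{self, StreamExt, TryStreamExt};

async fn check_block(number: i32) -> anyhow::Result<()> {
    // hash resolution and cache-vs-provider diffing would go here
    println!("Checking block {number}");
    Ok(())
}

async fn check_range(lower: i32, upper: i32) -> anyhow::Result<()> {
    stream::iter(lower..=upper)
        .map(check_block)
        // Cap in-flight checks so the connection pool is not exhausted.
        .buffer_unordered(8)
        .try_collect::<Vec<()>>()
        .await?;
    Ok(())
}
```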
.ok_or_else(|| { anyhow!( "could not find a copy for {} in shard {}", @@ -191,17 +202,17 @@ pub fn activate(store: Arc, deployment: String, shard: String) -> shard ) })?; - store.activate(&deployment)?; + store.activate(&deployment).await?; println!("activated copy {}", deployment); Ok(()) } -pub fn list(pools: HashMap) -> Result<(), Error> { +pub async fn list(pools: HashMap) -> Result<(), Error> { use catalog::active_copies as ac; use catalog::deployment_schemas as ds; let primary = pools.get(&*PRIMARY_SHARD).expect("there is a primary pool"); - let mut conn = primary.get()?; + let mut conn = primary.get().await?; let copies = ac::table .inner_join(ds::table.on(ds::id.eq(ac::dst))) @@ -213,7 +224,8 @@ pub fn list(pools: HashMap) -> Result<(), Error> { ds::subgraph, ds::shard, )) - .load::<(i32, i32, Option, UtcDateTime, String, Shard)>(&mut conn)?; + .load::<(i32, i32, Option, UtcDateTime, String, Shard)>(&mut conn) + .await?; if copies.is_empty() { println!("no active copies"); } else { @@ -230,7 +242,7 @@ pub fn list(pools: HashMap) -> Result<(), Error> { println!("{:20} | {}", "deployment", deployment_hash); println!("{:20} | sgd{} -> sgd{} ({})", "action", src, dst, shard); - match CopyState::find(&pools, &shard, dst)? { + match CopyState::find(&pools, &shard, dst).await? { Some((state, tables, _)) => match cancelled_at { Some(cancel_requested) => match state.cancelled_at { Some(cancelled_at) => status("cancelled", cancelled_at), @@ -254,7 +266,10 @@ pub fn list(pools: HashMap) -> Result<(), Error> { Ok(()) } -pub fn status(pools: HashMap, dst: &DeploymentSearch) -> Result<(), Error> { +pub async fn status( + pools: HashMap, + dst: &DeploymentSearch, +) -> Result<(), Error> { const CHECK: &str = "✓"; use catalog::active_copies as ac; @@ -263,23 +278,25 @@ pub fn status(pools: HashMap, dst: &DeploymentSearch) -> let primary = pools .get(&*PRIMARY_SHARD) .ok_or_else(|| anyhow!("can not find deployment with id {}", dst))?; - let mut pconn = primary.get()?; - let dst = dst.locate_unique(primary)?.id.0; + let mut pconn = primary.get().await?; + let dst = dst.locate_unique(primary).await?.id.0; let (shard, deployment) = ds::table .filter(ds::id.eq(dst)) .select((ds::shard, ds::subgraph)) - .get_result::<(Shard, String)>(&mut pconn)?; + .get_result::<(Shard, String)>(&mut pconn) + .await?; let (active, cancelled_at) = ac::table .filter(ac::dst.eq(dst)) .select((ac::src, ac::cancelled_at)) .get_result::<(i32, Option)>(&mut pconn) + .await .optional()? .map(|(_, cancelled_at)| (true, cancelled_at)) .unwrap_or((false, None)); - let (state, tables, on_sync) = match CopyState::find(&pools, &shard, dst)? { + let (state, tables, on_sync) = match CopyState::find(&pools, &shard, dst).await? 
{ Some((state, tables, on_sync)) => (state, tables, on_sync), None => { if active { diff --git a/node/src/manager/commands/create.rs b/node/src/manager/commands/create.rs index 02e1184684f..cfaa62aa958 100644 --- a/node/src/manager/commands/create.rs +++ b/node/src/manager/commands/create.rs @@ -3,12 +3,12 @@ use std::sync::Arc; use graph::prelude::{anyhow, Error, SubgraphName, SubgraphStore as _}; use graph_store_postgres::SubgraphStore; -pub fn run(store: Arc, name: String) -> Result<(), Error> { +pub async fn run(store: Arc, name: String) -> Result<(), Error> { let name = SubgraphName::new(name.clone()) .map_err(|()| anyhow!("illegal subgraph name `{}`", name))?; println!("creating subgraph {}", name); - store.create_subgraph(name)?; + store.create_subgraph(name).await?; Ok(()) } diff --git a/node/src/manager/commands/database.rs b/node/src/manager/commands/database.rs index bb1f3b195e3..56df0a73f27 100644 --- a/node/src/manager/commands/database.rs +++ b/node/src/manager/commands/database.rs @@ -45,7 +45,7 @@ pub async fn remap( server.shard, pool.shard ); std::io::stdout().flush().ok(); - if let Err(e) = pool.remap(server) { + if let Err(e) = pool.remap(server).await { println!(" FAILED"); println!(" error: {e}"); if !force { diff --git a/node/src/manager/commands/deploy.rs b/node/src/manager/commands/deploy.rs index 34391e94544..15ddc6b1049 100644 --- a/node/src/manager/commands/deploy.rs +++ b/node/src/manager/commands/deploy.rs @@ -82,7 +82,7 @@ pub async fn run( let subgraph_name = SubgraphName::new(name.clone()).map_err(|_| anyhow!("Invalid subgraph name"))?; - let exists = subgraph_store.subgraph_exists(&subgraph_name)?; + let exists = subgraph_store.subgraph_exists(&subgraph_name).await?; if !exists { println!("Creating subgraph `{}`", name); diff --git a/node/src/manager/commands/deployment/info.rs b/node/src/manager/commands/deployment/info.rs index 27a69c3841a..4d121d3692a 100644 --- a/node/src/manager/commands/deployment/info.rs +++ b/node/src/manager/commands/deployment/info.rs @@ -33,7 +33,7 @@ pub struct Args { pub no_name: bool, } -pub fn run(ctx: Context, args: Args) -> Result<()> { +pub async fn run(ctx: Context, args: Args) -> Result<()> { let Context { primary_pool, store, @@ -59,7 +59,7 @@ pub fn run(ctx: Context, args: Args) -> Result<()> { }; let version = make_deployment_version_selector(current, pending, used); - let deployments = load_deployments(primary_pool.clone(), &deployment, &version)?; + let deployments = load_deployments(primary_pool.clone(), &deployment, &version).await?; if deployments.is_empty() { println!("No matches"); @@ -67,7 +67,7 @@ pub fn run(ctx: Context, args: Args) -> Result<()> { } let statuses = if status { - Some(load_deployment_statuses(store, &deployments)?) + Some(load_deployment_statuses(store, &deployments).await?) 
} else { None }; diff --git a/node/src/manager/commands/deployment/pause.rs b/node/src/manager/commands/deployment/pause.rs index 3e35496113e..2b91680a0f3 100644 --- a/node/src/manager/commands/deployment/pause.rs +++ b/node/src/manager/commands/deployment/pause.rs @@ -8,17 +8,17 @@ use graphman::commands::deployment::pause::{ }; use graphman::deployment::DeploymentSelector; -pub fn run( +pub async fn run( primary_pool: ConnectionPool, notification_sender: Arc, deployment: DeploymentSelector, ) -> Result<()> { - let active_deployment = load_active_deployment(primary_pool.clone(), &deployment); + let active_deployment = load_active_deployment(primary_pool.clone(), &deployment).await; match active_deployment { Ok(active_deployment) => { println!("Pausing deployment {} ...", active_deployment.locator()); - pause_active_deployment(primary_pool, notification_sender, active_deployment)?; + pause_active_deployment(primary_pool, notification_sender, active_deployment).await?; } Err(PauseDeploymentError::AlreadyPaused(locator)) => { println!("Deployment {} is already paused", locator); diff --git a/node/src/manager/commands/deployment/reassign.rs b/node/src/manager/commands/deployment/reassign.rs index 80122fc90b1..da8341422c4 100644 --- a/node/src/manager/commands/deployment/reassign.rs +++ b/node/src/manager/commands/deployment/reassign.rs @@ -9,14 +9,14 @@ use graphman::commands::deployment::reassign::{ }; use graphman::deployment::DeploymentSelector; -pub fn run( +pub async fn run( primary_pool: ConnectionPool, notification_sender: Arc, deployment: DeploymentSelector, node: &NodeId, ) -> Result<()> { - let deployment = load_deployment(primary_pool.clone(), &deployment)?; - let curr_node = deployment.assigned_node(primary_pool.clone())?; + let deployment = load_deployment(primary_pool.clone(), &deployment).await?; + let curr_node = deployment.assigned_node(primary_pool.clone()).await?; let reassign_msg = match &curr_node { Some(curr_node) => format!( "Reassigning deployment {} (was {})", @@ -33,7 +33,8 @@ pub fn run( &deployment, node, curr_node, - )?; + ) + .await?; match reassign_result { ReassignResult::Ok => { diff --git a/node/src/manager/commands/deployment/restart.rs b/node/src/manager/commands/deployment/restart.rs index 5f3783b3e92..12c256103b0 100644 --- a/node/src/manager/commands/deployment/restart.rs +++ b/node/src/manager/commands/deployment/restart.rs @@ -7,7 +7,7 @@ use graph_store_postgres::ConnectionPool; use graph_store_postgres::NotificationSender; use graphman::deployment::DeploymentSelector; -pub fn run( +pub async fn run( primary_pool: ConnectionPool, notification_sender: Arc, deployment: DeploymentSelector, @@ -17,7 +17,8 @@ pub fn run( primary_pool.clone(), notification_sender.clone(), deployment.clone(), - )?; + ) + .await?; println!( "Waiting {}s to make sure pausing was processed ...", @@ -26,7 +27,7 @@ pub fn run( sleep(delay); - super::resume::run(primary_pool, notification_sender, deployment.clone())?; + super::resume::run(primary_pool, notification_sender, deployment.clone()).await?; Ok(()) } diff --git a/node/src/manager/commands/deployment/resume.rs b/node/src/manager/commands/deployment/resume.rs index 01a9924ad51..b83e9e546d9 100644 --- a/node/src/manager/commands/deployment/resume.rs +++ b/node/src/manager/commands/deployment/resume.rs @@ -7,16 +7,16 @@ use graphman::commands::deployment::resume::load_paused_deployment; use graphman::commands::deployment::resume::resume_paused_deployment; use graphman::deployment::DeploymentSelector; -pub fn run( +pub async 
fn run( primary_pool: ConnectionPool, notification_sender: Arc, deployment: DeploymentSelector, ) -> Result<()> { - let paused_deployment = load_paused_deployment(primary_pool.clone(), &deployment)?; + let paused_deployment = load_paused_deployment(primary_pool.clone(), &deployment).await?; println!("Resuming deployment {} ...", paused_deployment.locator()); - resume_paused_deployment(primary_pool, notification_sender, paused_deployment)?; + resume_paused_deployment(primary_pool, notification_sender, paused_deployment).await?; Ok(()) } diff --git a/node/src/manager/commands/deployment/unassign.rs b/node/src/manager/commands/deployment/unassign.rs index 0c27a2f5944..8f68aceec90 100644 --- a/node/src/manager/commands/deployment/unassign.rs +++ b/node/src/manager/commands/deployment/unassign.rs @@ -7,16 +7,16 @@ use graphman::commands::deployment::unassign::load_assigned_deployment; use graphman::commands::deployment::unassign::unassign_deployment; use graphman::deployment::DeploymentSelector; -pub fn run( +pub async fn run( primary_pool: ConnectionPool, notification_sender: Arc, deployment: DeploymentSelector, ) -> Result<()> { - let assigned_deployment = load_assigned_deployment(primary_pool.clone(), &deployment)?; + let assigned_deployment = load_assigned_deployment(primary_pool.clone(), &deployment).await?; println!("Unassigning deployment {}", assigned_deployment.locator()); - unassign_deployment(primary_pool, notification_sender, assigned_deployment)?; + unassign_deployment(primary_pool, notification_sender, assigned_deployment).await?; Ok(()) } diff --git a/node/src/manager/commands/index.rs b/node/src/manager/commands/index.rs index 6aa68137ad1..6a5370895e5 100644 --- a/node/src/manager/commands/index.rs +++ b/node/src/manager/commands/index.rs @@ -39,7 +39,7 @@ pub async fn create( after: Option, ) -> Result<(), anyhow::Error> { validate_fields(&field_names)?; - let deployment_locator = search.locate_unique(&pool)?; + let deployment_locator = search.locate_unique(&pool).await?; println!("Index creation started. 
Please wait."); // If the fields contain the block range column, we use GIN @@ -166,7 +166,7 @@ pub async fn list( Ok(()) } - let deployment_locator = search.locate_unique(&pool)?; + let deployment_locator = search.locate_unique(&pool).await?; let indexes: Vec<_> = { let mut indexes = store .indexes_for_entity(&deployment_locator, entity_name) @@ -207,7 +207,7 @@ pub async fn drop( search: DeploymentSearch, index_name: &str, ) -> Result<(), anyhow::Error> { - let deployment_locator = search.locate_unique(&pool)?; + let deployment_locator = search.locate_unique(&pool).await?; store .drop_index_for_deployment(&deployment_locator, index_name) .await?; diff --git a/node/src/manager/commands/provider_checks.rs b/node/src/manager/commands/provider_checks.rs index 298e797e934..a2541be6c7a 100644 --- a/node/src/manager/commands/provider_checks.rs +++ b/node/src/manager/commands/provider_checks.rs @@ -10,18 +10,14 @@ use graph::components::network_provider::NetworkDetails; use graph::components::network_provider::ProviderCheck; use graph::components::network_provider::ProviderCheckStatus; use graph::prelude::tokio; +use graph::prelude::CheapClone; use graph::prelude::Logger; use graph_store_postgres::BlockStore; use itertools::Itertools; use crate::network_setup::Networks; -pub async fn execute( - logger: &Logger, - networks: &Networks, - store: Arc, - timeout: Duration, -) { +pub async fn execute(logger: &Logger, networks: &Networks, store: BlockStore, timeout: Duration) { let chain_name_iter = networks .adapters .iter() @@ -37,7 +33,7 @@ pub async fn execute( .providers_unchecked(chain_name) .unique_by(|x| x.provider_name()) { - let validator = chain_id_validator(store.clone()); + let validator = chain_id_validator(Box::new(store.cheap_clone())); match tokio::time::timeout( timeout, run_checks(logger, chain_name, adapter, validator.clone()), @@ -58,7 +54,7 @@ pub async fn execute( .providers_unchecked(chain_name) .unique_by(|x| x.provider_name()) { - let validator = chain_id_validator(store.clone()); + let validator = chain_id_validator(Box::new(store.cheap_clone())); match tokio::time::timeout(timeout, run_checks(logger, chain_name, adapter, validator)) .await { @@ -76,7 +72,7 @@ pub async fn execute( .providers_unchecked(chain_name) .unique_by(|x| x.provider_name()) { - let validator = chain_id_validator(store.clone()); + let validator = chain_id_validator(Box::new(store.cheap_clone())); match tokio::time::timeout( timeout, run_checks(logger, chain_name, adapter, validator.clone()), diff --git a/node/src/manager/commands/prune.rs b/node/src/manager/commands/prune.rs index ea46d77d0de..415eccbf984 100644 --- a/node/src/manager/commands/prune.rs +++ b/node/src/manager/commands/prune.rs @@ -169,16 +169,17 @@ struct Args { latest_block: BlockNumber, } -fn check_args( +async fn check_args( store: &Arc, primary_pool: ConnectionPool, search: DeploymentSearch, history: usize, ) -> Result { let history = history as BlockNumber; - let deployment = search.locate_unique(&primary_pool)?; + let deployment = search.locate_unique(&primary_pool).await?; let mut info = store - .status(status::Filter::DeploymentIds(vec![deployment.id]))? + .status(status::Filter::DeploymentIds(vec![deployment.id])) + .await? 
.pop() .ok_or_else(|| anyhow!("deployment {deployment} not found"))?; if info.chains.len() > 1 { @@ -250,7 +251,7 @@ async fn run_inner( once: bool, do_first_prune: bool, ) -> Result<(), anyhow::Error> { - let args = check_args(&store, primary_pool, search, history)?; + let args = check_args(&store, primary_pool, search, history).await?; if do_first_prune { first_prune(&store, &args, rebuild_threshold, delete_threshold).await?; @@ -258,11 +259,10 @@ async fn run_inner( // Only after everything worked out, make the history setting permanent if !once { - store.subgraph_store().set_history_blocks( - &args.deployment, - args.history, - ENV_VARS.reorg_threshold(), - )?; + store + .subgraph_store() + .set_history_blocks(&args.deployment, args.history, ENV_VARS.reorg_threshold()) + .await?; } Ok(()) @@ -333,15 +333,15 @@ pub async fn status( let mut term = Terminal::new(); - let deployment = search.locate_unique(&primary_pool)?; + let deployment = search.locate_unique(&primary_pool).await?; let viewer = store.subgraph_store().prune_viewer(&deployment).await?; - let runs = viewer.runs()?; + let runs = viewer.runs().await?; if runs.is_empty() { return Err(anyhow!("No prune runs found for deployment {deployment}")); } let run = run.unwrap_or(*runs.last().unwrap()); - let Some((state, table_states)) = viewer.state(run)? else { + let Some((state, table_states)) = viewer.state(run).await? else { let runs = match runs.len() { 0 => unreachable!("we checked that runs is not empty"), 1 => format!("There is only one prune run #{}", runs[0]), diff --git a/node/src/manager/commands/remove.rs b/node/src/manager/commands/remove.rs index e89c3642215..3d03bdf6148 100644 --- a/node/src/manager/commands/remove.rs +++ b/node/src/manager/commands/remove.rs @@ -3,11 +3,11 @@ use std::sync::Arc; use graph::prelude::{anyhow, Error, SubgraphName, SubgraphStore as _}; use graph_store_postgres::SubgraphStore; -pub fn run(store: Arc, name: &str) -> Result<(), Error> { +pub async fn run(store: Arc, name: &str) -> Result<(), Error> { let name = SubgraphName::new(name).map_err(|()| anyhow!("illegal subgraph name `{}`", name))?; println!("Removing subgraph {}", name); - store.remove_subgraph(name)?; + store.remove_subgraph(name).await?; Ok(()) } diff --git a/node/src/manager/commands/rewind.rs b/node/src/manager/commands/rewind.rs index 51d432dfd49..236dcd3fdd9 100644 --- a/node/src/manager/commands/rewind.rs +++ b/node/src/manager/commands/rewind.rs @@ -14,7 +14,7 @@ use graph_store_postgres::{BlockStore, NotificationSender}; use graph_store_postgres::{ConnectionPool, Store}; async fn block_ptr( - store: Arc, + store: BlockStore, locators: &HashSet<(String, DeploymentLocator)>, searches: &Vec, hash: &str, @@ -40,7 +40,7 @@ async fn block_ptr( let chain = chains.iter().next().unwrap().to_string(); - let chain_store = match store.chain_store(&chain) { + let chain_store = match store.chain_store(&chain).await { None => bail!("can not find chain store for {}", chain), Some(store) => store, }; @@ -78,7 +78,7 @@ pub async fn run( if !start_block && (block_hash.is_none() || block_number.is_none()) { bail!("--block-hash and --block-number must be specified when --start-block is not set"); } - let pconn = primary.get()?; + let pconn = primary.get().await?; let mut conn = store_catalog::Connection::new(pconn); let subgraph_store = store.subgraph_store(); @@ -87,7 +87,7 @@ pub async fn run( let mut locators = HashSet::new(); for search in &searches { - let results = search.lookup(&primary)?; + let results = 
search.lookup(&primary).await?; let deployment_locators: HashSet<(String, DeploymentLocator)> = results .iter() @@ -127,10 +127,11 @@ pub async fn run( println!("Checking if its safe to rewind deployments"); for (_, locator) in &locators { let site = conn - .locate_site(locator.clone())? + .locate_site(locator.clone()) + .await? .ok_or_else(|| anyhow!("failed to locate site for {locator}"))?; let deployment_store = subgraph_store.for_site(&site)?; - let deployment_details = deployment_store.deployment_details_for_id(locator)?; + let deployment_details = deployment_store.deployment_details_for_id(locator).await?; let block_number_to = block_ptr_to.as_ref().map(|b| b.number).unwrap_or(0); if block_number_to < deployment_details.earliest_block_number + ENV_VARS.reorg_threshold() { @@ -146,7 +147,7 @@ pub async fn run( println!("Pausing deployments"); for (_, locator) in &locators { - pause_or_resume(primary.clone(), &sender, &locator, true)?; + pause_or_resume(primary.clone(), &sender, &locator, true).await?; } // There's no good way to tell that a subgraph has in fact stopped @@ -160,22 +161,28 @@ pub async fn run( println!("\nRewinding deployments"); for (chain, loc) in &locators { let block_store = store.block_store(); - let deployment_details = subgraph_store.load_deployment_by_id(loc.clone().into())?; + let deployment_details = subgraph_store + .load_deployment_by_id(loc.clone().into()) + .await?; let block_ptr_to = block_ptr_to.clone(); - let start_block = deployment_details.start_block.or_else(|| { - block_store - .chain_store(chain) - .and_then(|chain_store| chain_store.genesis_block_ptr().ok()) - }); + let start_block = match deployment_details.start_block { + Some(ptr) => Some(ptr), + None => match block_store.chain_store(chain).await { + Some(chain_store) => chain_store.genesis_block_ptr().await.ok(), + None => None, + }, + }; match (block_ptr_to, start_block) { (Some(block_ptr), _) => { - subgraph_store.rewind(loc.hash.clone(), block_ptr)?; + subgraph_store.rewind(loc.hash.clone(), block_ptr).await?; println!(" ... rewound {}", loc); } (None, Some(start_block_ptr)) => { - subgraph_store.truncate(loc.hash.clone(), start_block_ptr)?; + subgraph_store + .truncate(loc.hash.clone(), start_block_ptr) + .await?; println!(" ... 
truncated {}", loc); } (None, None) => { @@ -186,7 +193,7 @@ pub async fn run( println!("Resuming deployments"); for (_, locator) in &locators { - pause_or_resume(primary.clone(), &sender, locator, false)?; + pause_or_resume(primary.clone(), &sender, locator, false).await?; } Ok(()) } diff --git a/node/src/manager/commands/run.rs b/node/src/manager/commands/run.rs index 060341fb6e0..fd08f635962 100644 --- a/node/src/manager/commands/run.rs +++ b/node/src/manager/commands/run.rs @@ -27,8 +27,8 @@ use graph_core::{ SubgraphRegistrar as IpfsSubgraphRegistrar, }; -fn locate(store: &dyn SubgraphStore, hash: &str) -> Result { - let mut locators = store.locators(hash)?; +async fn locate(store: &dyn SubgraphStore, hash: &str) -> Result { + let mut locators = store.locators(hash).await?; match locators.len() { 0 => bail!("could not find subgraph {hash} we just created"), 1 => Ok(locators.pop().unwrap()), @@ -91,14 +91,14 @@ pub async fn run( let link_resolver = Arc::new(IpfsResolver::new(ipfs_client, env_vars.cheap_clone())); let chain_head_update_listener = store_builder.chain_head_update_listener(); - let network_store = store_builder.network_store(config.chain_ids()); + let network_store = store_builder.network_store(config.chain_ids()).await; let block_store = network_store.block_store(); let mut provider_checks: Vec> = Vec::new(); if env_vars.genesis_validation_enabled { - let store = chain_id_validator(network_store.block_store()); + let store = chain_id_validator(Box::new(network_store.block_store())); provider_checks.push(Arc::new( graph::components::network_provider::GenesisHashCheck::new(store), )); @@ -214,7 +214,7 @@ pub async fn run( ) .await?; - let locator = locate(subgraph_store.as_ref(), &hash)?; + let locator = locate(subgraph_store.as_ref(), &hash).await?; SubgraphAssignmentProvider::start(subgraph_provider.as_ref(), locator, Some(stop_block)).await; @@ -239,7 +239,10 @@ pub async fn run( } info!(&logger, "Removing subgraph {}", name); - subgraph_store.clone().remove_subgraph(subgraph_name)?; + subgraph_store + .clone() + .remove_subgraph(subgraph_name) + .await?; if let Some(host) = metrics_ctx.prometheus_host { let mfs = metrics_ctx.prometheus.gather(); diff --git a/node/src/manager/commands/stats.rs b/node/src/manager/commands/stats.rs index 8200703c180..651a30b45b6 100644 --- a/node/src/manager/commands/stats.rs +++ b/node/src/manager/commands/stats.rs @@ -4,36 +4,35 @@ use std::sync::Arc; use crate::manager::deployment::DeploymentSearch; use crate::manager::fmt; -use diesel::r2d2::ConnectionManager; -use diesel::r2d2::PooledConnection; -use diesel::PgConnection; use graph::components::store::DeploymentLocator; use graph::components::store::VersionStats; use graph::prelude::anyhow; use graph::prelude::CheapClone as _; use graph_store_postgres::command_support::catalog as store_catalog; use graph_store_postgres::command_support::catalog::Site; +use graph_store_postgres::AsyncPgConnection; use graph_store_postgres::ConnectionPool; use graph_store_postgres::Shard; use graph_store_postgres::SubgraphStore; use graph_store_postgres::PRIMARY_SHARD; -fn site_and_conn( +async fn site_and_conn( pools: HashMap, search: &DeploymentSearch, -) -> Result<(Arc, PooledConnection>), anyhow::Error> { +) -> Result<(Arc, AsyncPgConnection), anyhow::Error> { let primary_pool = pools.get(&*PRIMARY_SHARD).unwrap(); - let locator = search.locate_unique(primary_pool)?; + let locator = search.locate_unique(primary_pool).await?; - let pconn = primary_pool.get()?; + let pconn = primary_pool.get().await?; 
let mut conn = store_catalog::Connection::new(pconn); let site = conn - .locate_site(locator)? + .locate_site(locator) + .await? .ok_or_else(|| anyhow!("deployment `{}` does not exist", search))?; let site = Arc::new(site); - let conn = pools.get(&site.shard).unwrap().get()?; + let conn = pools.get(&site.shard).unwrap().get().await?; Ok((site, conn)) } @@ -45,7 +44,7 @@ pub async fn account_like( search: &DeploymentSearch, table: String, ) -> Result<(), anyhow::Error> { - let locator = search.locate_unique(&primary_pool)?; + let locator = search.locate_unique(&primary_pool).await?; store.set_account_like(&locator, &table, !clear).await?; let clear_text = if clear { "cleared" } else { "set" }; @@ -92,31 +91,32 @@ pub fn show_stats( Ok(()) } -pub fn show( +pub async fn show( pools: HashMap, search: &DeploymentSearch, ) -> Result<(), anyhow::Error> { - let (site, mut conn) = site_and_conn(pools, search)?; + let (site, mut conn) = site_and_conn(pools, search).await?; - let catalog = store_catalog::Catalog::load(&mut conn, site.cheap_clone(), false, vec![])?; - let stats = catalog.stats(&mut conn)?; + let catalog = + store_catalog::Catalog::load(&mut conn, site.cheap_clone(), false, vec![]).await?; + let stats = catalog.stats(&mut conn).await?; - let account_like = store_catalog::account_like(&mut conn, &site)?; + let account_like = store_catalog::account_like(&mut conn, &site).await?; show_stats(stats.as_slice(), account_like) } -pub fn analyze( +pub async fn analyze( store: Arc, pool: ConnectionPool, search: DeploymentSearch, entity_name: Option<&str>, ) -> Result<(), anyhow::Error> { - let locator = search.locate_unique(&pool)?; - analyze_loc(store, &locator, entity_name) + let locator = search.locate_unique(&pool).await?; + analyze_loc(store, &locator, entity_name).await } -fn analyze_loc( +async fn analyze_loc( store: Arc, locator: &DeploymentLocator, entity_name: Option<&str>, @@ -125,16 +125,19 @@ fn analyze_loc( Some(entity_name) => println!("Analyzing table sgd{}.{entity_name}", locator.id), None => println!("Analyzing all tables for sgd{}", locator.id), } - store.analyze(locator, entity_name).map_err(|e| anyhow!(e)) + store + .analyze(locator, entity_name) + .await + .map_err(|e| anyhow!(e)) } -pub fn target( +pub async fn target( store: Arc, primary: ConnectionPool, search: &DeploymentSearch, ) -> Result<(), anyhow::Error> { - let locator = search.locate_unique(&primary)?; - let (default, targets) = store.stats_targets(&locator)?; + let locator = search.locate_unique(&primary).await?; + let (default, targets) = store.stats_targets(&locator).await?; let has_targets = targets .values() @@ -166,7 +169,7 @@ pub fn target( Ok(()) } -pub fn set_target( +pub async fn set_target( store: Arc, primary: ConnectionPool, search: &DeploymentSearch, @@ -181,12 +184,14 @@ pub fn set_target( columns }; - let locator = search.locate_unique(&primary)?; + let locator = search.locate_unique(&primary).await?; - store.set_stats_target(&locator, entity, columns, target)?; + store + .set_stats_target(&locator, entity, columns, target) + .await?; if !no_analyze { - analyze_loc(store, &locator, entity)?; + analyze_loc(store, &locator, entity).await?; } Ok(()) } diff --git a/node/src/manager/commands/txn_speed.rs b/node/src/manager/commands/txn_speed.rs index 480d4669a9f..766904cbe6b 100644 --- a/node/src/manager/commands/txn_speed.rs +++ b/node/src/manager/commands/txn_speed.rs @@ -1,17 +1,17 @@ -use diesel::PgConnection; use std::{collections::HashMap, thread::sleep, time::Duration}; +use diesel::dsl::sql; 
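The `pool.get().await` calls above hand out owned async connections instead of r2d2's pooled guards. graph-node wraps this in its own `ConnectionPool`, but the underlying `diesel-async` building block is a deadpool-backed pool; a hedged sketch of a standalone setup, assuming `diesel-async` with its `deadpool` feature:

```rust
// Hedged sketch of a standalone deadpool-backed diesel-async pool; graph-node's
// own ConnectionPool adds more on top (sharding, metrics, etc.).
use diesel_async::pooled_connection::deadpool::Pool;
use diesel_async::pooled_connection::AsyncDieselConnectionManager;
use diesel_async::AsyncPgConnection;

fn make_pool(db_url: &str) -> anyhow::Result<Pool<AsyncPgConnection>> {
    let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new(db_url);
    let pool = Pool::builder(manager)
        .max_size(10) // bound concurrent connections
        .build()?;
    Ok(pool)
}
```

A connection then comes from `pool.get().await?`; the deadpool object derefs to `AsyncPgConnection`, so it can be passed wherever `&mut AsyncPgConnection` is expected.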
use graph::prelude::anyhow; -use graph_store_postgres::ConnectionPool; +use graph_store_postgres::{AsyncPgConnection, ConnectionPool}; use crate::manager::catalog; -pub fn run(pool: ConnectionPool, delay: u64) -> Result<(), anyhow::Error> { - fn query(conn: &mut PgConnection) -> Result, anyhow::Error> { +pub async fn run(pool: ConnectionPool, delay: u64) -> Result<(), anyhow::Error> { + async fn query(conn: &mut AsyncPgConnection) -> Result, anyhow::Error> { use catalog::pg_catalog::pg_stat_database as d; - use diesel::dsl::*; use diesel::sql_types::BigInt; - use diesel::{ExpressionMethods, QueryDsl, RunQueryDsl}; + use diesel::{ExpressionMethods, QueryDsl}; + use diesel_async::RunQueryDsl; let rows = d::table .filter(d::datname.eq_any(vec!["explorer", "graph"])) @@ -21,7 +21,8 @@ pub fn run(pool: ConnectionPool, delay: u64) -> Result<(), anyhow::Error> { sql::("txid_current()::bigint"), )) //.select((d::datname)) - .load::<(Option, i64, i64)>(conn)?; + .load::<(Option, i64, i64)>(conn) + .await?; Ok(rows .into_iter() .map(|(datname, all_txn, write_txn)| { @@ -31,8 +32,8 @@ pub fn run(pool: ConnectionPool, delay: u64) -> Result<(), anyhow::Error> { } let mut speeds = HashMap::new(); - let mut conn = pool.get()?; - for (datname, all_txn, write_txn) in query(&mut conn)? { + let mut conn = pool.get().await?; + for (datname, all_txn, write_txn) in query(&mut conn).await? { speeds.insert(datname, (all_txn, write_txn)); } println!( @@ -42,7 +43,7 @@ pub fn run(pool: ConnectionPool, delay: u64) -> Result<(), anyhow::Error> { sleep(Duration::from_secs(delay)); println!("Number of transactions/minute"); println!("{:10} {:>7} write", "database", "all"); - for (datname, all_txn, write_txn) in query(&mut conn)? { + for (datname, all_txn, write_txn) in query(&mut conn).await? { let (all_speed, write_speed) = speeds .get(&datname) .map(|(all_txn_old, write_txn_old)| { diff --git a/node/src/manager/commands/unused_deployments.rs b/node/src/manager/commands/unused_deployments.rs index e8a6e14a1da..5690a7485af 100644 --- a/node/src/manager/commands/unused_deployments.rs +++ b/node/src/manager/commands/unused_deployments.rs @@ -29,7 +29,7 @@ fn add_row(list: &mut List, deployment: UnusedDeployment) { ]) } -pub fn list( +pub async fn list( store: Arc, existing: bool, deployment: Option, @@ -44,7 +44,7 @@ pub fn list( }, }; - for deployment in store.list_unused_deployments(filter)? { + for deployment in store.list_unused_deployments(filter).await? { add_row(&mut list, deployment); } @@ -57,13 +57,13 @@ pub fn list( Ok(()) } -pub fn record(store: Arc) -> Result<(), Error> { +pub async fn record(store: Arc) -> Result<(), Error> { let mut list = make_list(); println!("Recording unused deployments. This might take a while."); - let recorded = store.record_unused_deployments()?; + let recorded = store.record_unused_deployments().await?; - for unused in store.list_unused_deployments(unused::Filter::New)? { + for unused in store.list_unused_deployments(unused::Filter::New).await? 
{ if recorded.iter().any(|r| r.subgraph == unused.deployment) { add_row(&mut list, unused); } @@ -75,7 +75,7 @@ pub fn record(store: Arc) -> Result<(), Error> { Ok(()) } -pub fn remove( +pub async fn remove( store: Arc, count: usize, deployment: Option<&str>, @@ -85,7 +85,7 @@ pub fn remove( Some(duration) => unused::Filter::UnusedLongerThan(duration), None => unused::Filter::New, }; - let unused = store.list_unused_deployments(filter)?; + let unused = store.list_unused_deployments(filter).await?; let unused = match &deployment { None => unused, Some(deployment) => unused @@ -123,7 +123,7 @@ pub fn remove( } let start = Instant::now(); - match store.remove_deployment(deployment.id) { + match store.remove_deployment(deployment.id).await { Ok(()) => { println!( "done removing {} from {} in {:.1}s\n", diff --git a/node/src/manager/deployment.rs b/node/src/manager/deployment.rs index a7cedbd33f2..eb5b575eb4e 100644 --- a/node/src/manager/deployment.rs +++ b/node/src/manager/deployment.rs @@ -2,8 +2,12 @@ use std::collections::HashSet; use std::fmt; use std::str::FromStr; -use diesel::{dsl::sql, prelude::*}; -use diesel::{sql_types::Text, PgConnection}; +use diesel::dsl::sql; +use diesel::sql_types::Text; +use diesel::{ + ExpressionMethods, JoinOnDsl, NullableExpressionMethods, PgTextExpressionMethods, QueryDsl, +}; +use diesel_async::RunQueryDsl; use graph::components::store::DeploymentId; use graph::{ @@ -12,7 +16,7 @@ use graph::{ }; use graph_store_postgres::command_support::catalog as store_catalog; use graph_store_postgres::unused; -use graph_store_postgres::ConnectionPool; +use graph_store_postgres::{AsyncPgConnection, ConnectionPool}; lazy_static! { // `Qm...` optionally follow by `:$shard` @@ -88,14 +92,14 @@ impl DeploymentSearch { } } - pub fn lookup(&self, primary: &ConnectionPool) -> Result, anyhow::Error> { - let mut conn = primary.get()?; - self.lookup_with_conn(&mut conn) + pub async fn lookup(&self, primary: &ConnectionPool) -> Result, anyhow::Error> { + let mut conn = primary.get().await?; + self.lookup_with_conn(&mut conn).await } - pub fn lookup_with_conn( + pub async fn lookup_with_conn( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, ) -> Result, anyhow::Error> { use store_catalog::deployment_schemas as ds; use store_catalog::subgraph as s; @@ -126,25 +130,25 @@ impl DeploymentSearch { let deployments: Vec = match self { DeploymentSearch::Name { name } => { let pattern = format!("%{}%", name); - query.filter(s::name.ilike(&pattern)).load(conn)? + query.filter(s::name.ilike(&pattern)).load(conn).await? } DeploymentSearch::Hash { hash, shard } => { let query = query.filter(ds::subgraph.eq(&hash)); match shard { - Some(shard) => query.filter(ds::shard.eq(shard)).load(conn)?, - None => query.load(conn)?, + Some(shard) => query.filter(ds::shard.eq(shard)).load(conn).await?, + None => query.load(conn).await?, } } DeploymentSearch::Deployment { namespace } => { - query.filter(ds::name.eq(&namespace)).load(conn)? + query.filter(ds::name.eq(&namespace)).load(conn).await? } - DeploymentSearch::All => query.load(conn)?, + DeploymentSearch::All => query.load(conn).await?, }; Ok(deployments) } /// Finds all [`Deployment`]s for this [`DeploymentSearch`]. 
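`lookup_with_conn` above drives all four search variants through one base query, with the name variant using Postgres `ilike`. A self-contained sketch of that filter over an illustrative schema (not graph-node's actual tables):

```rust
// Illustrative schema only; graph-node's real tables differ.
use diesel::result::QueryResult;
use diesel::{PgTextExpressionMethods, QueryDsl};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

diesel::table! {
    subgraph (id) {
        id -> Int4,
        name -> Text,
    }
}

// Case-insensitive substring match, as in the `Name` search arm above.
async fn find_by_name(
    conn: &mut AsyncPgConnection,
    needle: &str,
) -> QueryResult<Vec<(i32, String)>> {
    let pattern = format!("%{}%", needle);
    subgraph::table
        .filter(subgraph::name.ilike(pattern))
        .select((subgraph::id, subgraph::name))
        .load::<(i32, String)>(conn)
        .await
}
```

Note that only `diesel_async::RunQueryDsl` is imported, never the sync `diesel::RunQueryDsl`; having both in scope makes `.load(...)` ambiguous, which is why the hunks above swap the import rather than adding one.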
- pub fn find( + pub async fn find( &self, pool: ConnectionPool, current: bool, @@ -154,7 +158,7 @@ impl DeploymentSearch { let current = current || used; let pending = pending || used; - let deployments = self.lookup(&pool)?; + let deployments = self.lookup(&pool).await?; // Filter by status; if neither `current` or `pending` are set, list // all deployments let deployments: Vec<_> = deployments @@ -170,9 +174,10 @@ impl DeploymentSearch { } /// Finds a single deployment locator for the given deployment identifier. - pub fn locate_unique(&self, pool: &ConnectionPool) -> anyhow::Result { + pub async fn locate_unique(&self, pool: &ConnectionPool) -> anyhow::Result { let mut locators: Vec = HashSet::::from_iter( - self.lookup(pool)? + self.lookup(pool) + .await? .into_iter() .map(|deployment| deployment.locator()), ) diff --git a/node/src/network_setup.rs b/node/src/network_setup.rs index d086c786f82..63cfe8097b4 100644 --- a/node/src/network_setup.rs +++ b/node/src/network_setup.rs @@ -401,7 +401,7 @@ impl Networks { &self, config: &Arc, logger: &Logger, - store: Arc, + store: BlockStore, logger_factory: &LoggerFactory, metrics_registry: Arc, chain_head_update_listener: Arc, diff --git a/node/src/store_builder.rs b/node/src/store_builder.rs index e1d1d38635f..d54a64f5b06 100644 --- a/node/src/store_builder.rs +++ b/node/src/store_builder.rs @@ -173,7 +173,7 @@ impl StoreBuilder { (store, pools, coord) } - pub fn make_store( + pub async fn make_store( logger: &Logger, pools: HashMap, subgraph_store: Arc, @@ -192,18 +192,18 @@ impl StoreBuilder { let logger = logger.new(o!("component" => "BlockStore")); let chain_store_metrics = Arc::new(ChainStoreMetrics::new(registry)); - let block_store = Arc::new( - DieselBlockStore::new( - logger, - networks, - pools, - subgraph_store.notification_sender(), - chain_store_metrics, - ) - .expect("Creating the BlockStore works"), - ); + let block_store = DieselBlockStore::new( + logger, + networks, + pools, + subgraph_store.notification_sender(), + chain_store_metrics, + ) + .await + .expect("Creating the BlockStore works"); block_store .update_db_version() + .await .expect("Updating `db_version` should work"); Arc::new(DieselStore::new(subgraph_store, block_store)) @@ -292,7 +292,7 @@ impl StoreBuilder { /// Return a store that combines both a `Store` for subgraph data /// and a `BlockStore` for all chain related data - pub fn network_store(self, networks: Vec>) -> Arc { + pub async fn network_store(self, networks: Vec>) -> Arc { Self::make_store( &self.logger, self.pools, @@ -301,6 +301,7 @@ impl StoreBuilder { networks.into_iter().map(Into::into).collect(), self.registry, ) + .await } pub fn subscription_manager(&self) -> Arc { diff --git a/runtime/test/Cargo.toml b/runtime/test/Cargo.toml index be03619a7a9..60311f1d8c4 100644 --- a/runtime/test/Cargo.toml +++ b/runtime/test/Cargo.toml @@ -4,6 +4,7 @@ version.workspace = true edition.workspace = true [dependencies] +async-trait = { workspace = true } semver = "1.0" wasmtime.workspace = true graph = { path = "../../graph" } diff --git a/runtime/test/src/test.rs b/runtime/test/src/test.rs index f2db34af862..4e65c236500 100644 --- a/runtime/test/src/test.rs +++ b/runtime/test/src/test.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use graph::blockchain::BlockTime; use graph::components::metrics::gas::GasMetrics; use graph::components::store::*; @@ -403,12 +404,12 @@ async fn test_json_conversions(api_version: Version, gas_used: u64) { assert_eq!(module.gas_used(), gas_used); } -#[tokio::test] 
+#[graph::test] async fn json_conversions_v0_0_4() { test_json_conversions(API_VERSION_0_0_4, 52976429).await; } -#[tokio::test] +#[graph::test] async fn json_conversions_v0_0_5() { test_json_conversions(API_VERSION_0_0_5, 2289897).await; } @@ -449,12 +450,12 @@ async fn test_json_parsing(api_version: Version, gas_used: u64) { assert_eq!(output, "ERROR: true"); } -#[tokio::test] +#[graph::test] async fn json_parsing_v0_0_4() { test_json_parsing(API_VERSION_0_0_4, 4373087).await; } -#[tokio::test] +#[graph::test] async fn json_parsing_v0_0_5() { test_json_parsing(API_VERSION_0_0_5, 5153540).await; } @@ -477,17 +478,17 @@ async fn test_ipfs_cat(api_version: Version) { assert_eq!(data, "42"); } -#[tokio::test(flavor = "multi_thread")] +#[graph::test] async fn ipfs_cat_v0_0_4() { test_ipfs_cat(API_VERSION_0_0_4).await; } -#[tokio::test(flavor = "multi_thread")] +#[graph::test] async fn ipfs_cat_v0_0_5() { test_ipfs_cat(API_VERSION_0_0_5).await; } -#[tokio::test(flavor = "multi_thread")] +#[graph::test] async fn test_ipfs_block() { let fut = add_files_to_local_ipfs_node_for_testing(["42".as_bytes().to_vec()]); let hash = fut.await.unwrap()[0].hash.to_owned(); @@ -566,7 +567,8 @@ async fn run_ipfs_map( .take_ctx() .take_state() .entity_cache - .as_modifications(0)? + .as_modifications(0) + .await? .modifications; // Bring the modifications into a predictable order (by entity_id) @@ -633,12 +635,12 @@ async fn test_ipfs_map(api_version: Version, json_error_msg: &str) { assert!(format!("{err:?}").contains("invalid CID")); } -#[tokio::test(flavor = "multi_thread")] +#[graph::test] async fn ipfs_map_v0_0_4() { test_ipfs_map(API_VERSION_0_0_4, "JSON value is not a string.").await; } -#[tokio::test(flavor = "multi_thread")] +#[graph::test] async fn ipfs_map_v0_0_5() { test_ipfs_map(API_VERSION_0_0_5, "'id' should not be null").await; } @@ -661,12 +663,12 @@ async fn test_ipfs_fail(api_version: Version) { assert!(ptr.is_null()); } -#[tokio::test(flavor = "multi_thread")] +#[graph::test] async fn ipfs_fail_v0_0_4() { test_ipfs_fail(API_VERSION_0_0_4).await; } -#[tokio::test(flavor = "multi_thread")] +#[graph::test] async fn ipfs_fail_v0_0_5() { test_ipfs_fail(API_VERSION_0_0_5).await; } @@ -691,12 +693,12 @@ async fn test_crypto_keccak256(api_version: Version) { ); } -#[tokio::test] +#[graph::test] async fn crypto_keccak256_v0_0_4() { test_crypto_keccak256(API_VERSION_0_0_4).await; } -#[tokio::test] +#[graph::test] async fn crypto_keccak256_v0_0_5() { test_crypto_keccak256(API_VERSION_0_0_5).await; } @@ -737,7 +739,7 @@ async fn test_big_int_to_hex(api_version: Version, gas_used: u64) { assert_eq!(instance.gas_used(), gas_used); } -#[tokio::test] +#[graph::test] async fn test_big_int_size_limit() { let mut module = test_module( "BigIntSizeLimit", @@ -767,12 +769,12 @@ async fn test_big_int_size_limit() { ); } -#[tokio::test] +#[graph::test] async fn big_int_to_hex_v0_0_4() { test_big_int_to_hex(API_VERSION_0_0_4, 53113760).await; } -#[tokio::test] +#[graph::test] async fn big_int_to_hex_v0_0_5() { test_big_int_to_hex(API_VERSION_0_0_5, 2858580).await; } @@ -833,12 +835,12 @@ async fn test_big_int_arithmetic(api_version: Version, gas_used: u64) { assert_eq!(module.gas_used(), gas_used); } -#[tokio::test] +#[graph::test] async fn big_int_arithmetic_v0_0_4() { test_big_int_arithmetic(API_VERSION_0_0_4, 54962411).await; } -#[tokio::test] +#[graph::test] async fn big_int_arithmetic_v0_0_5() { test_big_int_arithmetic(API_VERSION_0_0_5, 7318364).await; } @@ -863,7 +865,7 @@ async fn test_abort(api_version: 
Version, error_msg: &str) { assert!(format!("{err:?}").contains(error_msg)); } -#[tokio::test] +#[graph::test] async fn abort_v0_0_4() { test_abort( API_VERSION_0_0_4, @@ -872,7 +874,7 @@ async fn abort_v0_0_4() { .await; } -#[tokio::test] +#[graph::test] async fn abort_v0_0_5() { test_abort( API_VERSION_0_0_5, @@ -902,12 +904,12 @@ async fn test_bytes_to_base58(api_version: Version, gas_used: u64) { assert_eq!(module.gas_used(), gas_used); } -#[tokio::test] +#[graph::test] async fn bytes_to_base58_v0_0_4() { test_bytes_to_base58(API_VERSION_0_0_4, 52301689).await; } -#[tokio::test] +#[graph::test] async fn bytes_to_base58_v0_0_5() { test_bytes_to_base58(API_VERSION_0_0_5, 1310019).await; } @@ -971,12 +973,12 @@ async fn run_data_source_create( .drain_created_data_sources()) } -#[tokio::test] +#[graph::test] async fn data_source_create_v0_0_4() { test_data_source_create(API_VERSION_0_0_4, 152102833).await; } -#[tokio::test] +#[graph::test] async fn data_source_create_v0_0_5() { test_data_source_create(API_VERSION_0_0_5, 101450079).await; } @@ -994,7 +996,7 @@ async fn test_ens_name_by_hash(api_version: Version) { let hash = "0x7f0c1b04d1a4926f9c635a030eeb611d4c26e5e73291b32a1c7a4ac56935b5b3"; let name = "dealdrafts"; - test_store::insert_ens_name(hash, name); + test_store::insert_ens_name(hash, name).await; let converted: AscPtr = module.invoke_export1("nameByHash", hash).await; let data: String = module.asc_get(converted).unwrap(); assert_eq!(data, name); @@ -1005,12 +1007,12 @@ async fn test_ens_name_by_hash(api_version: Version) { .is_null()); } -#[tokio::test] +#[graph::test] async fn ens_name_by_hash_v0_0_4() { test_ens_name_by_hash(API_VERSION_0_0_4).await; } -#[tokio::test] +#[graph::test] async fn ens_name_by_hash_v0_0_5() { test_ens_name_by_hash(API_VERSION_0_0_5).await; } @@ -1026,7 +1028,7 @@ async fn test_entity_store(api_version: Version) { ) .await; - let schema = store.input_schema(&deployment.hash).unwrap(); + let schema = store.input_schema(&deployment.hash).await.unwrap(); let alex = entity! { schema => id: "alex", name: "Alex", vid: 0i64 }; let steve = entity! { schema => id: "steve", name: "Steve", vid: 1i64 }; @@ -1081,7 +1083,7 @@ async fn test_entity_store(api_version: Version) { &mut ctx.ctx.state.entity_cache, EntityCache::new(Arc::new(writable.clone())), ); - let mut mods = cache.as_modifications(0).unwrap().modifications; + let mut mods = cache.as_modifications(0).await.unwrap().modifications; assert_eq!(1, mods.len()); match mods.pop().unwrap() { EntityModification::Overwrite { data, .. 
} => { @@ -1102,6 +1104,7 @@ async fn test_entity_store(api_version: Version) { .take_state() .entity_cache .as_modifications(0) + .await .unwrap() .modifications; assert_eq!(1, mods.len()); @@ -1114,12 +1117,12 @@ async fn test_entity_store(api_version: Version) { }; } -#[tokio::test] +#[graph::test] async fn entity_store_v0_0_4() { test_entity_store(API_VERSION_0_0_4).await; } -#[tokio::test] +#[graph::test] async fn entity_store_v0_0_5() { test_entity_store(API_VERSION_0_0_5).await; } @@ -1147,12 +1150,12 @@ fn test_detect_contract_calls(api_version: Version) { ); } -#[tokio::test] +#[graph::test] async fn detect_contract_calls_v0_0_4() { test_detect_contract_calls(API_VERSION_0_0_4); } -#[tokio::test] +#[graph::test] async fn detect_contract_calls_v0_0_5() { test_detect_contract_calls(API_VERSION_0_0_5); } @@ -1175,7 +1178,7 @@ async fn test_allocate_global(api_version: Version) { .unwrap(); } -#[tokio::test] +#[graph::test] async fn allocate_global_v0_0_5() { // Only in apiVersion v0.0.5 because there's no issue in older versions. // The problem with the new one is related to the AS stub runtime `offset` @@ -1199,7 +1202,7 @@ async fn test_null_ptr_read(api_version: Version) -> Result<(), Error> { module.invoke_export0_void("nullPtrRead").await } -#[tokio::test] +#[graph::test] async fn null_ptr_read_0_0_5() { let err = test_null_ptr_read(API_VERSION_0_0_5).await.unwrap_err(); assert!( @@ -1223,7 +1226,7 @@ async fn test_safe_null_ptr_read(api_version: Version) -> Result<(), Error> { module.invoke_export0_void("safeNullPtrRead").await } -#[tokio::test] +#[graph::test] async fn safe_null_ptr_read_0_0_5() { let err = test_safe_null_ptr_read(API_VERSION_0_0_5) .await @@ -1236,14 +1239,14 @@ async fn safe_null_ptr_read_0_0_5() { } #[ignore] // Ignored because of long run time in debug build. -#[tokio::test] +#[graph::test] async fn test_array_blowup() { let mut module = test_module_latest("ArrayBlowup", "array_blowup.wasm").await; let err = module.invoke_export0_void("arrayBlowup").await.unwrap_err(); assert!(format!("{err:?}").contains("Gas limit exceeded. 
Used: 11286295575421")); } -#[tokio::test] +#[graph::test] async fn test_boolean() { let mut module = test_module_latest("boolean", "boolean.wasm").await; @@ -1282,7 +1285,7 @@ async fn test_boolean() { } } -#[tokio::test] +#[graph::test] async fn recursion_limit() { let mut module = test_module_latest("RecursionLimit", "recursion_limit.wasm").await; @@ -1350,17 +1353,17 @@ impl Host { } } - fn store_set( + async fn store_set( &mut self, entity_type: &str, id: &str, data: Vec<(&str, &str)>, ) -> Result<(), HostExportError> { let data: Vec<_> = data.into_iter().map(|(k, v)| (k, Value::from(v))).collect(); - self.store_setv(entity_type, id, data) + self.store_setv(entity_type, id, data).await } - fn store_setv( + async fn store_setv( &mut self, entity_type: &str, id: &str, @@ -1368,31 +1371,35 @@ impl Host { ) -> Result<(), HostExportError> { let id = String::from(id); let data = HashMap::from_iter(data.into_iter().map(|(k, v)| (Word::from(k), v))); - self.host_exports.store_set( - &self.ctx.logger, - 12, // Arbitrary block number - &mut self.ctx.state, - &self.ctx.proof_of_indexing, - entity_type.to_string(), - id, - data, - &self.stopwatch, - &self.gas, - ) + self.host_exports + .store_set( + &self.ctx.logger, + 12, // Arbitrary block number + &mut self.ctx.state, + &self.ctx.proof_of_indexing, + entity_type.to_string(), + id, + data, + &self.stopwatch, + &self.gas, + ) + .await } - fn store_get( + async fn store_get( &mut self, entity_type: &str, id: &str, ) -> Result>, anyhow::Error> { let user_id = String::from(id); - self.host_exports.store_get( - &mut self.ctx.state, - entity_type.to_string(), - user_id, - &self.gas, - ) + self.host_exports + .store_get( + &mut self.ctx.state, + entity_type.to_string(), + user_id, + &self.gas, + ) + .await } } @@ -1404,7 +1411,7 @@ fn err_says(err: E, exp: &str) { /// Test the various ways in which `store_set` sets the `id` of entities and /// errors when there are issues -#[tokio::test] +#[graph::test] async fn test_store_set_id() { const UID: &str = "u1"; const USER: &str = "User"; @@ -1424,17 +1431,20 @@ async fn test_store_set_id() { let mut host = Host::new(schema, "hostStoreSetId", "boolean.wasm", None).await; host.store_set(USER, UID, vec![("id", "u1"), ("name", "user1")]) + .await .expect("setting with same id works"); let err = host .store_set(USER, UID, vec![("id", "ux"), ("name", "user1")]) + .await .expect_err("setting with different id fails"); err_says(err, "conflicts with ID passed"); host.store_set(USER, UID, vec![("name", "user2")]) + .await .expect("setting with no id works"); - let entity = host.store_get(USER, UID).unwrap().unwrap(); + let entity = host.store_get(USER, UID).await.unwrap().unwrap(); assert_eq!( "u1", entity.id().to_string(), @@ -1444,6 +1454,7 @@ async fn test_store_set_id() { let beef = Value::Bytes("0xbeef".parse().unwrap()); let err = host .store_setv(USER, "0xbeef", vec![("id", beef)]) + .await .expect_err("setting with Bytes id fails"); err_says( err, @@ -1451,6 +1462,7 @@ async fn test_store_set_id() { ); host.store_setv(USER, UID, vec![("id", Value::Int(32))]) + .await .expect_err("id must be a string"); // @@ -1460,6 +1472,7 @@ async fn test_store_set_id() { let err = host .store_set(BINARY, BID, vec![("id", BID), ("name", "user1")]) + .await .expect_err("setting with string id in values fails"); err_says( err, @@ -1471,18 +1484,21 @@ async fn test_store_set_id() { BID, vec![("id", bid_bytes), ("name", Value::from("user1"))], ) + .await .expect("setting with bytes id in values works"); let beef = 
Value::Bytes("0xbeef".parse().unwrap()); let err = host .store_setv(BINARY, BID, vec![("id", beef)]) + .await .expect_err("setting with different id fails"); err_says(err, "conflicts with ID passed"); host.store_set(BINARY, BID, vec![("name", "user2")]) + .await .expect("setting with no id works"); - let entity = host.store_get(BINARY, BID).unwrap().unwrap(); + let entity = host.store_get(BINARY, BID).await.unwrap().unwrap(); assert_eq!( BID, entity.id().to_string(), @@ -1491,6 +1507,7 @@ async fn test_store_set_id() { let err = host .store_setv(BINARY, BID, vec![("id", Value::Int(32))]) + .await .expect_err("id must be Bytes"); err_says( err, @@ -1500,7 +1517,7 @@ async fn test_store_set_id() { /// Test setting fields that are not defined in the schema /// This should return an error -#[tokio::test] +#[graph::test] async fn test_store_set_invalid_fields() { const UID: &str = "u1"; const USER: &str = "User"; @@ -1525,6 +1542,7 @@ async fn test_store_set_invalid_fields() { .await; host.store_set(USER, UID, vec![("id", "u1"), ("name", "user1")]) + .await .unwrap(); let err = host @@ -1538,6 +1556,7 @@ async fn test_store_set_invalid_fields() { ("test2", "invalid_field"), ], ) + .await .err() .unwrap(); @@ -1552,6 +1571,7 @@ async fn test_store_set_invalid_fields() { UID, vec![("id", "u1"), ("name", "user1"), ("test3", "invalid_field")], ) + .await .err() .unwrap(); @@ -1577,6 +1597,7 @@ async fn test_store_set_invalid_fields() { ("test2", "invalid_field"), ], ) + .await .err() .is_none(); @@ -1584,7 +1605,7 @@ async fn test_store_set_invalid_fields() { } /// Test generating ids through `store_set` -#[tokio::test] +#[graph::test] async fn generate_id() { const AUTO: &str = "auto"; const INT8: &str = "Int8"; @@ -1607,16 +1628,24 @@ async fn generate_id() { // new id. Note that the types of the ids have an incorrect type, but // that doesn't matter since they get overwritten. 
host.store_set(INT8, AUTO, vec![("id", "u1"), ("name", "int1")]) + .await .expect("setting auto works"); host.store_set(INT8, AUTO, vec![("id", "u1"), ("name", "int2")]) + .await .expect("setting auto works"); host.store_set(BINARY, AUTO, vec![("id", "u1"), ("name", "bin1")]) + .await .expect("setting auto works"); host.store_set(BINARY, AUTO, vec![("id", "u1"), ("name", "bin2")]) + .await .expect("setting auto works"); let entity_cache = host.ctx.state.entity_cache; - let mods = entity_cache.as_modifications(12).unwrap().modifications; + let mods = entity_cache + .as_modifications(12) + .await + .unwrap() + .modifications; let id_map: HashMap<&str, Id> = HashMap::from_iter( vec![ ( @@ -1646,7 +1675,7 @@ async fn generate_id() { } } -#[tokio::test] +#[graph::test] async fn test_store_intf() { const UID: &str = "u1"; const USER: &str = "User"; @@ -1665,16 +1694,19 @@ async fn test_store_intf() { let mut host = Host::new(schema, "hostStoreSetIntf", "boolean.wasm", None).await; host.store_set(PERSON, UID, vec![("id", "u1"), ("name", "user1")]) + .await .expect_err("can not use store_set with an interface"); host.store_set(USER, UID, vec![("id", "u1"), ("name", "user1")]) + .await .expect("storing user works"); host.store_get(PERSON, UID) + .await .expect_err("store_get with interface does not work"); } -#[tokio::test] +#[graph::test] async fn test_store_ts() { const DATA: &str = "Data"; const STATS: &str = "Stats"; @@ -1711,6 +1743,7 @@ async fn test_store_ts() { ("amount", b20.clone()), ], ) + .await .expect("Setting 'Data' is allowed"); // This is very backhanded: we generate an id the same way that @@ -1718,11 +1751,16 @@ async fn test_store_ts() { let did = IdType::Int8.generate_id(12, 0).unwrap(); // Set overrides the user-supplied timestamp for timeseries - let data = host.store_get(DATA, &did.to_string()).unwrap().unwrap(); + let data = host + .store_get(DATA, &did.to_string()) + .await + .unwrap() + .unwrap(); assert_eq!(Some(&Value::from(block_time)), data.get("timestamp")); let err = host .store_setv(STATS, SID, vec![("amount", b20)]) + .await .expect_err("store_set must fail for aggregations"); err_says( err, @@ -1731,6 +1769,7 @@ async fn test_store_ts() { let err = host .store_get(STATS, SID) + .await .expect_err("store_get must fail for timeseries"); err_says( err, @@ -1797,12 +1836,12 @@ async fn test_yaml_parsing(api_version: Version, gas_used: u64) { assert_eq!(module.gas_used(), gas_used, "gas used"); } -#[tokio::test] +#[graph::test] async fn yaml_parsing_v0_0_4() { test_yaml_parsing(API_VERSION_0_0_4, 10462217077171).await; } -#[tokio::test] +#[graph::test] async fn yaml_parsing_v0_0_5() { test_yaml_parsing(API_VERSION_0_0_5, 10462245390665).await; } diff --git a/runtime/test/src/test/abi.rs b/runtime/test/src/test/abi.rs index 422bd25b2d1..add915f07e2 100644 --- a/runtime/test/src/test/abi.rs +++ b/runtime/test/src/test/abi.rs @@ -32,12 +32,12 @@ async fn test_unbounded_loop(api_version: Version) { ); } -#[tokio::test(flavor = "multi_thread")] +#[graph::test] async fn unbounded_loop_v0_0_4() { test_unbounded_loop(API_VERSION_0_0_4).await; } -#[tokio::test(flavor = "multi_thread")] +#[graph::test] async fn unbounded_loop_v0_0_5() { test_unbounded_loop(API_VERSION_0_0_5).await; } @@ -66,12 +66,12 @@ async fn test_unbounded_recursion(api_version: Version) { ); } -#[tokio::test] +#[graph::test] async fn unbounded_recursion_v0_0_4() { test_unbounded_recursion(API_VERSION_0_0_4).await; } -#[tokio::test] +#[graph::test] async fn unbounded_recursion_v0_0_5() { 
test_unbounded_recursion(API_VERSION_0_0_5).await; } @@ -110,12 +110,12 @@ async fn test_abi_array(api_version: Version, gas_used: u64) { ); } -#[tokio::test] +#[graph::test] async fn abi_array_v0_0_4() { test_abi_array(API_VERSION_0_0_4, 695935).await; } -#[tokio::test] +#[graph::test] async fn abi_array_v0_0_5() { test_abi_array(API_VERSION_0_0_5, 1636130).await; } @@ -140,12 +140,12 @@ async fn test_abi_subarray(api_version: Version) { assert_eq!(new_vec, vec![3]); } -#[tokio::test] +#[graph::test] async fn abi_subarray_v0_0_4() { test_abi_subarray(API_VERSION_0_0_4).await; } -#[tokio::test] +#[graph::test] async fn abi_subarray_v0_0_5() { test_abi_subarray(API_VERSION_0_0_5).await; } @@ -172,12 +172,12 @@ async fn test_abi_bytes_and_fixed_bytes(api_version: Version) { assert_eq!(new_vec, concated); } -#[tokio::test] +#[graph::test] async fn abi_bytes_and_fixed_bytes_v0_0_4() { test_abi_bytes_and_fixed_bytes(API_VERSION_0_0_4).await; } -#[tokio::test] +#[graph::test] async fn abi_bytes_and_fixed_bytes_v0_0_5() { test_abi_bytes_and_fixed_bytes(API_VERSION_0_0_5).await; } @@ -298,14 +298,14 @@ async fn test_abi_ethabi_token_identity(api_version: Version) { /// Test a roundtrip Token -> Payload -> Token identity conversion through asc, /// and assert the final token is the same as the starting one. -#[tokio::test] +#[graph::test] async fn abi_ethabi_token_identity_v0_0_4() { test_abi_ethabi_token_identity(API_VERSION_0_0_4).await; } /// Test a roundtrip Token -> Payload -> Token identity conversion through asc, /// and assert the final token is the same as the starting one. -#[tokio::test] +#[graph::test] async fn abi_ethabi_token_identity_v0_0_5() { test_abi_ethabi_token_identity(API_VERSION_0_0_5).await; } @@ -427,12 +427,12 @@ async fn test_abi_store_value(api_version: Version) { ); } -#[tokio::test] +#[graph::test] async fn abi_store_value_v0_0_4() { test_abi_store_value(API_VERSION_0_0_4).await; } -#[tokio::test] +#[graph::test] async fn abi_store_value_v0_0_5() { test_abi_store_value(API_VERSION_0_0_5).await; } @@ -461,12 +461,12 @@ async fn test_abi_h160(api_version: Version) { ) } -#[tokio::test] +#[graph::test] async fn abi_h160_v0_0_4() { test_abi_h160(API_VERSION_0_0_4).await; } -#[tokio::test] +#[graph::test] async fn abi_h160_v0_0_5() { test_abi_h160(API_VERSION_0_0_5).await; } @@ -487,12 +487,12 @@ async fn test_string(api_version: Version) { assert_eq!(doubled_string, string.repeat(2)); } -#[tokio::test] +#[graph::test] async fn string_v0_0_4() { test_string(API_VERSION_0_0_4).await; } -#[tokio::test] +#[graph::test] async fn string_v0_0_5() { test_string(API_VERSION_0_0_5).await; } @@ -527,12 +527,12 @@ async fn test_abi_big_int(api_version: Version) { assert_eq!(new_uint, new_uint_from_u256); } -#[tokio::test] +#[graph::test] async fn abi_big_int_v0_0_4() { test_abi_big_int(API_VERSION_0_0_4).await; } -#[tokio::test] +#[graph::test] async fn abi_big_int_v0_0_5() { test_abi_big_int(API_VERSION_0_0_5).await; } @@ -555,12 +555,12 @@ async fn test_big_int_to_string(api_version: Version) { assert_eq!(string, big_int_str); } -#[tokio::test] +#[graph::test] async fn big_int_to_string_v0_0_4() { test_big_int_to_string(API_VERSION_0_0_4).await; } -#[tokio::test] +#[graph::test] async fn big_int_to_string_v0_0_5() { test_big_int_to_string(API_VERSION_0_0_5).await; } @@ -590,7 +590,7 @@ async fn test_invalid_discriminant(api_version: Version) { // This should panic rather than exhibiting UB. 
It's hard to test for UB, but // when reproducing a SIGILL was observed which would be caught by this. -#[tokio::test] +#[graph::test] #[should_panic] async fn invalid_discriminant_v0_0_4() { test_invalid_discriminant(API_VERSION_0_0_4).await; @@ -598,7 +598,7 @@ async fn invalid_discriminant_v0_0_4() { // This should panic rather than exhibiting UB. It's hard to test for UB, but // when reproducing a SIGILL was observed which would be caught by this. -#[tokio::test] +#[graph::test] #[should_panic] async fn invalid_discriminant_v0_0_5() { test_invalid_discriminant(API_VERSION_0_0_5).await; diff --git a/runtime/test/src/test_padding.rs b/runtime/test/src/test_padding.rs index bf633d3dc73..ef750674178 100644 --- a/runtime/test/src/test_padding.rs +++ b/runtime/test/src/test_padding.rs @@ -1,5 +1,4 @@ use crate::protobuf; -use graph::prelude::tokio; use wasmtime::AsContextMut; use self::data::BadFixed; @@ -60,11 +59,12 @@ pub mod data { IndexForAscTypeId::UnitTestNetworkUnitTestTypeBool; } + use async_trait::async_trait; + use graph::runtime::HostExportError; pub use graph::runtime::{ asc_new, gas::GasCounter, AscHeap, AscIndexId, AscPtr, AscType, AscValue, DeterministicHostError, IndexForAscTypeId, ToAscObj, }; - use graph::{prelude::async_trait, runtime::HostExportError}; use graph_runtime_wasm::asc_abi::class::AscString; #[async_trait] @@ -144,22 +144,22 @@ pub mod data { } } -#[tokio::test] +#[graph::test] async fn test_v5_manual_padding_manualy_fixed_ok() { manual_padding_manualy_fixed_ok(super::test::API_VERSION_0_0_5).await } -#[tokio::test] +#[graph::test] async fn test_v4_manual_padding_manualy_fixed_ok() { manual_padding_manualy_fixed_ok(super::test::API_VERSION_0_0_4).await } -#[tokio::test] +#[graph::test] async fn test_v5_manual_padding_should_fail() { manual_padding_should_fail(super::test::API_VERSION_0_0_5).await } -#[tokio::test] +#[graph::test] async fn test_v4_manual_padding_should_fail() { manual_padding_should_fail(super::test::API_VERSION_0_0_4).await } diff --git a/runtime/wasm/Cargo.toml b/runtime/wasm/Cargo.toml index d82df81c164..e2260a7bb59 100644 --- a/runtime/wasm/Cargo.toml +++ b/runtime/wasm/Cargo.toml @@ -4,7 +4,7 @@ version.workspace = true edition.workspace = true [dependencies] -async-trait = "0.1.50" +async-trait = { workspace = true } ethabi = "17.2" hex = "0.4.3" graph = { path = "../../graph" } diff --git a/runtime/wasm/src/host_exports.rs b/runtime/wasm/src/host_exports.rs index cdc6b5379d5..43e235c6299 100644 --- a/runtime/wasm/src/host_exports.rs +++ b/runtime/wasm/src/host_exports.rs @@ -222,7 +222,7 @@ impl HostExports { ))) } - pub(crate) fn store_set( + pub(crate) async fn store_set( &self, logger: &Logger, block: BlockNumber, @@ -337,12 +337,15 @@ impl HostExports { state.metrics.track_entity_write(&entity_type, &entity); - state.entity_cache.set( - key, - entity, - block, - Some(&mut state.write_capacity_remaining), - )?; + state + .entity_cache + .set( + key, + entity, + block, + Some(&mut state.write_capacity_remaining), + ) + .await?; Ok(()) } @@ -382,7 +385,7 @@ impl HostExports { Ok(()) } - pub(crate) fn store_get<'a>( + pub(crate) async fn store_get<'a>( &self, state: &'a mut BlockState, entity_type: String, @@ -396,7 +399,7 @@ impl HostExports { let store_key = entity_type.parse_key_in(entity_id, self.data_source.causality_region)?; self.check_entity_type_access(&store_key.entity_type)?; - let result = state.entity_cache.get(&store_key, scope)?; + let result = state.entity_cache.get(&store_key, scope).await?; Self::track_gas_and_ops( gas, 
@@ -415,7 +418,7 @@ impl HostExports { Ok(result) } - pub(crate) fn store_load_related( + pub(crate) async fn store_load_related( &self, state: &mut BlockState, entity_type: String, @@ -433,7 +436,7 @@ impl HostExports { }; self.check_entity_type_access(&store_key.entity_type)?; - let result = state.entity_cache.load_related(&store_key)?; + let result = state.entity_cache.load_related(&store_key).await?; Self::track_gas_and_ops( gas, @@ -1056,18 +1059,18 @@ impl HostExports { Ok(()) } - pub(crate) fn ens_name_by_hash( + pub(crate) async fn ens_name_by_hash( &self, hash: &str, gas: &GasCounter, state: &mut BlockState, ) -> Result<Option<String>, anyhow::Error> { Self::track_gas_and_ops(gas, state, gas::ENS_NAME_BY_HASH, "ens_name_by_hash")?; - Ok(self.ens_lookup.find_name(hash)?) + Ok(self.ens_lookup.find_name(hash).await?) } - pub(crate) fn is_ens_data_empty(&self) -> Result<bool, anyhow::Error> { - Ok(self.ens_lookup.is_table_empty()?) + pub(crate) async fn is_ens_data_empty(&self) -> Result<bool, anyhow::Error> { + Ok(self.ens_lookup.is_table_empty().await?) } pub(crate) fn log_log( @@ -1335,7 +1338,7 @@ pub mod test_support { } } - pub fn store_set( + pub async fn store_set( &self, logger: &Logger, block: BlockNumber, @@ -1347,21 +1350,23 @@ pub mod test_support { stopwatch: &StopwatchMetrics, gas: &GasCounter, ) -> Result<(), HostExportError> { - self.host_exports.store_set( - logger, - block, - state, - proof_of_indexing, - self.block_time, - entity_type, - entity_id, - data, - stopwatch, - gas, - ) + self.host_exports + .store_set( + logger, + block, + state, + proof_of_indexing, + self.block_time, + entity_type, + entity_id, + data, + stopwatch, + gas, + ) + .await } - pub fn store_get( + pub async fn store_get( &self, state: &mut BlockState, entity_type: String, @@ -1370,6 +1375,7 @@ pub mod test_support { ) -> Result<Option<Arc<Entity>>, anyhow::Error> { self.host_exports .store_get(state, entity_type, entity_id, gas, GetScope::Store) + .await } } } diff --git a/runtime/wasm/src/module/context.rs b/runtime/wasm/src/module/context.rs index 881d7eb6c88..9ecb04782ef 100644 --- a/runtime/wasm/src/module/context.rs +++ b/runtime/wasm/src/module/context.rs @@ -148,13 +148,15 @@ impl WasmInstanceContext<'_> { let entity_type: String = asc_get(self, entity_ptr, gas)?; let id: String = asc_get(self, id_ptr, gas)?; - let entity_option = host_exports.store_get( - &mut self.as_mut().ctx.state, - entity_type.clone(), - id.clone(), - gas, - scope, - )?; + let entity_option = host_exports + .store_get( + &mut self.as_mut().ctx.state, + entity_type.clone(), + id.clone(), + gas, + scope, + ) + .await?; if self.as_ref().ctx.instrument { debug!(self.as_ref().ctx.logger, "store_get"; @@ -172,7 +174,7 @@ impl WasmInstanceContext<'_> { } None => match &debug_fork { Some(fork) => { - let entity_option = fork.fetch(entity_type, id).map_err(|e| { + let entity_option = fork.fetch(entity_type, id).await.map_err(|e| { HostExportError::Unknown(anyhow!( "store_get: failed to fetch entity from the debug fork: {}", e @@ -265,18 +267,20 @@ impl WasmInstanceContext<'_> { let host_exports = self.as_ref().ctx.host_exports.cheap_clone(); let ctx = &mut self.as_mut().ctx; - host_exports.store_set( - &logger, - block_number, - &mut ctx.state, - &ctx.proof_of_indexing, - ctx.timestamp, - entity, - id, - data, - &stopwatch, - gas, - )?; + host_exports + .store_set( + &logger, + block_number, + &mut ctx.state, + &ctx.proof_of_indexing, + ctx.timestamp, + entity, + id, + data, + &stopwatch, + gas, + ) + .await?; Ok(()) } @@ -344,13 +348,15 @@ impl WasmInstanceContext<'_> { let id: String =
asc_get(self, id_ptr, gas)?; let field: String = asc_get(self, field_ptr, gas)?; let host_exports = self.as_ref().ctx.host_exports.cheap_clone(); - let entities = host_exports.store_load_related( - &mut self.as_mut().ctx.state, - entity_type.clone(), - id.clone(), - field.clone(), - gas, - )?; + let entities = host_exports + .store_load_related( + &mut self.as_mut().ctx.state, + entity_type.clone(), + id.clone(), + field.clone(), + gas, + ) + .await?; let entities: Vec<Vec<(Word, Value)>> = entities.into_iter().map(|entity| entity.sorted()).collect(); @@ -1100,8 +1106,10 @@ impl WasmInstanceContext<'_> { let hash: String = asc_get(self, hash_ptr, gas)?; let host_exports = self.as_ref().ctx.host_exports.cheap_clone(); let ctx = &mut self.as_mut().ctx; - let name = host_exports.ens_name_by_hash(&hash, gas, &mut ctx.state)?; - if name.is_none() && self.as_ref().ctx.host_exports.is_ens_data_empty()? { + let name = host_exports + .ens_name_by_hash(&hash, gas, &mut ctx.state) + .await?; + if name.is_none() && self.as_ref().ctx.host_exports.is_ens_data_empty().await? { return Err(anyhow!( "Missing ENS data: see https://github.com/graphprotocol/ens-rainbow" ) diff --git a/runtime/wasm/src/module/mod.rs b/runtime/wasm/src/module/mod.rs index 3b64451571d..86bf4055e5a 100644 --- a/runtime/wasm/src/module/mod.rs +++ b/runtime/wasm/src/module/mod.rs @@ -3,6 +3,7 @@ use std::mem::MaybeUninit; use anyhow::anyhow; use anyhow::Error; +use async_trait::async_trait; use graph::blockchain::Blockchain; use graph::data_source::subgraph; use graph::parking_lot::RwLock; diff --git a/server/graphman/Cargo.toml b/server/graphman/Cargo.toml index 231ef5e0828..1e5bd2db23e 100644 --- a/server/graphman/Cargo.toml +++ b/server/graphman/Cargo.toml @@ -21,6 +21,7 @@ tower-http = { workspace = true } [dev-dependencies] diesel = { workspace = true } +diesel-async = { workspace = true } lazy_static = { workspace = true } reqwest = { workspace = true } serde = { workspace = true } diff --git a/server/graphman/src/resolvers/deployment_mutation.rs b/server/graphman/src/resolvers/deployment_mutation.rs index bb1d91cfe4b..aa716c286d0 100644 --- a/server/graphman/src/resolvers/deployment_mutation.rs +++ b/server/graphman/src/resolvers/deployment_mutation.rs @@ -43,7 +43,7 @@ impl DeploymentMutation { let ctx = GraphmanContext::new(ctx)?; let deployment = deployment.try_into()?; - pause::run(&ctx, &deployment)?; + pause::run(&ctx, &deployment).await?; Ok(EmptyResponse::new()) } @@ -57,7 +57,7 @@ impl DeploymentMutation { let ctx = GraphmanContext::new(ctx)?; let deployment = deployment.try_into()?; - resume::run(&ctx, &deployment)?; + resume::run(&ctx, &deployment).await?; Ok(EmptyResponse::new()) } @@ -84,14 +84,14 @@ impl DeploymentMutation { /// Create a subgraph pub async fn create(&self, ctx: &Context<'_>, name: String) -> Result<EmptyResponse> { let ctx = GraphmanContext::new(ctx)?; - create::run(&ctx, &name)?; + create::run(&ctx, &name).await?; Ok(EmptyResponse::new()) } /// Remove a subgraph pub async fn remove(&self, ctx: &Context<'_>, name: String) -> Result<EmptyResponse> { let ctx = GraphmanContext::new(ctx)?; - remove::run(&ctx, &name)?; + remove::run(&ctx, &name).await?; Ok(EmptyResponse::new()) } @@ -104,7 +104,7 @@ impl DeploymentMutation { let ctx = GraphmanContext::new(ctx)?; let deployment = deployment.try_into()?; - unassign::run(&ctx, &deployment)?; + unassign::run(&ctx, &deployment).await?; Ok(EmptyResponse::new()) } @@ -119,7 +119,7 @@ impl DeploymentMutation { let ctx = GraphmanContext::new(ctx)?; let deployment = deployment.try_into()?; let node =
NodeId::new(node.clone()).map_err(|()| anyhow!("illegal node id `{}`", node))?; - let reassign_result = reassign::run(&ctx, &deployment, &node)?; + let reassign_result = reassign::run(&ctx, &deployment, &node).await?; match reassign_result { ReassignResult::CompletedWithWarnings(warnings) => Ok( ReassignResponse::CompletedWithWarnings(CompletedWithWarnings::new(warnings)), diff --git a/server/graphman/src/resolvers/deployment_mutation/create.rs b/server/graphman/src/resolvers/deployment_mutation/create.rs index 0488c094535..3f8cc5d8e20 100644 --- a/server/graphman/src/resolvers/deployment_mutation/create.rs +++ b/server/graphman/src/resolvers/deployment_mutation/create.rs @@ -6,8 +6,8 @@ use graph_store_postgres::command_support::catalog; use crate::resolvers::context::GraphmanContext; use graphman::GraphmanError; -pub fn run(ctx: &GraphmanContext, name: &String) -> Result<()> { - let primary_pool = ctx.primary_pool.get().map_err(GraphmanError::from)?; +pub async fn run(ctx: &GraphmanContext, name: &String) -> Result<()> { + let primary_pool = ctx.primary_pool.get().await.map_err(GraphmanError::from)?; let mut catalog_conn = catalog::Connection::new(primary_pool); let name = match SubgraphName::new(name) { @@ -20,7 +20,7 @@ pub fn run(ctx: &GraphmanContext, name: &String) -> Result<()> { } }; - catalog_conn.create_subgraph(&name)?; + catalog_conn.create_subgraph(&name).await?; Ok(()) } diff --git a/server/graphman/src/resolvers/deployment_mutation/pause.rs b/server/graphman/src/resolvers/deployment_mutation/pause.rs index c16c505c178..71327e62f7e 100644 --- a/server/graphman/src/resolvers/deployment_mutation/pause.rs +++ b/server/graphman/src/resolvers/deployment_mutation/pause.rs @@ -6,8 +6,8 @@ use graphman::deployment::DeploymentSelector; use crate::resolvers::context::GraphmanContext; -pub fn run(ctx: &GraphmanContext, deployment: &DeploymentSelector) -> Result<()> { - let active_deployment = load_active_deployment(ctx.primary_pool.clone(), deployment); +pub async fn run(ctx: &GraphmanContext, deployment: &DeploymentSelector) -> Result<()> { + let active_deployment = load_active_deployment(ctx.primary_pool.clone(), deployment).await; match active_deployment { Ok(active_deployment) => { @@ -15,7 +15,8 @@ pub fn run(ctx: &GraphmanContext, deployment: &DeploymentSelector) -> Result<()> ctx.primary_pool.clone(), ctx.notification_sender.clone(), active_deployment, - )?; + ) + .await?; } Err(PauseDeploymentError::AlreadyPaused(_)) => { return Ok(()); diff --git a/server/graphman/src/resolvers/deployment_mutation/reassign.rs b/server/graphman/src/resolvers/deployment_mutation/reassign.rs index 026ef94ed9f..8a1d3459479 100644 --- a/server/graphman/src/resolvers/deployment_mutation/reassign.rs +++ b/server/graphman/src/resolvers/deployment_mutation/reassign.rs @@ -8,13 +8,13 @@ use graphman::deployment::DeploymentSelector; use crate::resolvers::context::GraphmanContext; -pub fn run( +pub async fn run( ctx: &GraphmanContext, deployment: &DeploymentSelector, node: &NodeId, ) -> Result<ReassignResult> { - let deployment = load_deployment(ctx.primary_pool.clone(), deployment)?; - let curr_node = deployment.assigned_node(ctx.primary_pool.clone())?; + let deployment = load_deployment(ctx.primary_pool.clone(), deployment).await?; + let curr_node = deployment.assigned_node(ctx.primary_pool.clone()).await?; let reassign_result = reassign_deployment( ctx.primary_pool.clone(), @@ -22,6 +22,7 @@ pub fn run( &deployment, &node, curr_node, - )?; + ) + .await?; Ok(reassign_result) } diff --git
a/server/graphman/src/resolvers/deployment_mutation/remove.rs b/server/graphman/src/resolvers/deployment_mutation/remove.rs index 0e5c02fea40..e889896b3aa 100644 --- a/server/graphman/src/resolvers/deployment_mutation/remove.rs +++ b/server/graphman/src/resolvers/deployment_mutation/remove.rs @@ -6,8 +6,8 @@ use graph_store_postgres::command_support::catalog; use crate::resolvers::context::GraphmanContext; use graphman::GraphmanError; -pub fn run(ctx: &GraphmanContext, name: &String) -> Result<()> { - let primary_pool = ctx.primary_pool.get().map_err(GraphmanError::from)?; +pub async fn run(ctx: &GraphmanContext, name: &String) -> Result<()> { + let primary_pool = ctx.primary_pool.get().await.map_err(GraphmanError::from)?; let mut catalog_conn = catalog::Connection::new(primary_pool); let name = match SubgraphName::new(name) { @@ -20,8 +20,10 @@ pub fn run(ctx: &GraphmanContext, name: &String) -> Result<()> { } }; - let changes = catalog_conn.remove_subgraph(name)?; - catalog_conn.send_store_event(&ctx.notification_sender, &StoreEvent::new(changes))?; + let changes = catalog_conn.remove_subgraph(name).await?; + catalog_conn + .send_store_event(&ctx.notification_sender, &StoreEvent::new(changes)) + .await?; Ok(()) } diff --git a/server/graphman/src/resolvers/deployment_mutation/restart.rs b/server/graphman/src/resolvers/deployment_mutation/restart.rs index aa1241deb14..3f92b9403e7 100644 --- a/server/graphman/src/resolvers/deployment_mutation/restart.rs +++ b/server/graphman/src/resolvers/deployment_mutation/restart.rs @@ -17,7 +17,7 @@ pub async fn run_in_background( deployment: DeploymentSelector, delay_seconds: u64, ) -> Result<ExecutionId> { - let id = store.new_execution(CommandKind::RestartDeployment)?; + let id = store.new_execution(CommandKind::RestartDeployment).await?; graph::spawn(async move { let tracker = GraphmanExecutionTracker::new(store, id); @@ -25,10 +25,10 @@ pub async fn run_in_background( match result { Ok(()) => { - tracker.track_success().unwrap(); + tracker.track_success().await.unwrap(); } Err(err) => { - tracker.track_failure(format!("{err:#?}")).unwrap(); + tracker.track_failure(format!("{err:#?}")).await.unwrap(); } }; }); @@ -41,11 +41,11 @@ async fn run( deployment: &DeploymentSelector, delay_seconds: u64, ) -> Result<()> { - super::pause::run(ctx, deployment)?; + super::pause::run(ctx, deployment).await?; tokio::time::sleep(Duration::from_secs(delay_seconds)).await; - super::resume::run(ctx, deployment)?; + super::resume::run(ctx, deployment).await?; Ok(()) } diff --git a/server/graphman/src/resolvers/deployment_mutation/resume.rs b/server/graphman/src/resolvers/deployment_mutation/resume.rs index 45fa30d5e7f..1a39426be43 100644 --- a/server/graphman/src/resolvers/deployment_mutation/resume.rs +++ b/server/graphman/src/resolvers/deployment_mutation/resume.rs @@ -5,14 +5,15 @@ use graphman::deployment::DeploymentSelector; use crate::resolvers::context::GraphmanContext; -pub fn run(ctx: &GraphmanContext, deployment: &DeploymentSelector) -> Result<()> { - let paused_deployment = load_paused_deployment(ctx.primary_pool.clone(), deployment)?; +pub async fn run(ctx: &GraphmanContext, deployment: &DeploymentSelector) -> Result<()> { + let paused_deployment = load_paused_deployment(ctx.primary_pool.clone(), deployment).await?; resume_paused_deployment( ctx.primary_pool.clone(), ctx.notification_sender.clone(), paused_deployment, - )?; + ) + .await?; Ok(()) } diff --git a/server/graphman/src/resolvers/deployment_mutation/unassign.rs
b/server/graphman/src/resolvers/deployment_mutation/unassign.rs index 4af620e8568..73598259df8 100644 --- a/server/graphman/src/resolvers/deployment_mutation/unassign.rs +++ b/server/graphman/src/resolvers/deployment_mutation/unassign.rs @@ -5,13 +5,14 @@ use graphman::deployment::DeploymentSelector; use crate::resolvers::context::GraphmanContext; -pub fn run(ctx: &GraphmanContext, deployment: &DeploymentSelector) -> Result<()> { - let deployment = load_assigned_deployment(ctx.primary_pool.clone(), deployment)?; +pub async fn run(ctx: &GraphmanContext, deployment: &DeploymentSelector) -> Result<()> { + let deployment = load_assigned_deployment(ctx.primary_pool.clone(), deployment).await?; unassign_deployment( ctx.primary_pool.clone(), ctx.notification_sender.clone(), deployment, - )?; + ) + .await?; Ok(()) } diff --git a/server/graphman/src/resolvers/deployment_query.rs b/server/graphman/src/resolvers/deployment_query.rs index 09d9d5bb792..8763f2d5f10 100644 --- a/server/graphman/src/resolvers/deployment_query.rs +++ b/server/graphman/src/resolvers/deployment_query.rs @@ -24,6 +24,6 @@ impl DeploymentQuery { When not provided, no additional version filter is applied.")] version: Option<DeploymentVersionSelector>, ) -> Result<Vec<DeploymentInfo>> { - info::run(ctx, deployment, version) + info::run(ctx, deployment, version).await } } diff --git a/server/graphman/src/resolvers/deployment_query/info.rs b/server/graphman/src/resolvers/deployment_query/info.rs index b5f8c079b35..ce25da2522b 100644 --- a/server/graphman/src/resolvers/deployment_query/info.rs +++ b/server/graphman/src/resolvers/deployment_query/info.rs @@ -6,7 +6,7 @@ use crate::entities::DeploymentSelector; use crate::entities::DeploymentVersionSelector; use crate::resolvers::context::GraphmanContext; -pub fn run( +pub async fn run( ctx: &Context<'_>, deployment: Option<DeploymentSelector>, version: Option<DeploymentVersionSelector>, ) -> Result<Vec<DeploymentInfo>> { @@ -27,13 +27,15 @@ ctx.primary_pool.clone(), &deployment, &version, - )?; + ) + .await?; let statuses = if load_status { graphman::commands::deployment::info::load_deployment_statuses( ctx.store.clone(), &deployments, - )? + ) + .await? } else { Default::default() }; diff --git a/server/graphman/src/resolvers/execution_query.rs b/server/graphman/src/resolvers/execution_query.rs index f0cded8ea97..44eaf3c9b81 100644 --- a/server/graphman/src/resolvers/execution_query.rs +++ b/server/graphman/src/resolvers/execution_query.rs @@ -17,7 +17,7 @@ impl ExecutionQuery { /// Returns all stored command execution data. pub async fn info(&self, ctx: &Context<'_>, id: ExecutionId) -> Result<Execution> { let store = ctx.data::<Arc<GraphmanStore>>()?.to_owned(); - let execution = store.load_execution(id.into())?; + let execution = store.load_execution(id.into()).await?; Ok(execution.try_into()?)
} diff --git a/server/graphman/tests/deployment_mutation.rs b/server/graphman/tests/deployment_mutation.rs index 88f4a9a5180..8b87ad73263 100644 --- a/server/graphman/tests/deployment_mutation.rs +++ b/server/graphman/tests/deployment_mutation.rs @@ -515,7 +515,11 @@ fn graphql_can_reassign_deployment() { ) .await; - let node = SUBGRAPH_STORE.assigned_node(&locator).unwrap().unwrap(); + let node = SUBGRAPH_STORE + .assigned_node(&locator) + .await + .unwrap() + .unwrap(); let reassign = send_graphql_request( json!({ diff --git a/server/graphman/tests/deployment_query.rs b/server/graphman/tests/deployment_query.rs index ee66323716c..9d11cfc018f 100644 --- a/server/graphman/tests/deployment_query.rs +++ b/server/graphman/tests/deployment_query.rs @@ -58,7 +58,11 @@ fn graphql_returns_deployment_info() { .await; let namespace = format!("sgd{}", locator.id); - let node = SUBGRAPH_STORE.assigned_node(&locator).unwrap().unwrap(); + let node = SUBGRAPH_STORE + .assigned_node(&locator) + .await + .unwrap() + .unwrap(); let qs = STORE .query_store(QueryTarget::Deployment( locator.hash.clone(), diff --git a/server/graphman/tests/util/mod.rs b/server/graphman/tests/util/mod.rs index 61201dd708c..7fe6893fcd6 100644 --- a/server/graphman/tests/util/mod.rs +++ b/server/graphman/tests/util/mod.rs @@ -4,18 +4,14 @@ pub mod server; use std::future::Future; use std::sync::Mutex; +use graph::TEST_RUNTIME; use lazy_static::lazy_static; use test_store::store::remove_subgraphs; use test_store::store::PRIMARY_POOL; -use tokio::runtime::Builder; -use tokio::runtime::Runtime; lazy_static! { // Used to make sure tests will run sequentially. static ref SEQ_MUX: Mutex<()> = Mutex::new(()); - - // One runtime helps share the same server between the tests. - static ref RUNTIME: Runtime = Builder::new_current_thread().enable_all().build().unwrap(); } pub fn run_test(test: T) @@ -25,22 +21,23 @@ where { let _lock = SEQ_MUX.lock().unwrap_or_else(|err| err.into_inner()); - cleanup_graphman_command_executions_table(); - remove_subgraphs(); + TEST_RUNTIME.block_on(async { + cleanup_graphman_command_executions_table().await; + remove_subgraphs().await; - RUNTIME.block_on(async { server::start().await; test().await; }); } -fn cleanup_graphman_command_executions_table() { - use diesel::prelude::*; +async fn cleanup_graphman_command_executions_table() { + use diesel_async::RunQueryDsl; - let mut conn = PRIMARY_POOL.get().unwrap(); + let mut conn = PRIMARY_POOL.get().await.unwrap(); diesel::sql_query("truncate table public.graphman_command_executions;") .execute(&mut conn) + .await .expect("truncate is successful"); } diff --git a/server/http/Cargo.toml b/server/http/Cargo.toml index 4cf34a851c1..3c1e4b9058c 100644 --- a/server/http/Cargo.toml +++ b/server/http/Cargo.toml @@ -4,6 +4,7 @@ version.workspace = true edition.workspace = true [dependencies] +async-trait = { workspace = true } serde = { workspace = true } graph = { path = "../../graph" } graph-graphql = { path = "../../graphql" } diff --git a/server/http/src/service.rs b/server/http/src/service.rs index 8e2237b86ff..f68debbb19e 100644 --- a/server/http/src/service.rs +++ b/server/http/src/service.rs @@ -395,6 +395,7 @@ where #[cfg(test)] mod tests { + use async_trait::async_trait; use graph::data::value::{Object, Word}; use graph::http_body_util::{BodyExt, Full}; use graph::hyper::body::Bytes; @@ -451,7 +452,7 @@ mod tests { } } - #[tokio::test] + #[graph::test] async fn querying_not_found_routes_responds_correctly() { let logger = Logger::root(slog::Discard, o!()); let 
graphql_runner = Arc::new(TestGraphQlRunner); @@ -482,7 +483,7 @@ mod tests { assert_eq!(json.unwrap(), serde_json::json!({"message": "Not found"})); } - #[tokio::test] + #[graph::test] async fn posting_invalid_query_yields_error_response() { let logger = Logger::root(slog::Discard, o!()); let subgraph_id = USERS.clone(); @@ -514,7 +515,7 @@ mod tests { assert_eq!(message, response.to_string()); } - #[tokio::test(flavor = "multi_thread")] + #[graph::test] async fn posting_valid_queries_yields_result_response() { let logger = Logger::root(slog::Discard, o!()); let subgraph_id = USERS.clone(); diff --git a/server/http/tests/response.rs b/server/http/tests/response.rs index 7167a096457..63e94509aca 100644 --- a/server/http/tests/response.rs +++ b/server/http/tests/response.rs @@ -3,7 +3,7 @@ use graph::data::{graphql::object, query::QueryResults}; use graph::prelude::*; use graph_server_http::test_utils; -#[tokio::test] +#[graph::test] async fn generates_200_for_query_results() { let data = Object::from_iter([]); let query_result = QueryResults::from(data).as_http_response(); @@ -11,7 +11,7 @@ async fn generates_200_for_query_results() { test_utils::assert_successful_response(query_result).await; } -#[tokio::test] +#[graph::test] async fn generates_valid_json_for_an_empty_result() { let data = Object::from_iter([]); let query_result = QueryResults::from(data).as_http_response(); diff --git a/server/http/tests/server.rs b/server/http/tests/server.rs index 08d5a41f363..e0c6860d324 100644 --- a/server/http/tests/server.rs +++ b/server/http/tests/server.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use graph::http::StatusCode; use std::time::Duration; @@ -157,7 +158,7 @@ mod test { } } - #[tokio::test] + #[graph::test] async fn rejects_empty_json() { let logger = Logger::root(slog::Discard, o!()); let logger_factory = LoggerFactory::new(logger, None, Arc::new(MetricsRegistry::mock())); @@ -189,7 +190,7 @@ mod test { assert_eq!(message, "{\"error\":\"GraphQL server error (client error): The \\\"query\\\" field is missing in request data\"}"); } - #[tokio::test] + #[graph::test] async fn rejects_invalid_queries() { let logger = Logger::root(slog::Discard, o!()); let logger_factory = LoggerFactory::new(logger, None, Arc::new(MetricsRegistry::mock())); @@ -259,7 +260,7 @@ mod test { assert_eq!(column, 1); } - #[tokio::test] + #[graph::test] async fn accepts_valid_queries() { let logger = Logger::root(slog::Discard, o!()); let logger_factory = LoggerFactory::new(logger, None, Arc::new(MetricsRegistry::mock())); @@ -296,7 +297,7 @@ mod test { assert_eq!(name, "Jordi".to_string()); } - #[tokio::test] + #[graph::test] async fn accepts_valid_queries_with_variables() { let logger = Logger::root(slog::Discard, o!()); let logger_factory = LoggerFactory::new(logger, None, Arc::new(MetricsRegistry::mock())); diff --git a/server/index-node/Cargo.toml b/server/index-node/Cargo.toml index 57feb1267b8..103cba19f96 100644 --- a/server/index-node/Cargo.toml +++ b/server/index-node/Cargo.toml @@ -4,6 +4,7 @@ version.workspace = true edition.workspace = true [dependencies] +async-trait = { workspace = true } blake3 = "1.8" graph = { path = "../../graph" } graph-graphql = { path = "../../graphql" } diff --git a/server/index-node/src/explorer.rs b/server/index-node/src/explorer.rs index da7d6354076..8bcd39d7f8b 100644 --- a/server/index-node/src/explorer.rs +++ b/server/index-node/src/explorer.rs @@ -48,25 +48,25 @@ where } } - pub fn handle(&self, logger: &Logger, req: &[&str]) -> ServerResult { + pub async fn 
handle(&self, logger: &Logger, req: &[&str]) -> ServerResult { match req { - ["subgraph-versions", subgraph_id] => self.handle_subgraph_versions(subgraph_id), - ["subgraph-version", version] => self.handle_subgraph_version(version), - ["subgraph-repo", version] => self.handle_subgraph_repo(version), - ["entity-count", deployment] => self.handle_entity_count(logger, deployment), + ["subgraph-versions", subgraph_id] => self.handle_subgraph_versions(subgraph_id).await, + ["subgraph-version", version] => self.handle_subgraph_version(version).await, + ["subgraph-repo", version] => self.handle_subgraph_repo(version).await, + ["entity-count", deployment] => self.handle_entity_count(logger, deployment).await, ["subgraphs-for-deployment", deployment_hash] => { - self.handle_subgraphs_for_deployment(deployment_hash) + self.handle_subgraphs_for_deployment(deployment_hash).await } _ => handle_not_found(), } } - fn handle_subgraph_versions(&self, subgraph_id: &str) -> ServerResult { + async fn handle_subgraph_versions(&self, subgraph_id: &str) -> ServerResult { if let Some(value) = self.versions.get(subgraph_id) { return Ok(as_http_response(value.as_ref())); } - let (current, pending) = self.store.versions_for_subgraph_id(subgraph_id)?; + let (current, pending) = self.store.versions_for_subgraph_id(subgraph_id).await?; let value = object! { currentVersion: current, @@ -78,8 +78,8 @@ where Ok(resp) } - fn handle_subgraph_version(&self, version: &str) -> ServerResult { - let vi = self.version_info(version)?; + async fn handle_subgraph_version(&self, version: &str) -> ServerResult { + let vi = self.version_info(version).await?; let latest_ethereum_block_number = vi.latest_ethereum_block_number; let total_ethereum_blocks_count = vi.total_ethereum_blocks_count; @@ -98,8 +98,8 @@ where Ok(as_http_response(&value)) } - fn handle_subgraph_repo(&self, version: &str) -> ServerResult { - let vi = self.version_info(version)?; + async fn handle_subgraph_repo(&self, version: &str) -> ServerResult { + let vi = self.version_info(version).await?; let value = object! { createdAt: vi.created_at.as_str(), @@ -109,7 +109,7 @@ where Ok(as_http_response(&value)) } - fn handle_entity_count(&self, logger: &Logger, deployment: &str) -> ServerResult { + async fn handle_entity_count(&self, logger: &Logger, deployment: &str) -> ServerResult { let start = Instant::now(); let count = self.entity_counts.get(deployment); if start.elapsed() > ENV_VARS.explorer_lock_threshold { @@ -130,7 +130,8 @@ where let start = Instant::now(); let infos = self .store - .status(status::Filter::Deployments(vec![deployment.to_string()]))?; + .status(status::Filter::Deployments(vec![deployment.to_string()])) + .await?; if start.elapsed() > ENV_VARS.explorer_query_threshold { warn!(logger, "Getting entity_count takes too long"; "action" => "query_status", @@ -167,21 +168,22 @@ where Ok(resp) } - fn version_info(&self, version: &str) -> Result<Arc<VersionInfo>, ServerError> { + async fn version_info(&self, version: &str) -> Result<Arc<VersionInfo>, ServerError> { match self.version_infos.get(version) { Some(vi) => Ok(vi), None => { - let vi = Arc::new(self.store.version_info(version)?); + let vi = Arc::new(self.store.version_info(version).await?); self.version_infos.set(version.to_string(), vi.clone()); Ok(vi) } } } - fn handle_subgraphs_for_deployment(&self, deployment_hash: &str) -> ServerResult { + async fn handle_subgraphs_for_deployment(&self, deployment_hash: &str) -> ServerResult { let name_version_pairs: Vec<r::Value> = self .store - .subgraphs_for_deployment_hash(deployment_hash)?
+ .subgraphs_for_deployment_hash(deployment_hash) + .await? .into_iter() .map(|(name, version)| { object! { diff --git a/server/index-node/src/resolver.rs b/server/index-node/src/resolver.rs index f1b5b4ecab6..ee85c3cc0ac 100644 --- a/server/index-node/src/resolver.rs +++ b/server/index-node/src/resolver.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use std::convert::TryInto; +use async_trait::async_trait; use graph::data::query::Trace; use graph::data::store::Id; use graph::schema::EntityType; @@ -122,7 +123,10 @@ impl IndexNodeResolver { } } - fn resolve_indexing_statuses(&self, field: &a::Field) -> Result<r::Value, QueryExecutionError> { + async fn resolve_indexing_statuses( + &self, + field: &a::Field, + ) -> Result<r::Value, QueryExecutionError> { let deployments = field .argument_value("subgraphs") .map(|value| match value { @@ -143,11 +147,12 @@ impl IndexNodeResolver { let infos = self .store - .status(status::Filter::Deployments(deployments))?; + .status(status::Filter::Deployments(deployments)) + .await?; Ok(infos.into_value()) } - fn resolve_indexing_statuses_for_subgraph_name( + async fn resolve_indexing_statuses_for_subgraph_name( &self, field: &a::Field, ) -> Result<r::Value, QueryExecutionError> { @@ -166,12 +171,13 @@ impl IndexNodeResolver { let infos = self .store - .status(status::Filter::SubgraphName(subgraph_name))?; + .status(status::Filter::SubgraphName(subgraph_name)) + .await?; Ok(infos.into_value()) } - fn resolve_entity_changes_in_block( + async fn resolve_entity_changes_in_block( &self, field: &a::Field, ) -> Result<r::Value, QueryExecutionError> { @@ -186,7 +192,8 @@ impl IndexNodeResolver { let entity_changes = self .store .subgraph_store() - .entity_changes_in_block(&subgraph_id, block_number)?; + .entity_changes_in_block(&subgraph_id, block_number) + .await?; Ok(entity_changes_to_graphql(entity_changes)) } @@ -200,7 +207,7 @@ impl IndexNodeResolver { .get_required::<H256>("blockHash") .expect("Valid blockHash required"); - let chain_store = if let Some(cs) = self.store.block_store().chain_store(&network) { + let chain_store = if let Some(cs) = self.store.block_store().chain_store(&network).await { cs } else { error!( @@ -309,7 +316,7 @@ impl IndexNodeResolver { }; let block_ptr = BlockPtr::new(block_hash.cheap_clone(), block_number); - let calls = match call_cache.get_calls_in_block(block_ptr) { + let calls = match call_cache.get_calls_in_block(block_ptr).await { Ok(c) => c, Err(e) => { error!( @@ -443,7 +450,7 @@ impl IndexNodeResolver { Ok(r::Value::List(public_poi_results)) } - fn resolve_indexing_status_for_version( + async fn resolve_indexing_status_for_version( &self, field: &a::Field, @@ -460,10 +467,13 @@ impl IndexNodeResolver { "current_version" => current_version, ); - let infos = self.store.status(status::Filter::SubgraphVersion( - subgraph_name, - current_version, - ))?; + let infos = self + .store + .status(status::Filter::SubgraphVersion( + subgraph_name, + current_version, + )) + .await?; Ok(infos .into_iter() @@ -766,7 +776,7 @@ impl Resolver for IndexNodeResolver { self.store.query_permit().await } - fn prefetch( + async fn prefetch( &self, _: &ExecutionContext<Self>, _: &a::SelectionSet, @@ -811,10 +821,11 @@ impl Resolver for IndexNodeResolver { // Resolves the `field.name` top-level field.
match (prefetched_objects, object_type.name(), field.name.as_str()) { (None, "SubgraphIndexingStatus", "indexingStatuses") => { - self.resolve_indexing_statuses(field) + self.resolve_indexing_statuses(field).await } (None, "SubgraphIndexingStatus", "indexingStatusesForSubgraphName") => { self.resolve_indexing_statuses_for_subgraph_name(field) + .await } (None, "CachedEthereumCall", "cachedEthereumCalls") => { self.resolve_cached_ethereum_calls(field).await @@ -840,13 +851,13 @@ impl Resolver for IndexNodeResolver { // Resolves the `field.name` top-level field. match (prefetched_object, field.name.as_str()) { (None, "indexingStatusForCurrentVersion") => { - self.resolve_indexing_status_for_version(field, true) + self.resolve_indexing_status_for_version(field, true).await } (None, "indexingStatusForPendingVersion") => { - self.resolve_indexing_status_for_version(field, false) + self.resolve_indexing_status_for_version(field, false).await } (None, "subgraphFeatures") => self.resolve_subgraph_features(field).await, - (None, "entityChangesInBlock") => self.resolve_entity_changes_in_block(field), + (None, "entityChangesInBlock") => self.resolve_entity_changes_in_block(field).await, // The top-level `subgraphVersions` field (None, "apiVersions") => self.resolve_api_versions(field), (None, "version") => self.version(), diff --git a/server/index-node/src/service.rs b/server/index-node/src/service.rs index d07d9b9e5e3..cfacfa18881 100644 --- a/server/index-node/src/service.rs +++ b/server/index-node/src/service.rs @@ -229,7 +229,9 @@ where } (Method::OPTIONS, ["graphql"]) => Ok(Self::handle_graphql_options(req)), - (Method::GET, ["explorer", rest @ ..]) => self.explorer.handle(&self.logger, rest), + (Method::GET, ["explorer", rest @ ..]) => { + self.explorer.handle(&self.logger, rest).await + } _ => Ok(Self::handle_not_found()), } diff --git a/store/postgres/Cargo.toml b/store/postgres/Cargo.toml index f05c0862778..dba4587433b 100644 --- a/store/postgres/Cargo.toml +++ b/store/postgres/Cargo.toml @@ -4,11 +4,13 @@ version.workspace = true edition.workspace = true [dependencies] -async-trait = "0.1.50" +async-trait = { workspace = true } blake3 = "1.8" chrono = { workspace = true } +deadpool = { workspace = true } derive_more = { version = "2.0.1", features = ["full"] } diesel = { workspace = true } +diesel-async = { workspace = true } diesel-dynamic-schema = { workspace = true } diesel-derive-enum = { workspace = true } diesel_derives = { workspace = true } @@ -27,6 +29,8 @@ rand.workspace = true serde = { workspace = true } serde_json = { workspace = true } stable-hash_legacy = { git = "https://github.com/graphprotocol/stable-hash", branch = "old", package = "stable-hash" } +tokio = { workspace = true } +tokio-stream = { workspace = true } anyhow = "1.0.100" git-testament = "0.2.6" itertools = "0.14.0" diff --git a/store/postgres/src/advisory_lock.rs b/store/postgres/src/advisory_lock.rs index 85e2cf5a4ae..e012f08e82a 100644 --- a/store/postgres/src/advisory_lock.rs +++ b/store/postgres/src/advisory_lock.rs @@ -14,12 +14,14 @@ //! * 2, n: to lock the deployment with id n to make sure only one write //! happens to it +use diesel::sql_query; use diesel::sql_types::Bool; -use diesel::{sql_query, PgConnection, RunQueryDsl}; +use diesel_async::RunQueryDsl; use graph::prelude::StoreError; use crate::command_support::catalog::Site; use crate::primary::DeploymentId; +use crate::AsyncPgConnection; /// A locking scope for a particular deployment. 
We use different scopes for /// different purposes, and in each scope we use an advisory lock for each @@ -31,7 +33,11 @@ struct Scope { impl Scope { /// Try to lock the deployment in this scope with the given id. Return /// `true` if we got the lock, and `false` if it is already locked. - fn try_lock(&self, conn: &mut PgConnection, id: DeploymentId) -> Result<bool, StoreError> { + async fn try_lock( + &self, + conn: &mut AsyncPgConnection, + id: DeploymentId, + ) -> Result<bool, StoreError> { #[derive(QueryableByName)] struct Locked { #[diesel(sql_type = Bool)] @@ -43,23 +49,30 @@ impl Scope { self.id )) .get_result::<Locked>(conn) + .await .map(|res| res.locked) .map_err(StoreError::from) } /// Lock the deployment in this scope with the given id. Blocks until we /// can get the lock - fn lock(&self, conn: &mut PgConnection, id: DeploymentId) -> Result<(), StoreError> { + async fn lock(&self, conn: &mut AsyncPgConnection, id: DeploymentId) -> Result<(), StoreError> { sql_query(format!("select pg_advisory_lock({}, {id})", self.id)) .execute(conn) + .await .map(|_| ()) .map_err(StoreError::from) } /// Unlock the deployment in this scope with the given id. - fn unlock(&self, conn: &mut PgConnection, id: DeploymentId) -> Result<(), StoreError> { + async fn unlock( + &self, + conn: &mut AsyncPgConnection, + id: DeploymentId, + ) -> Result<(), StoreError> { sql_query(format!("select pg_advisory_unlock({}, {id})", self.id)) .execute(conn) + .await .map(|_| ()) .map_err(StoreError::from) } @@ -73,65 +86,85 @@ const PRUNE: Scope = Scope { id: 3 }; /// it is done. This is used to make sure that only one node runs setup at a /// time. pub(crate) async fn with_migration_lock<F, Fut, R>( - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, f: F, ) -> Result<R, StoreError> where - F: FnOnce(&mut PgConnection) -> Fut, + F: FnOnce(&mut AsyncPgConnection) -> Fut, Fut: std::future::Future<Output = Result<R, StoreError>>, { - fn execute(conn: &mut PgConnection, query: &str, msg: &str) -> Result<(), StoreError> { - sql_query(query).execute(conn).map(|_| ()).map_err(|e| { - StoreError::from_diesel_error(&e) - .unwrap_or_else(|| StoreError::Unknown(anyhow::anyhow!("{}: {}", msg, e))) - }) + async fn execute( + conn: &mut AsyncPgConnection, + query: &str, + msg: &str, + ) -> Result<(), StoreError> { + sql_query(query) + .execute(conn) + .await + .map(|_| ()) + .map_err(|e| { + StoreError::from_diesel_error(&e) + .unwrap_or_else(|| StoreError::Unknown(anyhow::anyhow!("{}: {}", msg, e))) + }) } const LOCK: &str = "select pg_advisory_lock(1)"; const UNLOCK: &str = "select pg_advisory_unlock(1)"; - execute(conn, LOCK, "failed to acquire migration lock")?; + execute(conn, LOCK, "failed to acquire migration lock").await?; let res = f(conn).await; - execute(conn, UNLOCK, "failed to release migration lock")?; + execute(conn, UNLOCK, "failed to release migration lock").await?; res } /// Take the lock used to keep two copy operations to run simultaneously on /// the same deployment. Block until we can get the lock -pub(crate) fn lock_copying(conn: &mut PgConnection, dst: &Site) -> Result<(), StoreError> { - COPY.lock(conn, dst.id) +pub(crate) async fn lock_copying( + conn: &mut AsyncPgConnection, + dst: &Site, +) -> Result<(), StoreError> { + COPY.lock(conn, dst.id).await } /// Release the lock acquired with `lock_copying`.
-pub(crate) fn unlock_copying(conn: &mut PgConnection, dst: &Site) -> Result<(), StoreError> { - COPY.unlock(conn, dst.id) +pub(crate) async fn unlock_copying( + conn: &mut AsyncPgConnection, + dst: &Site, +) -> Result<(), StoreError> { + COPY.unlock(conn, dst.id).await } /// Take the lock used to keep two operations from writing to the deployment /// simultaneously. Return `true` if we got the lock, and `false` if we did /// not. You don't want to use this directly. Instead, use /// `deployment::with_lock` -pub(crate) fn lock_deployment_session( - conn: &mut PgConnection, +pub(crate) async fn lock_deployment_session( + conn: &mut AsyncPgConnection, site: &Site, ) -> Result<bool, StoreError> { - WRITE.try_lock(conn, site.id) + WRITE.try_lock(conn, site.id).await } /// Release the lock acquired with `lock_deployment_session`. -pub(crate) fn unlock_deployment_session( - conn: &mut PgConnection, +pub(crate) async fn unlock_deployment_session( + conn: &mut AsyncPgConnection, site: &Site, ) -> Result<(), StoreError> { - WRITE.unlock(conn, site.id) + WRITE.unlock(conn, site.id).await } /// Try to take the lock used to prevent two prune operations from running at the /// same time. Return `true` if we got the lock, and `false` otherwise. -pub(crate) fn try_lock_pruning(conn: &mut PgConnection, site: &Site) -> Result<bool, StoreError> { - PRUNE.try_lock(conn, site.id) +pub(crate) async fn try_lock_pruning( + conn: &mut AsyncPgConnection, + site: &Site, +) -> Result<bool, StoreError> { + PRUNE.try_lock(conn, site.id).await } -pub(crate) fn unlock_pruning(conn: &mut PgConnection, site: &Site) -> Result<(), StoreError> { - PRUNE.unlock(conn, site.id) +pub(crate) async fn unlock_pruning( + conn: &mut AsyncPgConnection, + site: &Site, +) -> Result<(), StoreError> { + PRUNE.unlock(conn, site.id).await } diff --git a/store/postgres/src/block_store.rs b/store/postgres/src/block_store.rs index c3754c399af..e677623fbcc 100644 --- a/store/postgres/src/block_store.rs +++ b/store/postgres/src/block_store.rs @@ -5,14 +5,13 @@ use std::{ }; use anyhow::anyhow; -use diesel::{ - query_dsl::methods::FilterDsl as _, - r2d2::{ConnectionManager, PooledConnection}, - sql_query, ExpressionMethods as _, PgConnection, RunQueryDsl, -}; +use async_trait::async_trait; +use diesel::{sql_query, ExpressionMethods as _, QueryDsl}; +use diesel_async::{scoped_futures::ScopedFutureExt, RunQueryDsl}; use graph::{ blockchain::ChainIdentifier, components::store::{BlockStore as BlockStoreTrait, QueryPermit}, + derive::CheapClone, prelude::{error, info, BlockNumber, BlockPtr, Logger, ENV_VARS}, slog::o, }; @@ -28,7 +27,7 @@ use crate::{ chain_store::{ChainStoreMetrics, Storage}, pool::ConnectionPool, primary::Mirror as PrimaryMirror, - ChainStore, NotificationSender, Shard, PRIMARY_SHARD, + AsyncPgConnection, ChainStore, NotificationSender, Shard, PRIMARY_SHARD, }; use self::primary::Chain; @@ -51,18 +50,15 @@ pub enum ChainStatus { pub mod primary { use std::convert::TryFrom; - use diesel::{ - delete, insert_into, - r2d2::{ConnectionManager, PooledConnection}, - update, ExpressionMethods, OptionalExtension, PgConnection, QueryDsl, RunQueryDsl, - }; + use diesel::{delete, insert_into, update, ExpressionMethods, OptionalExtension, QueryDsl}; + use diesel_async::RunQueryDsl; use graph::{ blockchain::{BlockHash, ChainIdentifier}, internal_error, prelude::StoreError, }; - use crate::chain_store::Storage; + use crate::{chain_store::Storage, AsyncPgConnection}; use crate::{ConnectionPool, Shard}; table!
{ @@ -107,19 +103,23 @@ pub mod primary { } } - pub fn load_chains(conn: &mut PgConnection) -> Result<Vec<Chain>, StoreError> { - Ok(chains::table.load(conn)?) + pub async fn load_chains(conn: &mut AsyncPgConnection) -> Result<Vec<Chain>, StoreError> { + Ok(chains::table.load(conn).await?) } - pub fn find_chain(conn: &mut PgConnection, name: &str) -> Result<Option<Chain>, StoreError> { + pub async fn find_chain( + conn: &mut AsyncPgConnection, + name: &str, + ) -> Result<Option<Chain>, StoreError> { Ok(chains::table .filter(chains::name.eq(name)) .first(conn) + .await .optional()?) } - pub fn add_chain( - conn: &mut PooledConnection<ConnectionManager<PgConnection>>, + pub async fn add_chain( + conn: &mut AsyncPgConnection, name: &str, shard: &Shard, ident: ChainIdentifier, @@ -138,8 +138,12 @@ pub mod primary { )) .returning(chains::namespace) .get_result::<Storage>(conn) + .await .map_err(StoreError::from)?; - return Ok(chains::table.filter(chains::name.eq(name)).first(conn)?); + return Ok(chains::table + .filter(chains::name.eq(name)) + .first(conn) + .await?); } insert_into(chains::table) @@ -151,37 +155,33 @@ pub mod primary { )) .returning(chains::namespace) .get_result::<Storage>(conn) + .await .map_err(StoreError::from)?; - Ok(chains::table.filter(chains::name.eq(name)).first(conn)?) + Ok(chains::table + .filter(chains::name.eq(name)) + .first(conn) + .await?) } - pub(super) fn drop_chain(pool: &ConnectionPool, name: &str) -> Result<(), StoreError> { - let mut conn = pool.get()?; + pub(super) async fn drop_chain(pool: &ConnectionPool, name: &str) -> Result<(), StoreError> { + let mut conn = pool.get().await?; - delete(chains::table.filter(chains::name.eq(name))).execute(&mut conn)?; + delete(chains::table.filter(chains::name.eq(name))) + .execute(&mut conn) + .await?; Ok(()) } // update chain name where chain name is 'name' - pub fn update_chain_name( - conn: &mut PooledConnection<ConnectionManager<PgConnection>>, + pub async fn update_chain_name( + conn: &mut AsyncPgConnection, name: &str, new_name: &str, ) -> Result<(), StoreError> { update(chains::table.filter(chains::name.eq(name))) .set(chains::name.eq(new_name)) - .execute(conn)?; - Ok(()) - } - - pub fn update_chain_genesis_hash( - conn: &mut PooledConnection<ConnectionManager<PgConnection>>, - name: &str, - hash: BlockHash, - ) -> Result<(), StoreError> { - update(chains::table.filter(chains::name.eq(name))) - .set(chains::genesis_block_hash.eq(hash.hash_hex())) - .execute(conn)?; Ok(()) } } @@ -206,7 +206,20 @@ pub mod primary { /// not possible to change its configuration, in particular, the database /// shard and namespace, and the genesis block and net version must not /// change between runs of `graph-node` +#[derive(Clone, CheapClone)] pub struct BlockStore { + inner: Arc<Inner>, +} + +impl std::ops::Deref for BlockStore { + type Target = Inner; + + fn deref(&self) -> &Self::Target { + &self.inner + } +} + +pub struct Inner { logger: Logger, /// Map chain names to the corresponding store. This map is updated /// dynamically with new chains if an operation would require a chain @@ -235,7 +248,7 @@ impl BlockStore { /// Each entry in `chains` gives the chain name, the network identifier, /// and the name of the database shard for the chain. The `ChainStore` for /// a chain uses the pool from `pools` for the given shard.
- pub fn new( + pub async fn new( logger: Logger, // (network, shard) shards: Vec<(String, Shard)>, @@ -249,11 +262,13 @@ impl BlockStore { const CHAIN_HEAD_CACHE_TTL: Duration = Duration::from_secs(2); let mirror = PrimaryMirror::new(&pools); - let existing_chains = mirror.read(|conn| primary::load_chains(conn))?; + let existing_chains = mirror + .read_async(|conn| primary::load_chains(conn).scope_boxed()) + .await?; let chain_head_cache = TimedCache::new(CHAIN_HEAD_CACHE_TTL); let chains = shards.clone(); - let block_store = Self { + let inner = Arc::new(Inner { logger, stores: RwLock::new(HashMap::new()), shards, @@ -262,7 +277,8 @@ impl BlockStore { mirror, chain_head_cache, chain_store_metrics, - }; + }); + let block_store = Self { inner }; /// Check that the configuration for `chain` hasn't changed so that /// it is ok to ingest from it @@ -297,7 +313,7 @@ impl BlockStore { } else { ChainStatus::ReadOnly }; - block_store.add_chain_store(chain, status, false)?; + block_store.add_chain_store(chain, status, false).await?; } None => {} }; @@ -316,7 +332,9 @@ impl BlockStore { .iter() .filter(|chain| !configured_chains.contains(&chain.name)) { - block_store.add_chain_store(chain, ChainStatus::ReadOnly, false)?; + block_store + .add_chain_store(chain, ChainStatus::ReadOnly, false) + .await?; } Ok(block_store) } @@ -325,8 +343,8 @@ impl BlockStore { self.mirror.primary().query_permit().await } - pub fn allocate_chain( - conn: &mut PooledConnection<ConnectionManager<PgConnection>>, + pub async fn allocate_chain( + conn: &mut AsyncPgConnection, name: &String, shard: &Shard, ident: &ChainIdentifier, @@ -338,8 +356,9 @@ impl BlockStore { } // Fetch the current last_value from the sequence - let result = - sql_query("SELECT last_value FROM chains_id_seq").get_result::(conn)?; + let result = sql_query("SELECT last_value FROM chains_id_seq") + .get_result::(conn) + .await?; let last_val = result.last_value; @@ -360,7 +379,7 @@ impl BlockStore { Ok(chain) } - pub fn add_chain_store( + pub async fn add_chain_store( &self, chain: &primary::Chain, status: ChainStatus, @@ -389,7 +408,7 @@ impl BlockStore { self.chain_store_metrics.clone(), ); if create { - store.create(&ident)?; + store.create(&ident).await?; } let store = Arc::new(store); self.stores @@ -402,18 +421,18 @@ impl BlockStore { /// Return a map from network name to the network's chain head pointer.
/// The information is cached briefly since this method is used heavily /// by the indexing status API - pub fn chain_head_pointers(&self) -> Result<HashMap<String, BlockPtr>, StoreError> { + pub async fn chain_head_pointers(&self) -> Result<HashMap<String, BlockPtr>, StoreError> { let mut map = HashMap::new(); for (shard, pool) in &self.pools { let cached = match self.chain_head_cache.get(shard.as_str()) { Some(cached) => cached, None => { - let mut conn = match pool.get() { + let mut conn = match pool.get().await { Ok(conn) => conn, Err(StoreError::DatabaseUnavailable) => continue, Err(e) => return Err(e), }; - let heads = Arc::new(ChainStore::chain_head_pointers(&mut conn)?); + let heads = Arc::new(ChainStore::chain_head_pointers(&mut conn).await?); self.chain_head_cache.set(shard.to_string(), heads.clone()); heads } @@ -427,26 +446,38 @@ impl BlockStore { Ok(map) } - pub fn chain_head_block(&self, chain: &str) -> Result<Option<BlockNumber>, StoreError> { + pub async fn chain_head_block(&self, chain: &str) -> Result<Option<BlockNumber>, StoreError> { let store = self .store(chain) + .await .ok_or_else(|| internal_error!("unknown network `{}`", chain))?; - store.chain_head_block(chain) + store.chain_head_block(chain).await } - fn lookup_chain<'a>(&'a self, chain: &'a str) -> Result<Option<Arc<ChainStore>>, StoreError> { + async fn lookup_chain(&self, chain: &str) -> Result<Option<Arc<ChainStore>>, StoreError> { // See if we have that chain in the database even if it wasn't one // of the configured chains - self.mirror.read(|conn| { - primary::find_chain(conn, chain).and_then(|chain| { - chain - .map(|chain| self.add_chain_store(&chain, ChainStatus::ReadOnly, false)) - .transpose() + let chain = chain.to_string(); + let this = self.cheap_clone(); + self.mirror + .read_async(|conn| { + async { + match primary::find_chain(conn, &chain).await? { + Some(chain) => { + let chain_store = this + .add_chain_store(&chain, ChainStatus::ReadOnly, false) + .await?; + Ok(Some(chain_store)) + } + None => Ok(None), + } + } + .scope_boxed() }) - }) + .await } - fn store(&self, chain: &str) -> Option<Arc<ChainStore>> { + async fn store(&self, chain: &str) -> Option<Arc<ChainStore>> { let store = self .stores .read() @@ -460,28 +491,40 @@ impl BlockStore { // suppress errors here since it will be very rare that we look up // a chain from the database as most of them will be set up when // the block store is created - self.lookup_chain(chain).unwrap_or_else(|e| { + self.lookup_chain(chain).await.unwrap_or_else(|e| { error!(&self.logger, "Error getting chain from store"; "network" => chain, "error" => e.to_string()); None }) } - pub fn drop_chain(&self, chain: &str) -> Result<(), StoreError> { + pub async fn drop_chain(&self, chain: &str) -> Result<(), StoreError> { let chain_store = self .store(chain) + .await .ok_or_else(|| internal_error!("unknown chain {}", chain))?; // Delete from the primary first since that's where // deployment_schemas has a fk constraint on chains - primary::drop_chain(self.mirror.primary(), chain)?; + primary::drop_chain(self.mirror.primary(), chain).await?; - chain_store.drop_chain()?; + chain_store.drop_chain().await?; self.stores.write().unwrap().remove(chain); Ok(()) } + // Helper to clone the list of chain stores to avoid holding the lock + // while awaiting + fn stores(&self) -> Vec<Arc<ChainStore>> { + self.stores + .read() + .unwrap() + .values() + .map(CheapClone::cheap_clone) + .collect() + } + // cleanup_ethereum_shallow_blocks will delete cached blocks previously produced by firehose on // an ethereum chain that is not currently configured to use firehose provider. // // hit on graph-node startup.
// // Discussed here: https://github.com/graphprotocol/graph-node/pull/4790 - pub fn cleanup_ethereum_shallow_blocks( + pub async fn cleanup_ethereum_shallow_blocks( &self, eth_rpc_only_nets: Vec, ) -> Result<(), StoreError> { - for store in self.stores.read().unwrap().values() { + for store in self.stores() { if !eth_rpc_only_nets.contains(&&store.chain) { continue; }; - if let Some(head_block) = store.remove_cursor(&&store.chain)? { + if let Some(head_block) = store.remove_cursor(&&store.chain).await? { let lower_bound = head_block.saturating_sub(ENV_VARS.reorg_threshold() * 2); info!(&self.logger, "Removed cursor for non-firehose chain, now cleaning shallow blocks"; "network" => &store.chain, "lower_bound" => lower_bound); - store.cleanup_shallow_blocks(lower_bound)?; + store.cleanup_shallow_blocks(lower_bound).await?; } } Ok(()) } - fn truncate_block_caches(&self) -> Result<(), StoreError> { - for store in self.stores.read().unwrap().values() { - store.truncate_block_cache()? + async fn truncate_block_caches(&self) -> Result<(), StoreError> { + for store in self.stores() { + store.truncate_block_cache().await?; } Ok(()) } - pub fn update_db_version(&self) -> Result<(), StoreError> { + pub async fn update_db_version(&self) -> Result<(), StoreError> { use crate::primary::db_version as dbv; - use diesel::prelude::*; let primary_pool = self.pools.get(&*PRIMARY_SHARD).unwrap(); - let mut conn = primary_pool.get()?; - let version: i64 = dbv::table.select(dbv::version).get_result(&mut conn)?; + let mut conn = primary_pool.get().await?; + let version: i64 = dbv::table + .select(dbv::version) + .get_result(&mut conn) + .await?; if version < 3 { - self.truncate_block_caches()?; + self.truncate_block_caches().await?; diesel::update(dbv::table) .set(dbv::version.eq(3)) - .execute(&mut conn)?; + .execute(&mut conn) + .await?; }; if version < SUPPORTED_DB_VERSION { // Bump it to make sure that all executables are working with the same DB format diesel::update(dbv::table) .set(dbv::version.eq(SUPPORTED_DB_VERSION)) - .execute(&mut conn)?; + .execute(&mut conn) + .await?; }; if version > SUPPORTED_DB_VERSION { panic!( @@ -549,40 +596,19 @@ impl BlockStore { Ok(()) } - /// Updates the chains table of the primary shard. This table is replicated to other shards and - /// has to be refreshed afterwards for the update to be reflected. 
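// [Editor's sketch, not part of the patch] Why the async closures introduced in
// this refactor end in `.scope_boxed()`: `diesel_async::AsyncConnection::transaction`
// (and `read_async`-style wrappers built on the same shape) take a closure that
// returns a `ScopedBoxFuture`, which ties the async block's borrow of the
// connection to the callback's lifetime. A minimal, self-contained example,
// assuming only diesel/diesel_async and a hypothetical `db_version` table:

use diesel::QueryResult;
use diesel_async::scoped_futures::ScopedFutureExt;
use diesel_async::{AsyncConnection, AsyncPgConnection, RunQueryDsl};

async fn bump_version(conn: &mut AsyncPgConnection, v: i64) -> QueryResult<()> {
    conn.transaction(|conn| {
        // The async block borrows `conn`; `scope_boxed()` boxes it with the
        // scoped lifetime that `transaction`'s callback signature requires.
        async move {
            diesel::sql_query("update db_version set version = $1")
                .bind::<diesel::sql_types::BigInt, _>(v)
                .execute(conn)
                .await?;
            Ok(())
        }
        .scope_boxed()
    })
    .await
}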
- pub fn set_chain_identifier( - &self, - chain_id: ChainName, - ident: &ChainIdentifier, - ) -> Result<(), StoreError> { - use primary::chains as c; - - let primary_pool = self.pools.get(&*PRIMARY_SHARD).unwrap(); - let mut conn = primary_pool.get()?; - - diesel::update(c::table.filter(c::name.eq(chain_id.as_str()))) - .set(( - c::genesis_block_hash.eq(ident.genesis_block_hash.hash_hex()), - c::net_version.eq(&ident.net_version), - )) - .execute(&mut conn)?; - - Ok(()) - } - pub fn create_chain_store( + pub async fn create_chain_store( &self, network: &str, ident: ChainIdentifier, ) -> anyhow::Result> { - match self.store(network) { + match self.store(network).await { Some(chain_store) => { return Ok(chain_store); } None => {} } - let mut conn = self.mirror.primary().get()?; + let mut conn = self.mirror.primary().get().await?; let shard = self .shards .iter() @@ -594,30 +620,37 @@ impl BlockStore { } }) .ok_or_else(|| anyhow!("unable to find shard for network {}", network))?; - let chain = primary::add_chain(&mut conn, &network, &shard, ident)?; + let chain = primary::add_chain(&mut conn, &network, &shard, ident).await?; self.add_chain_store(&chain, ChainStatus::Ingestible, true) + .await .map_err(anyhow::Error::from) } } +#[async_trait] impl BlockStoreTrait for BlockStore { type ChainStore = ChainStore; - fn chain_store(&self, network: &str) -> Option> { - self.store(network) + async fn chain_store(&self, network: &str) -> Option> { + self.store(network).await } } +#[async_trait] impl ChainIdStore for BlockStore { - fn chain_identifier(&self, chain_name: &ChainName) -> Result { + async fn chain_identifier( + &self, + chain_name: &ChainName, + ) -> Result { let chain_store = self .chain_store(&chain_name) + .await .ok_or_else(|| anyhow!("unable to get store for chain '{chain_name}'"))?; - chain_store.chain_identifier() + chain_store.chain_identifier().await } - fn set_chain_identifier( + async fn set_chain_identifier( &self, chain_name: &ChainName, ident: &ChainIdentifier, @@ -627,20 +660,22 @@ impl ChainIdStore for BlockStore { // Update the block shard first since that contains a copy from the primary let chain_store = self .chain_store(&chain_name) + .await .ok_or_else(|| anyhow!("unable to get store for chain '{chain_name}'"))?; - chain_store.set_chain_identifier(ident)?; + chain_store.set_chain_identifier(ident).await?; // Update the master copy in the primary let primary_pool = self.pools.get(&*PRIMARY_SHARD).unwrap(); - let mut conn = primary_pool.get()?; + let mut conn = primary_pool.get().await?; diesel::update(c::table.filter(c::name.eq(chain_name.as_str()))) .set(( c::genesis_block_hash.eq(ident.genesis_block_hash.hash_hex()), c::net_version.eq(&ident.net_version), )) - .execute(&mut conn)?; + .execute(&mut conn) + .await?; Ok(()) } diff --git a/store/postgres/src/catalog.rs b/store/postgres/src/catalog.rs index 6b7f184cab2..0f7dc065733 100644 --- a/store/postgres/src/catalog.rs +++ b/store/postgres/src/catalog.rs @@ -1,11 +1,12 @@ +use diesel::select; +use diesel::sql_query; use diesel::sql_types::{Bool, Integer}; -use diesel::{connection::SimpleConnection, prelude::RunQueryDsl, select}; use diesel::{insert_into, OptionalExtension}; -use diesel::{pg::PgConnection, sql_query}; use diesel::{ sql_types::{Array, BigInt, Double, Nullable, Text}, ExpressionMethods, QueryDsl, }; +use diesel_async::{RunQueryDsl, SimpleAsyncConnection}; use graph::components::store::VersionStats; use graph::prelude::BlockNumber; use graph::schema::EntityType; @@ -22,6 +23,7 @@ use graph::{ 
prelude::{lazy_static, StoreError, BLOCK_NUMBER_MAX}, }; +use crate::AsyncPgConnection; use crate::{ block_range::BLOCK_RANGE_COLUMN, pool::ForeignServer, @@ -127,7 +129,7 @@ pub struct Locale { impl Locale { /// Load locale information for current database - pub fn load(conn: &mut PgConnection) -> Result { + pub async fn load(conn: &mut AsyncPgConnection) -> Result { use diesel::dsl::sql; use pg_database as db; @@ -138,7 +140,8 @@ impl Locale { db::datctype, sql::("pg_encoding_to_char(encoding)::text"), )) - .get_result::<(String, String, String)>(conn)?; + .get_result::<(String, String, String)>(conn) + .await?; Ok(Locale { collate, ctype, @@ -196,16 +199,16 @@ pub struct Catalog { impl Catalog { /// Load the catalog for an existing subgraph - pub fn load( - conn: &mut PgConnection, + pub async fn load( + conn: &mut AsyncPgConnection, site: Arc, use_bytea_prefix: bool, entities_with_causality_region: Vec, ) -> Result { - let text_columns = get_text_columns(conn, &site.namespace)?; - let use_poi = supports_proof_of_indexing(conn, &site.namespace)?; - let has_minmax_multi_ops = has_minmax_multi_ops(conn)?; - let pg_stats_has_range_bounds_histogram = pg_stats_has_range_bounds_histogram(conn)?; + let text_columns = get_text_columns(conn, &site.namespace).await?; + let use_poi = supports_proof_of_indexing(conn, &site.namespace).await?; + let has_minmax_multi_ops = has_minmax_multi_ops(conn).await?; + let pg_stats_has_range_bounds_histogram = pg_stats_has_range_bounds_histogram(conn).await?; Ok(Catalog { site, @@ -219,13 +222,13 @@ impl Catalog { } /// Return a new catalog suitable for creating a new subgraph - pub fn for_creation( - conn: &mut PgConnection, + pub async fn for_creation( + conn: &mut AsyncPgConnection, site: Arc, entities_with_causality_region: BTreeSet, ) -> Result { - let has_minmax_multi_ops = has_minmax_multi_ops(conn)?; - let pg_stats_has_range_bounds_histogram = pg_stats_has_range_bounds_histogram(conn)?; + let has_minmax_multi_ops = has_minmax_multi_ops(conn).await?; + let pg_stats_has_range_bounds_histogram = pg_stats_has_range_bounds_histogram(conn).await?; Ok(Catalog { site, @@ -281,7 +284,10 @@ impl Catalog { } } - pub fn stats(&self, conn: &mut PgConnection) -> Result, StoreError> { + pub async fn stats( + &self, + conn: &mut AsyncPgConnection, + ) -> Result, StoreError> { #[derive(Queryable, QueryableByName)] pub struct DbStats { #[diesel(sql_type = BigInt)] @@ -318,8 +324,8 @@ impl Catalog { upper: Vec, } - fn block_range_histogram( - conn: &mut PgConnection, + async fn block_range_histogram( + conn: &mut AsyncPgConnection, namespace: &Namespace, ) -> Result, StoreError> { let query = format!( @@ -333,7 +339,8 @@ impl Catalog { ); let result = sql_query(query) .bind::(namespace.as_str()) - .get_results::(conn)?; + .get_results::(conn) + .await?; Ok(result) } @@ -368,10 +375,11 @@ impl Catalog { .bind::(self.site.id) .bind::(self.site.namespace.as_str()) .load::(conn) + .await .map_err(StoreError::from)?; let mut range_histogram = if self.pg_stats_has_range_bounds_histogram { - block_range_histogram(conn, &self.site.namespace)? + block_range_histogram(conn, &self.site.namespace).await? } else { vec![] }; @@ -399,8 +407,8 @@ impl Catalog { } } -fn get_text_columns( - conn: &mut PgConnection, +async fn get_text_columns( + conn: &mut AsyncPgConnection, namespace: &Namespace, ) -> Result>, StoreError> { const QUERY: &str = " @@ -418,7 +426,8 @@ fn get_text_columns( let map: HashMap> = diesel::sql_query(QUERY) .bind::(namespace.as_str()) - .load::(conn)? 
+ .load::(conn) + .await? .into_iter() .fold(HashMap::new(), |mut map, col| { map.entry(col.table_name) @@ -429,8 +438,8 @@ fn get_text_columns( Ok(map) } -pub fn table_exists( - conn: &mut PgConnection, +pub async fn table_exists( + conn: &mut AsyncPgConnection, namespace: &str, table: &SqlName, ) -> Result { @@ -445,28 +454,30 @@ pub fn table_exists( let result: Vec = diesel::sql_query(query) .bind::(namespace) .bind::(table.as_str()) - .load(conn)?; + .load(conn) + .await?; Ok(!result.is_empty()) } -pub fn supports_proof_of_indexing( - conn: &mut PgConnection, +pub async fn supports_proof_of_indexing( + conn: &mut AsyncPgConnection, namespace: &Namespace, ) -> Result { lazy_static! { static ref POI_TABLE_NAME: SqlName = SqlName::verbatim(POI_TABLE.to_owned()); } - table_exists(conn, namespace.as_str(), &POI_TABLE_NAME) + table_exists(conn, namespace.as_str(), &POI_TABLE_NAME).await } -pub fn current_servers(conn: &mut PgConnection) -> Result, StoreError> { +pub async fn current_servers(conn: &mut AsyncPgConnection) -> Result, StoreError> { #[derive(QueryableByName)] struct Srv { #[diesel(sql_type = Text)] srvname: String, } Ok(sql_query("select srvname from pg_foreign_server") - .get_results::(conn)? + .get_results::(conn) + .await? .into_iter() .map(|srv| srv.srvname) .collect()) @@ -474,8 +485,8 @@ pub fn current_servers(conn: &mut PgConnection) -> Result, StoreErro /// Return the options for the foreign server `name` as a map of option /// names to values -pub fn server_options( - conn: &mut PgConnection, +pub async fn server_options( + conn: &mut AsyncPgConnection, name: &str, ) -> Result>, StoreError> { #[derive(QueryableByName)] @@ -485,7 +496,8 @@ pub fn server_options( } let entries = sql_query("select srvoptions from pg_foreign_server where srvname = $1") .bind::(name) - .get_result::(conn)? + .get_result::(conn) + .await? .srvoptions .into_iter() .filter_map(|opt| { @@ -498,81 +510,98 @@ pub fn server_options( Ok(HashMap::from_iter(entries)) } -pub fn has_namespace(conn: &mut PgConnection, namespace: &Namespace) -> Result { +pub async fn has_namespace( + conn: &mut AsyncPgConnection, + namespace: &Namespace, +) -> Result { use pg_namespace as nsp; Ok(select(diesel::dsl::exists( nsp::table.filter(nsp::name.eq(namespace.as_str())), )) - .get_result::(conn)?) + .get_result::(conn) + .await?) } /// Drop the schema for `src` if it is a foreign schema imported from /// another database. If the schema does not exist, or is not a foreign /// schema, do nothing. This crucially depends on the fact that we never mix /// foreign and local tables in the same schema. 
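// [Editor's sketch, not part of the patch] The catalog.rs hunks all apply one
// mechanical rule: `&mut PgConnection` becomes `&mut AsyncPgConnection`, the
// executor methods come from `diesel_async::RunQueryDsl` instead of the sync
// `diesel::RunQueryDsl`, and every `load`/`get_result`/`execute` call site
// gains an `.await`. A condensed before/after of that shape, using a
// hypothetical `schema_exists` helper rather than code from this file:

use diesel::sql_types::Text;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

#[derive(diesel::QueryableByName)]
struct Row {
    #[diesel(sql_type = diesel::sql_types::Bool)]
    found: bool,
}

// sync shape was:  fn schema_exists(conn: &mut PgConnection, ..) -> .. {
//                      .. .get_result::<Row>(conn)? ..
async fn schema_exists(
    conn: &mut AsyncPgConnection,
    nsp: &str,
) -> Result<bool, diesel::result::Error> {
    diesel::sql_query("select exists(select 1 from pg_namespace where nspname = $1) as found")
        .bind::<Text, _>(nsp)
        .get_result::<Row>(conn)
        .await
        .map(|r| r.found)
}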
-pub fn drop_foreign_schema(conn: &mut PgConnection, src: &Site) -> Result<(), StoreError> { +pub async fn drop_foreign_schema( + conn: &mut AsyncPgConnection, + src: &Site, +) -> Result<(), StoreError> { use foreign_tables as ft; let is_foreign = select(diesel::dsl::exists( ft::table.filter(ft::foreign_table_schema.eq(src.namespace.as_str())), )) - .get_result::(conn)?; + .get_result::(conn) + .await?; if is_foreign { let query = format!("drop schema if exists {} cascade", src.namespace); - conn.batch_execute(&query)?; + conn.batch_execute(&query).await?; } Ok(()) } -pub fn foreign_tables(conn: &mut PgConnection, nsp: &str) -> Result, StoreError> { +pub async fn foreign_tables( + conn: &mut AsyncPgConnection, + nsp: &str, +) -> Result, StoreError> { use foreign_tables as ft; ft::table .filter(ft::foreign_table_schema.eq(nsp)) .select(ft::foreign_table_name) .get_results::(conn) + .await .map_err(StoreError::from) } /// Drop the schema `nsp` and all its contents if it exists, and create it /// again so that `nsp` is an empty schema -pub fn recreate_schema(conn: &mut PgConnection, nsp: &str) -> Result<(), StoreError> { +pub async fn recreate_schema(conn: &mut AsyncPgConnection, nsp: &str) -> Result<(), StoreError> { let query = format!( "drop schema if exists {nsp} cascade;\ create schema {nsp};", nsp = nsp ); - Ok(conn.batch_execute(&query)?) + Ok(conn.batch_execute(&query).await?) } /// Drop the schema `nsp` and all its contents if it exists -pub fn drop_schema(conn: &mut PgConnection, nsp: &str) -> Result<(), StoreError> { +pub async fn drop_schema(conn: &mut AsyncPgConnection, nsp: &str) -> Result<(), StoreError> { let query = format!("drop schema if exists {nsp} cascade;", nsp = nsp); - Ok(conn.batch_execute(&query)?) + Ok(conn.batch_execute(&query).await?) } -pub fn migration_count(conn: &mut PgConnection) -> Result { +pub async fn migration_count(conn: &mut AsyncPgConnection) -> Result { use __diesel_schema_migrations as m; - if !table_exists(conn, NAMESPACE_PUBLIC, &MIGRATIONS_TABLE)? { + if !table_exists(conn, NAMESPACE_PUBLIC, &MIGRATIONS_TABLE).await? { return Ok(0); } m::table .count() .get_result(conn) + .await .map(|n: i64| n as usize) .map_err(StoreError::from) } -pub fn account_like(conn: &mut PgConnection, site: &Site) -> Result, StoreError> { +pub async fn account_like( + conn: &mut AsyncPgConnection, + site: &Site, +) -> Result, StoreError> { use table_stats as ts; let names = ts::table .filter(ts::deployment.eq(site.id)) .select((ts::table_name, ts::is_account_like)) .get_results::<(String, Option)>(conn) + .await .optional()? .unwrap_or_default() .into_iter() @@ -587,8 +616,8 @@ pub fn account_like(conn: &mut PgConnection, site: &Site) -> Result Result { @@ -623,11 +653,12 @@ pub fn copy_account_like( Ok(sql_query(query) .bind::(src.id) .bind::(dst.id) - .execute(conn)?) + .execute(conn) + .await?) 
} -pub fn set_last_pruned_block( - conn: &mut PgConnection, +pub async fn set_last_pruned_block( + conn: &mut AsyncPgConnection, site: &Site, table_name: &SqlName, last_pruned_block: BlockNumber, @@ -643,7 +674,8 @@ pub fn set_last_pruned_block( .on_conflict((ts::deployment, ts::table_name)) .do_update() .set(ts::last_pruned_block.eq(last_pruned_block)) - .execute(conn)?; + .execute(conn) + .await?; Ok(()) } @@ -690,8 +722,8 @@ pub(crate) mod table_schema { } } - pub fn columns( - conn: &mut PgConnection, + pub async fn columns( + conn: &mut AsyncPgConnection, nsp: &str, table_name: &str, ) -> Result, StoreError> { @@ -709,7 +741,8 @@ pub(crate) mod table_schema { Ok(sql_query(QUERY) .bind::(nsp) .bind::(table_name) - .get_results::(conn)? + .get_results::(conn) + .await? .into_iter() .map(|ci| ci.into()) .collect()) @@ -719,8 +752,8 @@ pub(crate) mod table_schema { /// Return a SQL statement to create the foreign table /// `{dst_nsp}.{table_name}` for the server `server` which has the same /// schema as the (local) table `{src_nsp}.{table_name}` -pub fn create_foreign_table( - conn: &mut PgConnection, +pub async fn create_foreign_table( + conn: &mut AsyncPgConnection, src_nsp: &str, table_name: &str, dst_nsp: &str, @@ -753,7 +786,7 @@ pub fn create_foreign_table( Ok(query) } - let columns = table_schema::columns(conn, src_nsp, table_name)?; + let columns = table_schema::columns(conn, src_nsp, table_name).await?; let query = build_query(columns, src_nsp, table_name, dst_nsp, server).map_err(|_| { anyhow!( "failed to generate 'create foreign table' query for {}.{}", @@ -777,8 +810,8 @@ pub fn create_foreign_table( /// The list `shard_nsps` consists of pairs `(name, namespace)` where `name` /// is the name of the shard and `namespace` is the namespace where the /// `src_table` is mapped -pub fn create_cross_shard_view( - conn: &mut PgConnection, +pub async fn create_cross_shard_view( + conn: &mut AsyncPgConnection, src_nsp: &str, src_table: &str, dst_nsp: &str, @@ -805,7 +838,7 @@ pub fn create_cross_shard_view( Ok(query) } - let columns = table_schema::columns(conn, src_nsp, src_table)?; + let columns = table_schema::columns(conn, src_nsp, src_table).await?; let query = build_query(&columns, src_table, dst_nsp, shard_nsps).map_err(|_| { anyhow!( "failed to generate 'create foreign table' query for {}.{}", @@ -817,8 +850,8 @@ pub fn create_cross_shard_view( } /// Checks in the database if a given index is valid. -pub(crate) fn check_index_is_valid( - conn: &mut PgConnection, +pub(crate) async fn check_index_is_valid( + conn: &mut AsyncPgConnection, schema_name: &str, index_name: &str, ) -> Result { @@ -842,14 +875,15 @@ pub(crate) fn check_index_is_valid( .bind::(schema_name) .bind::(index_name) .get_result::(conn) + .await .optional() .map_err::(Into::into)? 
.map(|check| check.is_valid); Ok(matches!(result, Some(true))) } -pub(crate) fn indexes_for_table( - conn: &mut PgConnection, +pub(crate) async fn indexes_for_table( + conn: &mut AsyncPgConnection, schema_name: &str, table_name: &str, ) -> Result, StoreError> { @@ -873,13 +907,14 @@ pub(crate) fn indexes_for_table( .bind::(schema_name) .bind::(table_name) .load::(conn) + .await .map_err::(Into::into)?; Ok(results.into_iter().map(|i| i.def).collect()) } -pub(crate) fn drop_index( - conn: &mut PgConnection, +pub(crate) async fn drop_index( + conn: &mut AsyncPgConnection, schema_name: &str, index_name: &str, ) -> Result<(), StoreError> { @@ -888,6 +923,7 @@ pub(crate) fn drop_index( .bind::(schema_name) .bind::(index_name) .execute(conn) + .await .map_err::(Into::into)?; Ok(()) } @@ -895,7 +931,7 @@ pub(crate) fn drop_index( /// Return by how much the slowest replica connected to the database `conn` /// is lagging. The returned value has millisecond precision. If the /// database has no replicas, return `0` -pub(crate) fn replication_lag(conn: &mut PgConnection) -> Result { +pub(crate) async fn replication_lag(conn: &mut AsyncPgConnection) -> Result { #[derive(Queryable, QueryableByName)] struct Lag { #[diesel(sql_type = Nullable)] @@ -906,7 +942,7 @@ pub(crate) fn replication_lag(conn: &mut PgConnection) -> Result(conn)?; + .get_result::(conn).await?; let lag = lag .ms @@ -916,8 +952,8 @@ pub(crate) fn replication_lag(conn: &mut PgConnection) -> Result Result<(), StoreError> { sql_query( @@ -930,11 +966,12 @@ pub(crate) fn cancel_vacuum( and n.nspname = $1", ) .bind::(namespace) - .execute(conn)?; + .execute(conn) + .await?; Ok(()) } -pub(crate) fn default_stats_target(conn: &mut PgConnection) -> Result { +pub(crate) async fn default_stats_target(conn: &mut AsyncPgConnection) -> Result { #[derive(Queryable, QueryableByName)] struct Target { #[diesel(sql_type = Integer)] @@ -943,12 +980,13 @@ pub(crate) fn default_stats_target(conn: &mut PgConnection) -> Result(conn)?; + .get_result::(conn) + .await?; Ok(target.setting) } -pub(crate) fn stats_targets( - conn: &mut PgConnection, +pub(crate) async fn stats_targets( + conn: &mut AsyncPgConnection, namespace: &Namespace, ) -> Result>, StoreError> { use pg_attribute as a; @@ -962,7 +1000,8 @@ pub(crate) fn stats_targets( .filter(n::name.eq(namespace.as_str())) .filter(a::num.ge(1)) .select((c::name, a::name, a::stats_target)) - .load::<(String, String, i32)>(conn)? + .load::<(String, String, i32)>(conn) + .await? 
.into_iter() .map(|(table, column, target)| (SqlName::from(table), SqlName::from(column), target)); @@ -976,8 +1015,8 @@ pub(crate) fn stats_targets( Ok(map) } -pub(crate) fn set_stats_target( - conn: &mut PgConnection, +pub(crate) async fn set_stats_target( + conn: &mut AsyncPgConnection, namespace: &Namespace, table: &SqlName, columns: &[&SqlName], @@ -988,7 +1027,7 @@ pub(crate) fn set_stats_target( .map(|column| format!("alter column {} set statistics {}", column.quoted(), target)) .join(", "); let query = format!("alter table {}.{} {}", namespace, table.quoted(), columns); - conn.batch_execute(&query)?; + conn.batch_execute(&query).await?; Ok(()) } @@ -997,8 +1036,8 @@ pub(crate) fn set_stats_target( /// same logic that Postgres' [autovacuum /// daemon](https://www.postgresql.org/docs/current/routine-vacuuming.html#AUTOVACUUM) /// uses -pub(crate) fn needs_autoanalyze( - conn: &mut PgConnection, +pub(crate) async fn needs_autoanalyze( + conn: &mut AsyncPgConnection, namespace: &Namespace, ) -> Result, StoreError> { const QUERY: &str = "select relname \ @@ -1016,6 +1055,7 @@ pub(crate) fn needs_autoanalyze( let tables = sql_query(QUERY) .bind::(namespace.as_str()) .get_results::(conn) + .await .optional()? .map(|tables| tables.into_iter().map(|t| t.name).collect()) .unwrap_or(vec![]); @@ -1025,7 +1065,7 @@ pub(crate) fn needs_autoanalyze( /// Check whether the database for `conn` supports the `minmax_multi_ops` /// introduced in Postgres 14 -fn has_minmax_multi_ops(conn: &mut PgConnection) -> Result { +async fn has_minmax_multi_ops(conn: &mut AsyncPgConnection) -> Result { const QUERY: &str = "select count(*) = 2 as has_ops \ from pg_opclass \ where opcname in('int8_minmax_multi_ops', 'int4_minmax_multi_ops')"; @@ -1036,12 +1076,14 @@ fn has_minmax_multi_ops(conn: &mut PgConnection) -> Result { has_ops: bool, } - Ok(sql_query(QUERY).get_result::(conn)?.has_ops) + Ok(sql_query(QUERY).get_result::(conn).await?.has_ops) } /// Check whether the database for `conn` has the column /// `pg_stats.range_bounds_histogram` introduced in Postgres 17 -fn pg_stats_has_range_bounds_histogram(conn: &mut PgConnection) -> Result { +async fn pg_stats_has_range_bounds_histogram( + conn: &mut AsyncPgConnection, +) -> Result { #[derive(Queryable, QueryableByName)] struct HasIt { #[diesel(sql_type = Bool)] @@ -1057,12 +1099,13 @@ fn pg_stats_has_range_bounds_histogram(conn: &mut PgConnection) -> Result(conn) + .await .map(|h| h.has_it) .map_err(StoreError::from) } -pub(crate) fn histogram_bounds( - conn: &mut PgConnection, +pub(crate) async fn histogram_bounds( + conn: &mut AsyncPgConnection, namespace: &Namespace, table: &SqlName, column: &str, @@ -1084,6 +1127,7 @@ pub(crate) fn histogram_bounds( .bind::(table.as_str()) .bind::(column) .get_result::(conn) + .await .optional() .map(|bounds| bounds.map(|b| b.bounds).unwrap_or_default()) .map_err(StoreError::from) diff --git a/store/postgres/src/chain_head_listener.rs b/store/postgres/src/chain_head_listener.rs index 1880b343c3d..16c658e52df 100644 --- a/store/postgres/src/chain_head_listener.rs +++ b/store/postgres/src/chain_head_listener.rs @@ -254,19 +254,21 @@ impl ChainHeadUpdateSender { } } - pub fn send(&self, hash: &str, number: i64) -> Result<(), StoreError> { + pub async fn send(&self, hash: &str, number: i64) -> Result<(), StoreError> { let msg = json! 
({ "network_name": &self.chain_name, "head_block_hash": hash, "head_block_number": number }); - let mut conn = self.pool.get()?; - self.sender.notify( - &mut conn, - CHANNEL_NAME.as_str(), - Some(&self.chain_name), - &msg, - ) + let mut conn = self.pool.get().await?; + self.sender + .notify( + &mut conn, + CHANNEL_NAME.as_str(), + Some(&self.chain_name), + &msg, + ) + .await } } diff --git a/store/postgres/src/chain_store.rs b/store/postgres/src/chain_store.rs index e3ee70f378d..bc62b426f98 100644 --- a/store/postgres/src/chain_store.rs +++ b/store/postgres/src/chain_store.rs @@ -1,9 +1,10 @@ use anyhow::anyhow; -use diesel::pg::PgConnection; -use diesel::prelude::*; -use diesel::r2d2::{ConnectionManager, PooledConnection}; +use async_trait::async_trait; use diesel::sql_types::Text; -use diesel::{insert_into, update}; +use diesel::{insert_into, update, ExpressionMethods, OptionalExtension, QueryDsl}; +use diesel_async::AsyncConnection; +use diesel_async::{scoped_futures::ScopedFutureExt, RunQueryDsl}; + use graph::components::store::ChainHeadStore; use graph::data::store::ethereum::call; use graph::derive::CheapClone; @@ -27,13 +28,13 @@ use graph::blockchain::{Block, BlockHash, ChainIdentifier, ExtendedBlockPtr}; use graph::cheap_clone::CheapClone; use graph::prelude::web3::types::{H256, U256}; use graph::prelude::{ - async_trait, serde_json as json, transaction_receipt::LightTransactionReceipt, BlockNumber, - BlockPtr, CachedEthereumCall, CancelableError, ChainStore as ChainStoreTrait, Error, - EthereumCallCache, StoreError, + serde_json as json, transaction_receipt::LightTransactionReceipt, BlockNumber, BlockPtr, + CachedEthereumCall, ChainStore as ChainStoreTrait, Error, EthereumCallCache, StoreError, }; use graph::{ensure, internal_error}; use self::recent_blocks_cache::RecentBlocksCache; +use crate::AsyncPgConnection; use crate::{ block_store::ChainStatus, chain_head_listener::ChainHeadUpdateSender, pool::ConnectionPool, }; @@ -84,20 +85,21 @@ pub use data::Storage; /// Encapuslate access to the blocks table for a chain. mod data { use crate::diesel::dsl::IntervalDsl; - use diesel::sql_types::{Array, Binary, Bool, Nullable}; - use diesel::{connection::SimpleConnection, insert_into}; - use diesel::{delete, prelude::*, sql_query}; + use crate::AsyncPgConnection; + use diesel::dsl::sql; + use diesel::insert_into; + use diesel::sql_types::{Array, Binary, Bool, Nullable, Text}; + use diesel::{delete, sql_query, ExpressionMethods, JoinOnDsl, OptionalExtension, QueryDsl}; use diesel::{ deserialize::FromSql, pg::Pg, serialize::{Output, ToSql}, - sql_types::Text, }; - use diesel::{dsl::sql, pg::PgConnection}; use diesel::{ sql_types::{BigInt, Bytea, Integer, Jsonb}, update, }; + use diesel_async::{RunQueryDsl, SimpleAsyncConnection}; use graph::blockchain::{Block, BlockHash}; use graph::data::store::scalar::Bytes; use graph::internal_error; @@ -380,7 +382,7 @@ mod data { /// `Storage::Private`. If it uses `Storage::Shared`, do nothing since /// a regular migration will already have created the `ethereum_blocks` /// table - pub(super) fn create(&self, conn: &mut PgConnection) -> Result<(), Error> { + pub(super) async fn create(&self, conn: &mut AsyncPgConnection) -> Result<(), Error> { fn make_ddl(nsp: &str) -> String { format!( " @@ -413,7 +415,7 @@ mod data { match self { Storage::Shared => Ok(()), Storage::Private(Schema { name, .. 
}) => { - conn.batch_execute(&make_ddl(name))?; + conn.batch_execute(&make_ddl(name)).await?; Ok(()) } } @@ -428,48 +430,56 @@ mod data { } } - pub(super) fn drop_storage( + pub(super) async fn drop_storage( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, name: &str, ) -> Result<(), StoreError> { match &self { Storage::Shared => { use public::ethereum_blocks as b; - delete(b::table.filter(b::network_name.eq(name))).execute(conn)?; + delete(b::table.filter(b::network_name.eq(name))) + .execute(conn) + .await?; Ok(()) } Storage::Private(Schema { name, .. }) => { - conn.batch_execute(&format!("drop schema {} cascade", name))?; + conn.batch_execute(&format!("drop schema {} cascade", name)) + .await?; Ok(()) } } } - pub(super) fn truncate_block_cache( + pub(super) async fn truncate_block_cache( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, ) -> Result<(), StoreError> { let table_name = match &self { Storage::Shared => ETHEREUM_BLOCKS_TABLE_NAME, Storage::Private(Schema { blocks, .. }) => &blocks.qname, }; - conn.batch_execute(&format!("truncate table {} restart identity", table_name))?; + conn.batch_execute(&format!("truncate table {} restart identity", table_name)) + .await?; Ok(()) } - fn truncate_call_cache(&self, conn: &mut PgConnection) -> Result<(), StoreError> { + async fn truncate_call_cache( + &self, + conn: &mut AsyncPgConnection, + ) -> Result<(), StoreError> { let table_name = match &self { Storage::Shared => ETHEREUM_CALL_CACHE_TABLE_NAME, Storage::Private(Schema { call_cache, .. }) => &call_cache.qname, }; - conn.batch_execute(&format!("truncate table {} restart identity", table_name))?; + conn.batch_execute(&format!("truncate table {} restart identity", table_name)) + .await?; Ok(()) } - pub(super) fn cleanup_shallow_blocks( + pub(super) async fn cleanup_shallow_blocks( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, lowest_block: i32, ) -> Result<(), StoreError> { let table_name = match &self { @@ -479,13 +489,14 @@ mod data { conn.batch_execute(&format!( "delete from {} WHERE number >= {} AND data->'block'->'data' = 'null'::jsonb;", table_name, lowest_block, - ))?; + )) + .await?; Ok(()) } - pub(super) fn remove_cursor( + pub(super) async fn remove_cursor( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, chain: &str, ) -> Result, StoreError> { use diesel::dsl::not; @@ -499,6 +510,7 @@ mod data { .set(head_block_cursor.eq(None as Option)) .returning(head_block_number) .get_result::>(conn) + .await .optional() { Ok(res) => match res { @@ -517,9 +529,9 @@ mod data { /// same hash, then overwrite that block since it may be adding /// transaction receipts. If `overwrite` is `true`, overwrite a /// possibly existing entry. If it is `false`, keep the old entry. - pub(super) fn upsert_block( + pub(super) async fn upsert_block( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, chain: &str, block: &dyn Block, overwrite: bool, @@ -555,13 +567,15 @@ mod data { .on_conflict(b::hash) .do_update() .set(values) - .execute(conn)?; + .execute(conn) + .await?; } else { insert_into(b::table) .values(values.clone()) .on_conflict(b::hash) .do_nothing() - .execute(conn)?; + .execute(conn) + .await?; } } Storage::Private(Schema { blocks, .. 
}) => { @@ -586,15 +600,16 @@ mod data { .bind::(number) .bind::(parent_hash.as_slice()) .bind::(data) - .execute(conn)?; + .execute(conn) + .await?; } }; Ok(()) } - pub(super) fn block_ptrs_by_numbers( + pub(super) async fn block_ptrs_by_numbers( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, chain: &str, numbers: &[BlockNumber], ) -> Result, StoreError> { @@ -612,21 +627,25 @@ mod data { .filter(b::network_name.eq(chain)) .filter(b::number.eq_any(Vec::from_iter(numbers.iter().map(|&n| n as i64)))) .load::<(BlockHash, i64, BlockHash, json::Value)>(conn) + .await + } + Storage::Private(Schema { blocks, .. }) => { + blocks + .table() + .select(( + blocks.hash(), + blocks.number(), + blocks.parent_hash(), + sql::("coalesce(data -> 'block', data)"), + )) + .filter( + blocks + .number() + .eq_any(Vec::from_iter(numbers.iter().map(|&n| n as i64))), + ) + .load::<(BlockHash, i64, BlockHash, json::Value)>(conn) + .await } - Storage::Private(Schema { blocks, .. }) => blocks - .table() - .select(( - blocks.hash(), - blocks.number(), - blocks.parent_hash(), - sql::("coalesce(data -> 'block', data)"), - )) - .filter( - blocks - .number() - .eq_any(Vec::from_iter(numbers.iter().map(|&n| n as i64))), - ) - .load::<(BlockHash, i64, BlockHash, json::Value)>(conn), }?; Ok(x.into_iter() @@ -636,9 +655,9 @@ mod data { .collect()) } - pub(super) fn blocks( + pub(super) async fn blocks( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, chain: &str, hashes: &[BlockHash], ) -> Result, StoreError> { @@ -666,21 +685,25 @@ mod data { .eq_any(Vec::from_iter(hashes.iter().map(|h| format!("{:x}", h)))), ) .load::<(BlockHash, i64, BlockHash, json::Value)>(conn) + .await + } + Storage::Private(Schema { blocks, .. }) => { + blocks + .table() + .select(( + blocks.hash(), + blocks.number(), + blocks.parent_hash(), + sql::("coalesce(data -> 'block', data)"), + )) + .filter( + blocks + .hash() + .eq_any(Vec::from_iter(hashes.iter().map(|h| h.as_slice()))), + ) + .load::<(BlockHash, i64, BlockHash, json::Value)>(conn) + .await } - Storage::Private(Schema { blocks, .. }) => blocks - .table() - .select(( - blocks.hash(), - blocks.number(), - blocks.parent_hash(), - sql::("coalesce(data -> 'block', data)"), - )) - .filter( - blocks - .hash() - .eq_any(Vec::from_iter(hashes.iter().map(|h| h.as_slice()))), - ) - .load::<(BlockHash, i64, BlockHash, json::Value)>(conn), }?; Ok(x.into_iter() .map(|(hash, nr, parent, data)| { @@ -689,9 +712,9 @@ mod data { .collect()) } - pub(super) fn block_hashes_by_block_number( + pub(super) async fn block_hashes_by_block_number( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, chain: &str, number: BlockNumber, ) -> Result, Error> { @@ -703,7 +726,8 @@ mod data { .select(b::hash) .filter(b::network_name.eq(&chain)) .filter(b::number.eq(number as i64)) - .get_results::(conn)? + .get_results::(conn) + .await? .into_iter() .map(|h| h.parse()) .collect::, _>>() @@ -713,16 +737,17 @@ mod data { .table() .select(blocks.hash()) .filter(blocks.number().eq(number as i64)) - .get_results::>(conn)? + .get_results::>(conn) + .await? .into_iter() .map(BlockHash::from) .collect::>()), } } - pub(super) fn confirm_block_hash( + pub(super) async fn confirm_block_hash( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, chain: &str, number: BlockNumber, hash: &BlockHash, @@ -739,6 +764,7 @@ mod data { .filter(b::number.eq(number)) .filter(b::hash.ne(&hash)) .execute(conn) + .await .map_err(Error::from) } Storage::Private(Schema { blocks, .. 
 }) => {
@@ -750,6 +776,7 @@
                     .bind::<Integer, _>(number)
                     .bind::<Bytea, _>(hash.as_slice())
                     .execute(conn)
+                    .await
                     .map_err(Error::from)
                 }
             }
         }

         /// timestamp's representation depends on the blockchain::Block implementation; on
         /// Ethereum this is a U256 but on different chains it will most likely be different.
-        pub(super) fn block_number(
+        pub(super) async fn block_number(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             hash: &BlockHash,
         ) -> Result<Option<(BlockNumber, Option<u64>, Option<BlockHash>)>, StoreError> {
             const TIMESTAMP_QUERY: &str =
@@ -777,6 +804,7 @@
                     ))
                     .filter(b::hash.eq(format!("{:x}", hash)))
                     .first::<(i64, Option<String>, Option<String>)>(conn)
+                    .await
                     .optional()?
                     .map(|(number, ts, parent_hash)| {
                         // Convert parent_hash from Hex String to Vec<u8>
@@ -794,6 +822,7 @@
                     ))
                     .filter(blocks.hash().eq(hash.as_slice()))
                     .first::<(i64, Option<String>, Vec<u8>)>(conn)
+                    .await
                     .optional()?
                     .map(|(number, ts, parent_hash)| (number, ts, Some(parent_hash))),
             };
@@ -812,9 +841,9 @@
             }
         }

-        pub(super) fn block_numbers(
+        pub(super) async fn block_numbers(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             hashes: &[BlockHash],
         ) -> Result<HashMap<BlockHash, BlockNumber>, StoreError> {
             let pairs = match self {
@@ -829,7 +858,8 @@
                     b::table
                         .select((b::hash, b::number))
                         .filter(b::hash.eq_any(hashes))
-                        .load::<(String, i64)>(conn)?
+                        .load::<(String, i64)>(conn)
+                        .await?
                         .into_iter()
                         .map(|(hash, n)| {
                             let hash = hex::decode(&hash).expect("Invalid hex in parent_hash");
@@ -843,7 +873,8 @@
                         .table()
                         .select((blocks.hash(), blocks.number()))
                         .filter(blocks.hash().eq_any(hashes))
-                        .load::<(BlockHash, i64)>(conn)?
+                        .load::<(BlockHash, i64)>(conn)
+                        .await?
                 }
             };
@@ -856,9 +887,9 @@
         /// Find the first block that is missing from the database needed to
        /// complete the chain from block `hash` to the block with number
         /// `first_block`.
-        pub(super) fn missing_parent(
+        pub(super) async fn missing_parent(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             chain: &str,
             first_block: i64,
             hash: H256,
@@ -904,7 +935,8 @@
                         .bind::<Text, _>(&hash)
                         .bind::<Text, _>(&genesis)
                         .bind::<BigInt, _>(first_block)
-                        .load::(conn)?;
+                        .load::(conn)
+                        .await?;

                 let missing = match missing.len() {
                     0 => None,
@@ -949,7 +981,8 @@
                         .bind::<Bytea, _>(hash.as_bytes())
                         .bind::<Bytea, _>(genesis.as_bytes())
                         .bind::<BigInt, _>(first_block)
-                        .load::(conn)?;
+                        .load::(conn)
+                        .await?;

                 let missing = match missing.len() {
                     0 => None,
@@ -967,9 +1000,9 @@
         /// with a higher block number than the current chain head. The returned
         /// value is the hash and number of the candidate and the genesis block
         /// hash for the chain
-        pub(super) fn chain_head_candidate(
+        pub(super) async fn chain_head_candidate(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             chain: &str,
         ) -> Result<Option<BlockPtr>, Error> {
             use public::ethereum_networks as n;

             let head = n::table
                 .filter(n::name.eq(chain))
                 .select(n::head_block_number)
-                .first::<Option<i64>>(conn)?
+                .first::<Option<i64>>(conn)
+                .await?
                 .unwrap_or(-1);

             match self {
@@ -989,6 +1023,7 @@
                         .order_by((b::number.desc(), b::hash))
                         .select((b::hash, b::number))
                         .first::<(String, i64)>(conn)
+                        .await
                         .optional()?
                         .map(|(hash, number)| BlockPtr::try_from((hash.as_str(), number)))
                         .transpose()
@@ -999,6 +1034,7 @@
                         .order_by((blocks.number().desc(), blocks.hash()))
                         .select((blocks.hash(), blocks.number()))
                         .first::<(Vec<u8>, i64)>(conn)
+                        .await
                         .optional()?
.map(|(hash, number)| BlockPtr::try_from((hash.as_slice(), number))) .transpose(), @@ -1034,9 +1070,9 @@ mod data { /// Returns an ancestor of a specified block at a given offset, with an option to specify a `root` hash /// for a targeted search. If a `root` hash is provided, the search stops at the block whose parent hash /// matches the `root`. - pub(super) fn ancestor_block( + pub(super) async fn ancestor_block( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, block_ptr: BlockPtr, offset: BlockNumber, root: Option, @@ -1071,6 +1107,7 @@ mod data { .bind::(offset as i64) .get_result::(conn), } + .await .optional()?; use public::ethereum_blocks as b; @@ -1081,7 +1118,8 @@ mod data { b::table .filter(b::hash.eq(&block.hash)) .select(b::data) - .first::(conn)?, + .first::(conn) + .await?, BlockPtr::new( BlockHash::from_str(&block.hash)?, i32::try_from(block.number).unwrap(), @@ -1101,7 +1139,7 @@ mod data { number: i64, } - let block = match root { + let block = match &root { Some(root) => sql_query(query) .bind::(block_ptr.hash_slice()) .bind::(offset as i64) @@ -1112,6 +1150,7 @@ mod data { .bind::(offset as i64) .get_result::(conn), } + .await .optional()?; match block { @@ -1121,7 +1160,8 @@ mod data { .table() .filter(blocks.hash().eq(&block.hash)) .select(blocks.data()) - .first::(conn)?, + .first::(conn) + .await?, BlockPtr::from((block.hash, block.number)), )), } @@ -1150,9 +1190,9 @@ mod data { Ok(data_and_ptr) } - pub(super) fn delete_blocks_before( + pub(super) async fn delete_blocks_before( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, chain: &str, block: i64, ) -> Result { @@ -1165,6 +1205,7 @@ mod data { .filter(b::number.lt(block)) .filter(b::number.gt(0)) .execute(conn) + .await .map_err(Error::from) } Storage::Private(Schema { blocks, .. }) => { @@ -1175,14 +1216,15 @@ mod data { sql_query(query) .bind::(block) .execute(conn) + .await .map_err(Error::from) } } } - pub(super) fn delete_blocks_by_hash( + pub(super) async fn delete_blocks_by_hash( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, chain: &str, block_hashes: &[&H256], ) -> Result { @@ -1200,6 +1242,7 @@ mod data { .filter(b::hash.eq_any(hashes)) .filter(b::number.gt(0)) // keep genesis .execute(conn) + .await .map_err(Error::from) } Storage::Private(Schema { blocks, .. 
}) => { @@ -1214,14 +1257,15 @@ mod data { sql_query(query) .bind::, _>(hashes) .execute(conn) + .await .map_err(Error::from) } } } - pub(super) fn get_call_and_access( + pub(super) async fn get_call_and_access( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, id: &[u8], ) -> Result, Error> { match self { @@ -1237,6 +1281,7 @@ mod data { sql::("CURRENT_DATE > eth_call_meta.accessed_at"), )) .get_result(conn) + .await .optional() .map_err(Error::from) } @@ -1261,15 +1306,16 @@ mod data { )), )) .first::<(Vec, bool)>(conn) + .await .optional() .map_err(Error::from), } .map(|row| row.map(|(return_value, expired)| (Bytes::from(return_value), expired))) } - pub(super) fn get_calls_and_access( + pub(super) async fn get_calls_and_access( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, ids: &[&[u8]], ) -> Result, Bytes, bool)>, Error> { let rows = match self { @@ -1286,6 +1332,7 @@ mod data { sql::("CURRENT_DATE > eth_call_meta.accessed_at"), )) .load(conn) + .await .map_err(Error::from) } Storage::Private(Schema { @@ -1310,6 +1357,7 @@ mod data { )), )) .load::<(Vec, Vec, bool)>(conn) + .await .map_err(Error::from), }?; Ok(rows @@ -1318,9 +1366,9 @@ mod data { .collect()) } - pub(super) fn get_calls_in_block( + pub(super) async fn get_calls_in_block( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, block_ptr: BlockPtr, ) -> Result, Error> { let block_num = block_ptr.block_number(); @@ -1333,18 +1381,22 @@ mod data { .select((cache::id, cache::return_value, cache::contract_address)) .filter(cache::block_number.eq(block_num)) .order(cache::contract_address) - .get_results::<(Vec, Vec, Vec)>(conn)? + .get_results::<(Vec, Vec, Vec)>(conn) + .await? + } + Storage::Private(Schema { call_cache, .. }) => { + call_cache + .table() + .select(( + call_cache.id(), + call_cache.return_value(), + call_cache.contract_address(), + )) + .filter(call_cache.block_number().eq(block_num as i64)) + .order(call_cache.contract_address()) + .get_results::<(Vec, Vec, Vec)>(conn) + .await? } - Storage::Private(Schema { call_cache, .. }) => call_cache - .table() - .select(( - call_cache.id(), - call_cache.return_value(), - call_cache.contract_address(), - )) - .filter(call_cache.block_number().eq(block_num as i64)) - .order(call_cache.contract_address()) - .get_results::<(Vec, Vec, Vec)>(conn)?, }; Ok(rows @@ -1358,9 +1410,9 @@ mod data { .collect()) } - pub(super) fn clear_call_cache( + pub(super) async fn clear_call_cache( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, head: BlockNumber, from: BlockNumber, to: BlockNumber, @@ -1368,7 +1420,7 @@ mod data { if from <= 0 && to >= head { // We are removing the entire cache. 
Truncating is much // faster in that case - self.truncate_call_cache(conn)?; + self.truncate_call_cache(conn).await?; return Ok(()); } match self { @@ -1380,6 +1432,7 @@ mod data { .filter(cache::block_number.le(to)), ) .execute(conn) + .await .map_err(Error::from)?; Ok(()) } @@ -1395,15 +1448,16 @@ mod data { .bind::(from) .bind::(to) .execute(conn) + .await .map_err(Error::from) .map(|_| ()) } } } - pub fn clear_stale_call_cache( + pub async fn clear_stale_call_cache( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, logger: &Logger, ttl_days: i32, ttl_max_contracts: Option, @@ -1451,7 +1505,8 @@ mod data { .lt(diesel::dsl::date(diesel::dsl::now - ttl_days.days())), ) .limit(batch_limit) - .get_results::>(conn)?; + .get_results::>(conn) + .await?; if stale_contracts.is_empty() { info!( @@ -1468,10 +1523,12 @@ mod data { .select(cache::id) .filter(cache::contract_address.eq_any(&stale_contracts)) .limit(cache_batch_size as i64) - .get_results::>(conn)?; + .get_results::>(conn) + .await?; let deleted_count = diesel::delete(cache::table.filter(cache::id.eq_any(&next_batch))) - .execute(conn)?; + .execute(conn) + .await?; total_calls += deleted_count; @@ -1483,7 +1540,8 @@ mod data { let deleted_contracts = diesel::delete( meta::table.filter(meta::contract_address.eq_any(&stale_contracts)), ) - .execute(conn)?; + .execute(conn) + .await?; total_contracts += deleted_contracts as i64; } @@ -1546,7 +1604,8 @@ mod data { let stale_contracts: Vec> = sql_query(&select_query) .bind::(batch_limit) - .load::(conn)? + .load::(conn) + .await? .into_iter() .map(|r| r.contract_address) .collect(); @@ -1564,7 +1623,8 @@ mod data { loop { let deleted_count = sql_query(&delete_cache_query) .bind::, _>(&stale_contracts) - .execute(conn)?; + .execute(conn) + .await?; total_calls += deleted_count; @@ -1575,7 +1635,8 @@ mod data { let deleted_contracts = sql_query(&delete_meta_query) .bind::, _>(&stale_contracts) - .execute(conn)?; + .execute(conn) + .await?; total_contracts += deleted_contracts as i64; } @@ -1585,9 +1646,9 @@ mod data { } } - pub(super) fn update_accessed_at( + pub(super) async fn update_accessed_at( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, contract_address: &[u8], ) -> Result<(), Error> { let result = match self { @@ -1597,6 +1658,7 @@ mod data { update(meta::table.find::<&[u8]>(contract_address.as_ref())) .set(meta::accessed_at.eq(sql("CURRENT_DATE"))) .execute(conn) + .await } Storage::Private(Schema { call_meta, .. }) => { let query = format!( @@ -1606,14 +1668,15 @@ mod data { sql_query(query) .bind::(contract_address) .execute(conn) + .await } }; result.map(|_| ()).map_err(Error::from) } - pub(super) fn set_call( + pub(super) async fn set_call( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, id: &[u8], contract_address: &[u8], block_number: i32, @@ -1632,7 +1695,8 @@ mod data { cache::return_value.eq(return_value), )) .on_conflict_do_nothing() - .execute(conn)?; + .execute(conn) + .await?; // See comment in the Private branch for why the // raciness of this check is ok @@ -1640,6 +1704,7 @@ mod data { .filter(meta::contract_address.eq(contract_address)) .select(sql::("accessed_at < current_date")) .first::(conn) + .await .optional()? 
.unwrap_or(true); if update_meta { @@ -1656,6 +1721,7 @@ mod data { // branch to avoid unnecessary updates (not entirely // trivial with diesel) .execute(conn) + .await } else { Ok(0) } @@ -1675,7 +1741,8 @@ mod data { .bind::(contract_address) .bind::(block_number) .bind::(return_value) - .execute(conn)?; + .execute(conn) + .await?; // Check whether we need to update `call_meta`. The // check is racy, since an update can happen between the @@ -1688,6 +1755,7 @@ mod data { .filter(call_meta.contract_address().eq(contract_address)) .select(sql::("accessed_at < current_date")) .first::(conn) + .await .optional()? .unwrap_or(true); @@ -1703,6 +1771,7 @@ mod data { sql_query(query) .bind::(contract_address) .execute(conn) + .await } else { Ok(0) } @@ -1713,7 +1782,7 @@ mod data { #[cfg(debug_assertions)] // used by `super::set_chain` for test support - pub(super) fn remove_chain(&self, conn: &mut PgConnection, chain_name: &str) { + pub(super) async fn remove_chain(&self, conn: &mut AsyncPgConnection, chain_name: &str) { match self { Storage::Shared => { use public::eth_call_cache as c; @@ -1722,11 +1791,12 @@ mod data { diesel::delete(b::table.filter(b::network_name.eq(chain_name))) .execute(conn) + .await .expect("Failed to delete ethereum_blocks"); // We don't have a good way to clean out the call cache // per chain; just nuke everything - diesel::delete(c::table).execute(conn).unwrap(); - diesel::delete(m::table).execute(conn).unwrap(); + diesel::delete(c::table).execute(conn).await.unwrap(); + diesel::delete(m::table).execute(conn).await.unwrap(); } Storage::Private(Schema { blocks, @@ -1738,6 +1808,7 @@ mod data { let query = format!("delete from {}", qname); sql_query(query) .execute(conn) + .await .unwrap_or_else(|_| panic!("Failed to delete {}", qname)); } } @@ -1745,9 +1816,9 @@ mod data { } /// Queries the database for all the transaction receipts in a given block. 
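// [Editor's sketch, not part of the patch] Both `set_call` branches above use
// the same deliberately racy freshness check on the call-meta table: read
// whether `accessed_at` is older than today, and only then update it. Losing
// the race only causes a redundant update to the same CURRENT_DATE, never a
// wrong value. A condensed, self-contained version under assumed table and
// column names (the real code lives in the Shared/Private match arms):

use diesel::dsl::sql;
use diesel::sql_types::Bool;
use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, QueryResult};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

diesel::table! {
    eth_call_meta (contract_address) {
        contract_address -> Bytea,
        accessed_at -> Date,
    }
}

async fn touch_call_meta(conn: &mut AsyncPgConnection, addr: &[u8]) -> QueryResult<()> {
    use eth_call_meta::dsl as m;
    // Racy on purpose: a concurrent writer between this read and the update
    // below can only make the update redundant.
    let stale = m::eth_call_meta
        .filter(m::contract_address.eq(addr))
        .select(sql::<Bool>("accessed_at < current_date"))
        .first::<bool>(conn)
        .await
        .optional()?
        .unwrap_or(true); // no row yet counts as stale
    if stale {
        diesel::update(m::eth_call_meta.filter(m::contract_address.eq(addr)))
            .set(m::accessed_at.eq(sql("CURRENT_DATE")))
            .execute(conn)
            .await?;
    }
    Ok(())
}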
- pub(crate) fn find_transaction_receipts_in_block( + pub(crate) async fn find_transaction_receipts_in_block( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, block_hash: H256, ) -> anyhow::Result> { let query = sql_query(format!( @@ -1774,12 +1845,18 @@ from ( // `chain*.blocks` tables, so we must check which one is being queried to bind the // `block_hash` parameter to the correct type match self { - Storage::Shared => query - .bind::(format!("{:x}", block_hash)) - .get_results(conn), - Storage::Private(_) => query - .bind::(block_hash.as_bytes()) - .get_results(conn), + Storage::Shared => { + query + .bind::(format!("{:x}", block_hash)) + .get_results(conn) + .await + } + Storage::Private(_) => { + query + .bind::(block_hash.as_bytes()) + .get_results(conn) + .await + } } }; query_results @@ -1944,65 +2021,78 @@ impl ChainStore { matches!(self.status, ChainStatus::Ingestible) } - fn get_conn(&self) -> Result>, Error> { - self.pool.get().map_err(Error::from) - } - - pub(crate) fn create(&self, ident: &ChainIdentifier) -> Result<(), Error> { + pub(crate) async fn create(&self, ident: &ChainIdentifier) -> Result<(), Error> { use public::ethereum_networks::dsl::*; - let mut conn = self.get_conn()?; + let mut conn = self.pool.get().await?; conn.transaction(|conn| { - insert_into(ethereum_networks) - .values(( - name.eq(&self.chain), - namespace.eq(&self.storage), - head_block_hash.eq::>(None), - head_block_number.eq::>(None), - net_version.eq(&ident.net_version), - genesis_block_hash.eq(ident.genesis_block_hash.hash_hex()), - )) - .on_conflict(name) - .do_nothing() - .execute(conn)?; - self.storage.create(conn) - })?; + async move { + insert_into(ethereum_networks) + .values(( + name.eq(&self.chain), + namespace.eq(&self.storage), + head_block_hash.eq::>(None), + head_block_number.eq::>(None), + net_version.eq(&ident.net_version), + genesis_block_hash.eq(ident.genesis_block_hash.hash_hex()), + )) + .on_conflict(name) + .do_nothing() + .execute(conn) + .await?; + self.storage.create(conn).await + } + .scope_boxed() + }) + .await?; Ok(()) } - pub fn update_name(&self, name: &str) -> Result<(), Error> { + pub async fn update_name(&self, name: &str) -> Result<(), Error> { use public::ethereum_networks as n; - let mut conn = self.get_conn()?; + let mut conn = self.pool.get().await?; conn.transaction(|conn| { - update(n::table.filter(n::name.eq(&self.chain))) - .set(n::name.eq(name)) - .execute(conn)?; - Ok(()) + async { + update(n::table.filter(n::name.eq(&self.chain))) + .set(n::name.eq(name)) + .execute(conn) + .await?; + Ok(()) + } + .scope_boxed() }) + .await } - pub(crate) fn drop_chain(&self) -> Result<(), Error> { + pub(crate) async fn drop_chain(&self) -> Result<(), Error> { use diesel::dsl::delete; use public::ethereum_networks as n; - let mut conn = self.get_conn()?; + let mut conn = self.pool.get().await?; conn.transaction(|conn| { - self.storage.drop_storage(conn, &self.chain)?; + async { + self.storage.drop_storage(conn, &self.chain).await?; - delete(n::table.filter(n::name.eq(&self.chain))).execute(conn)?; - Ok(()) + delete(n::table.filter(n::name.eq(&self.chain))) + .execute(conn) + .await?; + Ok(()) + } + .scope_boxed() }) + .await } - pub fn chain_head_pointers( - conn: &mut PgConnection, + pub async fn chain_head_pointers( + conn: &mut AsyncPgConnection, ) -> Result, StoreError> { use public::ethereum_networks as n; let pointers: Vec<(String, BlockPtr)> = n::table .select((n::name, n::head_block_hash, n::head_block_number)) - .load::<(String, Option, 
Option<i64>)>(conn)?
+            .load::<(String, Option<String>, Option<i64>)>(conn)
+            .await?
             .into_iter()
             .filter_map(|(name, hash, number)| match (hash, number) {
                 (Some(hash), Some(number)) => Some((name, hash, number)),
@@ -2015,13 +2105,14 @@ impl ChainStore {
         Ok(HashMap::from_iter(pointers))
     }

-    pub fn chain_head_block(&self, chain: &str) -> Result<Option<BlockNumber>, StoreError> {
+    pub async fn chain_head_block(&self, chain: &str) -> Result<Option<BlockNumber>, StoreError> {
         use public::ethereum_networks as n;

         let number: Option<i64> = n::table
             .filter(n::name.eq(chain))
             .select(n::head_block_number)
-            .first::<Option<i64>>(&mut self.get_conn()?)
+            .first::<Option<i64>>(&mut self.pool.get().await?)
+            .await
             .optional()?
             .flatten();
@@ -2037,24 +2128,28 @@ impl ChainStore {
         )
     }

-    pub(crate) fn set_chain_identifier(&self, ident: &ChainIdentifier) -> Result<(), Error> {
+    pub(crate) async fn set_chain_identifier(&self, ident: &ChainIdentifier) -> Result<(), Error> {
         use public::ethereum_networks as n;

-        let mut conn = self.pool.get()?;
+        let mut conn = self.pool.get().await?;
         diesel::update(n::table.filter(n::name.eq(&self.chain)))
             .set((
                 n::genesis_block_hash.eq(ident.genesis_block_hash.hash_hex()),
                 n::net_version.eq(&ident.net_version),
             ))
-            .execute(&mut conn)?;
+            .execute(&mut conn)
+            .await?;

         Ok(())
     }

     #[cfg(debug_assertions)]
-    pub fn set_chain_identifier_for_tests(&self, ident: &ChainIdentifier) -> Result<(), Error> {
-        self.set_chain_identifier(ident)
+    pub async fn set_chain_identifier_for_tests(
+        &self,
+        ident: &ChainIdentifier,
+    ) -> Result<(), Error> {
+        self.set_chain_identifier(ident).await
     }

     /// Store the given chain as the blocks for the `network` set the
@@ -2066,9 +2161,13 @@ impl ChainStore {
         genesis_hash: &str,
         chain: Vec>,
     ) -> Vec<(BlockPtr, BlockHash)> {
-        let mut conn = self.pool.get().expect("can get a database connection");
+        let mut conn = self
+            .pool
+            .get()
+            .await
+            .expect("can get a database connection");

-        self.storage.remove_chain(&mut conn, &self.chain);
+        self.storage.remove_chain(&mut conn, &self.chain).await;
         self.recent_blocks_cache.clear();

         for block in chain {
@@ -2079,6 +2178,7 @@ impl ChainStore {
             net_version: "0".to_string(),
             genesis_block_hash: BlockHash::try_from(genesis_hash).expect("valid block hash"),
         })
+        .await
         .expect("unable to set chain identifier");

         use public::ethereum_networks as n;
@@ -2089,32 +2189,35 @@ impl ChainStore {
                 n::head_block_number.eq::<Option<i64>>(None),
             ))
             .execute(&mut conn)
+            .await
             .unwrap();
         self.recent_blocks_cache.blocks()
     }

-    pub fn delete_blocks(&self, block_hashes: &[&H256]) -> Result<usize, Error> {
-        let mut conn = self.get_conn()?;
+    pub async fn delete_blocks(&self, block_hashes: &[&H256]) -> Result<usize, Error> {
+        let mut conn = self.pool.get().await?;
         self.storage
             .delete_blocks_by_hash(&mut conn, &self.chain, block_hashes)
+            .await
     }

-    pub fn cleanup_shallow_blocks(&self, lowest_block: i32) -> Result<(), StoreError> {
-        let mut conn = self.get_conn()?;
+    pub async fn cleanup_shallow_blocks(&self, lowest_block: i32) -> Result<(), StoreError> {
+        let mut conn = self.pool.get().await?;
         self.storage
-            .cleanup_shallow_blocks(&mut conn, lowest_block)?;
+            .cleanup_shallow_blocks(&mut conn, lowest_block)
+            .await?;
         Ok(())
     }

    // remove_cursor deletes the chain store cursor and returns the previous
    // head block number if one was present
-    pub fn remove_cursor(&self, chain: &str) -> Result<Option<BlockNumber>, StoreError> {
-        let mut conn = self.get_conn()?;
-        self.storage.remove_cursor(&mut conn, chain)
+    pub async fn remove_cursor(&self, chain: &str) -> Result<Option<BlockNumber>, StoreError> {
+        let mut conn = self.pool.get().await?;
+        self.storage.remove_cursor(&mut conn, chain).await
    }

-    pub fn
truncate_block_cache(&self) -> Result<(), StoreError> { - let mut conn = self.get_conn()?; - self.storage.truncate_block_cache(&mut conn)?; + pub async fn truncate_block_cache(&self) -> Result<(), StoreError> { + let mut conn = self.pool.get().await?; + self.storage.truncate_block_cache(&mut conn).await?; Ok(()) } @@ -2122,16 +2225,8 @@ impl ChainStore { self: &Arc, hashes: Vec, ) -> Result, StoreError> { - let store = self.cheap_clone(); - let pool = self.pool.clone(); - let values = pool - .with_conn(move |conn, _| { - store - .storage - .blocks(conn, &store.chain, &hashes) - .map_err(CancelableError::from) - }) - .await?; + let mut conn = self.pool.get().await?; + let values = self.storage.blocks(&mut conn, &self.chain, &hashes).await?; Ok(values) } @@ -2139,16 +2234,10 @@ impl ChainStore { self: &Arc, numbers: Vec, ) -> Result>, StoreError> { - let store = self.cheap_clone(); - let pool = self.pool.clone(); - - let values = pool - .with_conn(move |conn, _| { - store - .storage - .block_ptrs_by_numbers(conn, &store.chain, &numbers) - .map_err(CancelableError::from) - }) + let mut conn = self.pool.get().await?; + let values = self + .storage + .block_ptrs_by_numbers(&mut conn, &self.chain, &numbers) .await?; let mut block_map = BTreeMap::new(); @@ -2163,6 +2252,67 @@ impl ChainStore { Ok(block_map) } + + async fn attempt_chain_head_update_inner( + &self, + ancestor_count: BlockNumber, + ) -> Result<(Option, Option<(String, i64)>), StoreError> { + use public::ethereum_networks as n; + + let genesis_block_ptr = self.genesis_block_ptr().await?.hash_as_h256(); + + let mut conn = self.pool.get().await?; + let candidate = self + .storage + .chain_head_candidate(&mut conn, &self.chain) + .await?; + let (ptr, first_block) = match &candidate { + None => return Ok((None, None)), + Some(ptr) => (ptr, 0.max(ptr.number.saturating_sub(ancestor_count))), + }; + + match self + .storage + .missing_parent( + &mut conn, + &self.chain, + first_block as i64, + ptr.hash_as_h256(), + genesis_block_ptr, + ) + .await? 
+ { + Some(missing) => { + return Ok((Some(missing), None)); + } + None => { /* we have a complete chain, no missing parents */ } + } + + let hash = ptr.hash_hex(); + let number = ptr.number as i64; + conn.transaction::<(Option, Option<(String, i64)>), StoreError, _>(|conn| { + async move { + update(n::table.filter(n::name.eq(&self.chain))) + .set(( + n::head_block_hash.eq(&hash), + n::head_block_number.eq(number), + )) + .execute(conn) + .await?; + Ok((None, Some((hash, number)))) + } + .scope_boxed() + }) + .await + } + + /// Helper for tests that need to directly modify the tables for the + /// chain store + #[cfg(debug_assertions)] + pub async fn get_conn_for_test(&self) -> Result { + let conn = self.pool.get().await?; + Ok(conn) + } } fn json_block_to_block_ptr_ext(json_block: &JsonBlock) -> Result { @@ -2186,46 +2336,44 @@ impl ChainHeadStore for ChainStore { async fn chain_head_ptr(self: Arc) -> Result, Error> { use public::ethereum_networks::dsl::*; - Ok(self - .cheap_clone() - .pool - .with_conn(move |conn, _| { - ethereum_networks - .select((head_block_hash, head_block_number)) - .filter(name.eq(&self.chain)) - .load::<(Option, Option)>(conn) - .map(|rows| { - rows.first() - .map(|(hash_opt, number_opt)| match (hash_opt, number_opt) { - (Some(hash), Some(number)) => Some( - ( - // FIXME: - // - // workaround for arweave - H256::from_slice(&hex::decode(hash).unwrap()[..32]), - *number, - ) - .into(), - ), - (None, None) => None, - _ => unreachable!(), - }) - .and_then(|opt: Option| opt) + let mut conn = self.pool.get().await?; + Ok(ethereum_networks + .select((head_block_hash, head_block_number)) + .filter(name.eq(&self.chain)) + .load::<(Option, Option)>(&mut conn) + .await + .map(|rows| { + rows.as_slice() + .first() + .map(|(hash_opt, number_opt)| match (hash_opt, number_opt) { + (Some(hash), Some(number)) => Some( + ( + // FIXME: + // + // workaround for arweave + H256::from_slice(&hex::decode(hash).unwrap()[..32]), + *number, + ) + .into(), + ), + (None, None) => None, + _ => unreachable!(), }) - .map_err(|e| CancelableError::from(StoreError::from(e))) - }) - .await?) + .and_then(|opt: Option| opt) + })?) } - fn chain_head_cursor(&self) -> Result, Error> { + async fn chain_head_cursor(&self) -> Result, Error> { use public::ethereum_networks::dsl::*; ethereum_networks .select(head_block_cursor) .filter(name.eq(&self.chain)) - .load::>(&mut self.get_conn()?) + .load::>(&mut self.pool.get().await?) 

 fn json_block_to_block_ptr_ext(json_block: &JsonBlock) -> Result {
@@ -2186,46 +2336,44 @@ impl ChainHeadStore for ChainStore {
     async fn chain_head_ptr(self: Arc) -> Result, Error> {
         use public::ethereum_networks::dsl::*;

-        Ok(self
-            .cheap_clone()
-            .pool
-            .with_conn(move |conn, _| {
-                ethereum_networks
-                    .select((head_block_hash, head_block_number))
-                    .filter(name.eq(&self.chain))
-                    .load::<(Option, Option)>(conn)
-                    .map(|rows| {
-                        rows.first()
-                            .map(|(hash_opt, number_opt)| match (hash_opt, number_opt) {
-                                (Some(hash), Some(number)) => Some(
-                                    (
-                                        // FIXME:
-                                        //
-                                        // workaround for arweave
-                                        H256::from_slice(&hex::decode(hash).unwrap()[..32]),
-                                        *number,
-                                    )
-                                        .into(),
-                                ),
-                                (None, None) => None,
-                                _ => unreachable!(),
-                            })
-                            .and_then(|opt: Option| opt)
+        let mut conn = self.pool.get().await?;
+        Ok(ethereum_networks
+            .select((head_block_hash, head_block_number))
+            .filter(name.eq(&self.chain))
+            .load::<(Option, Option)>(&mut conn)
+            .await
+            .map(|rows| {
+                rows.as_slice()
+                    .first()
+                    .map(|(hash_opt, number_opt)| match (hash_opt, number_opt) {
+                        (Some(hash), Some(number)) => Some(
+                            (
+                                // FIXME:
+                                //
+                                // workaround for arweave
+                                H256::from_slice(&hex::decode(hash).unwrap()[..32]),
+                                *number,
+                            )
+                                .into(),
+                        ),
+                        (None, None) => None,
+                        _ => unreachable!(),
                     })
-                    .map_err(|e| CancelableError::from(StoreError::from(e)))
-            })
-            .await?)
+                    .and_then(|opt: Option| opt)
+            })?)
     }

-    fn chain_head_cursor(&self) -> Result, Error> {
+    async fn chain_head_cursor(&self) -> Result, Error> {
         use public::ethereum_networks::dsl::*;

         ethereum_networks
             .select(head_block_cursor)
             .filter(name.eq(&self.chain))
-            .load::>(&mut self.get_conn()?)
+            .load::>(&mut self.pool.get().await?)
+            .await
             .map(|rows| {
-                rows.first()
+                rows.as_slice()
+                    .first()
                     .map(|cursor_opt| cursor_opt.as_ref().cloned())
                     .and_then(|opt| opt)
             })
@@ -2239,22 +2387,19 @@ impl ChainHeadStore for ChainStore {
     ) -> Result<(), Error> {
         use public::ethereum_networks as n;

-        let pool = self.pool.clone();
-        let network = self.chain.clone();
-        let storage = self.storage.clone();
-
         let ptr = block.ptr();
         let hash = ptr.hash_hex();
         let number = ptr.number as i64; //block height

         //this will send an update via postgres, channel: chain_head_updates
-        self.chain_head_update_sender.send(&hash, number)?;
+        self.chain_head_update_sender.send(&hash, number).await?;

-        pool.with_conn(move |conn, _| {
-            conn.transaction(|conn| -> Result<(), StoreError> {
-                storage
-                    .upsert_block(conn, &network, block.as_ref(), true)
-                    .map_err(CancelableError::from)?;
+        let mut conn = self.pool.get().await?;
+        conn.transaction(|conn| {
+            async {
+                self.storage
+                    .upsert_block(conn, &self.chain, block.as_ref(), true)
+                    .await?;

                 update(n::table.filter(n::name.eq(&self.chain)))
                     .set((
@@ -2262,22 +2407,22 @@ impl ChainHeadStore for ChainStore {
                         n::head_block_number.eq(number),
                         n::head_block_cursor.eq(cursor),
                     ))
-                    .execute(conn)?;
+                    .execute(conn)
+                    .await?;

-                Ok(())
-            })
-            .map_err(CancelableError::from)
+                Ok::<(), StoreError>(())
+            }
+            .scope_boxed()
         })
         .await?;
-
         Ok(())
     }
 }

 #[async_trait]
 impl ChainStoreTrait for ChainStore {
-    fn genesis_block_ptr(&self) -> Result {
-        let ident = self.chain_identifier()?;
+    async fn genesis_block_ptr(&self) -> Result {
+        let ident = self.chain_identifier().await?;

         Ok(BlockPtr {
             hash: ident.genesis_block_hash,
@@ -2292,25 +2437,22 @@ impl ChainStoreTrait for ChainStore {
             self.recent_blocks_cache.insert_block(block);
         }

-        let pool = self.pool.clone();
-        let network = self.chain.clone();
-        let storage = self.storage.clone();
-        pool.with_conn(move |conn, _| {
-            conn.transaction(|conn| {
-                storage
-                    .upsert_block(conn, &network, block.as_ref(), true)
-                    .map_err(CancelableError::from)
-            })
+        let mut conn = self.pool.get().await?;
+        conn.transaction(|conn| {
+            self.storage
+                .upsert_block(conn, &self.chain, block.as_ref(), true)
+                .scope_boxed()
         })
         .await
         .map_err(Error::from)
     }

-    fn upsert_light_blocks(&self, blocks: &[&dyn Block]) -> Result<(), Error> {
-        let mut conn = self.pool.get()?;
+    async fn upsert_light_blocks(&self, blocks: &[&dyn Block]) -> Result<(), Error> {
+        let mut conn = self.pool.get().await?;
         for block in blocks {
             self.storage
-                .upsert_block(&mut conn, &self.chain, *block, false)?;
+                .upsert_block(&mut conn, &self.chain, *block, false)
+                .await?;
         }
         Ok(())
     }
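The arweave FIXME carried over in `chain_head_ptr` above exists because `H256::from_slice` panics unless it gets exactly 32 bytes. A hedged, standalone version of that truncation trick, assuming the `web3` crate's `H256` (the helper name is made up):

    use web3::types::H256;

    fn h256_prefix(hex_hash: &str) -> anyhow::Result<H256> {
        let bytes = hex::decode(hex_hash)?;
        anyhow::ensure!(bytes.len() >= 32, "hash shorter than 32 bytes");
        // Chains whose hashes are longer than 32 bytes (e.g. Arweave) are
        // truncated to a 32-byte prefix so they fit an Ethereum-shaped H256.
        Ok(H256::from_slice(&bytes[..32]))
    }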
@@ -2319,59 +2461,10 @@ impl ChainStoreTrait for ChainStore {
         self: Arc,
         ancestor_count: BlockNumber,
     ) -> Result, Error> {
-        use public::ethereum_networks as n;
-
-        let (missing, ptr) = {
-            let chain_store = self.clone();
-            let genesis_block_ptr = self.genesis_block_ptr()?.hash_as_h256();
-            self.pool
-                .with_conn(move |conn, _| {
-                    let candidate = chain_store
-                        .storage
-                        .chain_head_candidate(conn, &chain_store.chain)
-                        .map_err(CancelableError::from)?;
-                    let (ptr, first_block) = match &candidate {
-                        None => return Ok((None, None)),
-                        Some(ptr) => (ptr, 0.max(ptr.number.saturating_sub(ancestor_count))),
-                    };
+        let (missing, ptr) = self.attempt_chain_head_update_inner(ancestor_count).await?;

-                    match chain_store
-                        .storage
-                        .missing_parent(
-                            conn,
-                            &chain_store.chain,
-                            first_block as i64,
-                            ptr.hash_as_h256(),
-                            genesis_block_ptr,
-                        )
-                        .map_err(CancelableError::from)?
-                    {
-                        Some(missing) => {
-                            return Ok((Some(missing), None));
-                        }
-                        None => { /* we have a complete chain, no missing parents */ }
-                    }
-
-                    let hash = ptr.hash_hex();
-                    let number = ptr.number as i64;
-
-                    conn.transaction(
-                        |conn| -> Result<(Option, Option<(String, i64)>), StoreError> {
-                            update(n::table.filter(n::name.eq(&chain_store.chain)))
-                                .set((
-                                    n::head_block_hash.eq(&hash),
-                                    n::head_block_number.eq(number),
-                                ))
-                                .execute(conn)?;
-                            Ok((None, Some((hash, number))))
-                        },
-                    )
-                    .map_err(CancelableError::from)
-                })
-                .await?
-        };
         if let Some((hash, number)) = ptr {
-            self.chain_head_update_sender.send(&hash, number)?;
+            self.chain_head_update_sender.send(&hash, number).await?;
         }

         Ok(missing)
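Hunks like the ones around this point turn blocking `ChainStoreTrait` methods (`genesis_block_ptr`, `upsert_light_blocks`, ...) into `async fn`s. The trait already carries `#[async_trait]`, which rewrites each method into one returning a boxed future so the trait stays object-safe. A minimal sketch of the mechanism:

    use async_trait::async_trait;

    #[async_trait]
    trait HeadStore: Send + Sync {
        // Expands to a method returning Pin<Box<dyn Future<Output = ...> + Send + '_>>.
        async fn head(&self) -> Result<Option<i64>, anyhow::Error>;
    }

    struct InMemory(Option<i64>);

    #[async_trait]
    impl HeadStore for InMemory {
        async fn head(&self) -> Result<Option<i64>, anyhow::Error> {
            Ok(self.0)
        }
    }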
@@ -2551,22 +2644,13 @@ impl ChainStoreTrait for ChainStore {
             return Ok(Some((data, ptr)));
         }

-        let block_ptr_clone = block_ptr.clone();
-        let chain_store = self.cheap_clone();
-
-        self.pool
-            .with_conn(move |conn, _| {
-                chain_store
-                    .storage
-                    .ancestor_block(conn, block_ptr_clone, offset, root)
-                    .map_err(StoreError::from)
-                    .map_err(CancelableError::from)
-            })
+        let mut conn = self.pool.get().await?;
+        self.storage
+            .ancestor_block(&mut conn, block_ptr, offset, root)
             .await
-            .map_err(Into::into)
     }

-    fn cleanup_cached_blocks(
+    async fn cleanup_cached_blocks(
         &self,
         ancestor_count: BlockNumber,
     ) -> Result, Error> {
@@ -2597,7 +2681,7 @@ impl ChainStoreTrait for ChainStore {
         //
        // See 8b6ad0c64e244023ac20ced7897fe666

-        let mut conn = self.get_conn()?;
+        let mut conn = self.pool.get().await?;
         let query = "
            select coalesce(
              least(a.block,
@@ -2615,58 +2699,61 @@ impl ChainStoreTrait for ChainStore {
                           and a.id = d.id
                           and not d.failed
                           and ds.network = $2) a;";
-        diesel::sql_query(query)
+        let Some(block) = diesel::sql_query(query)
            .bind::(ancestor_count)
            .bind::(&self.chain)
-            .load::(&mut conn)?
+            .load::(&mut conn)
+            .await?
+            .as_slice()
            .first()
-            .map(|MinBlock { block }| {
-                // If we could not determine a minimum block, the query
-                // returns -1, and we should not do anything. We also guard
-                // against removing the genesis block
-                if *block > 0 {
-                    self.storage
-                        .delete_blocks_before(&mut conn, &self.chain, *block as i64)
-                        .map(|rows| Some((*block, rows)))
-                } else {
-                    Ok(None)
-                }
-            })
-            .unwrap_or(Ok(None))
-            .map_err(Into::into)
+            .map(|MinBlock { block }| *block)
+        else {
+            return Ok(None);
+        };
+        // If we could not determine a minimum block, the query
+        // returns -1, and we should not do anything. We also guard
+        // against removing the genesis block
+        if block > 0 {
+            self.storage
+                .delete_blocks_before(&mut conn, &self.chain, block as i64)
+                .await
+                .map(|rows| Some((block, rows)))
+        } else {
+            Ok(None)
+        }
     }

-    fn block_hashes_by_block_number(&self, number: BlockNumber) -> Result, Error> {
-        let mut conn = self.get_conn()?;
+    async fn block_hashes_by_block_number(
+        &self,
+        number: BlockNumber,
+    ) -> Result, Error> {
+        let mut conn = self.pool.get().await?;
         self.storage
             .block_hashes_by_block_number(&mut conn, &self.chain, number)
+            .await
     }

-    fn confirm_block_hash(&self, number: BlockNumber, hash: &BlockHash) -> Result {
-        let mut conn = self.get_conn()?;
+    async fn confirm_block_hash(
+        &self,
+        number: BlockNumber,
+        hash: &BlockHash,
+    ) -> Result {
+        let mut conn = self.pool.get().await?;
         self.storage
             .confirm_block_hash(&mut conn, &self.chain, number, hash)
+            .await
     }

     async fn block_number(
         &self,
         hash: &BlockHash,
     ) -> Result, Option)>, StoreError> {
-        let hash = hash.clone();
-        let storage = self.storage.clone();
-        let chain = self.chain.clone();
-        self.pool
-            .with_conn(move |conn, _| {
-                storage
-                    .block_number(conn, &hash)
-                    .map(|opt| {
-                        opt.map(|(number, timestamp, parent_hash)| {
-                            (chain.clone(), number, timestamp, parent_hash)
-                        })
-                    })
-                    .map_err(|e| e.into())
+        let mut conn = self.pool.get().await?;
+        self.storage.block_number(&mut conn, hash).await.map(|opt| {
+            opt.map(|(number, timestamp, parent_hash)| {
+                (self.chain.clone(), number, timestamp, parent_hash)
             })
-            .await
+        })
     }

     async fn block_numbers(
@@ -2677,20 +2764,18 @@ impl ChainStoreTrait for ChainStore {
             return Ok(HashMap::new());
         }

-        let storage = self.storage.clone();
-        self.pool
-            .with_conn(move |conn, _| {
-                storage
-                    .block_numbers(conn, hashes.as_slice())
-                    .map_err(|e| e.into())
-            })
+        let mut conn = self.pool.get().await?;
+        self.storage
+            .block_numbers(&mut conn, hashes.as_slice())
             .await
     }

     async fn clear_call_cache(&self, from: BlockNumber, to: BlockNumber) -> Result<(), Error> {
-        let mut conn = self.get_conn()?;
-        if let Some(head) = self.chain_head_block(&self.chain)? {
-            self.storage.clear_call_cache(&mut conn, head, from, to)?;
+        let mut conn = self.pool.get().await?;
+        if let Some(head) = self.chain_head_block(&self.chain).await? {
+            self.storage
+                .clear_call_cache(&mut conn, head, from, to)
+                .await?;
         }
         Ok(())
     }

@@ -2700,33 +2785,31 @@ impl ChainStoreTrait for ChainStore {
         ttl_days: i32,
         ttl_max_contracts: Option,
     ) -> Result<(), Error> {
-        let conn = &mut *self.get_conn()?;
+        let conn = &mut self.pool.get().await?;
         self.storage
             .clear_stale_call_cache(conn, &self.logger, ttl_days, ttl_max_contracts)
+            .await
     }

     async fn transaction_receipts_in_block(
         &self,
         block_hash: &H256,
     ) -> Result, StoreError> {
-        let pool = self.pool.clone();
-        let storage = self.storage.clone();
-        let block_hash = *block_hash;
-        pool.with_conn(move |conn, _| {
-            storage
-                .find_transaction_receipts_in_block(conn, block_hash)
-                .map_err(|e| StoreError::from(e).into())
-        })
-        .await
+        let mut conn = self.pool.get().await?;
+        self.storage
+            .find_transaction_receipts_in_block(&mut conn, *block_hash)
+            .await
+            .map_err(StoreError::from)
     }

-    fn chain_identifier(&self) -> Result {
-        let mut conn = self.pool.get()?;
+    async fn chain_identifier(&self) -> Result {
+        let mut conn = self.pool.get().await?;
         use public::ethereum_networks as n;
         let (genesis_block_hash, net_version) = n::table
             .select((n::genesis_block_hash, n::net_version))
             .filter(n::name.eq(&self.chain))
-            .get_result::<(BlockHash, String)>(&mut conn)?;
+            .get_result::<(BlockHash, String)>(&mut conn)
+            .await?;

         Ok(ChainIdentifier {
             net_version,
@@ -2954,34 +3037,41 @@ fn try_parse_timestamp(ts: Option) -> Result, StoreError> {
         .map(Some)
 }
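For orientation, here is roughly how a caller would drive the now-async call cache implemented below. The signatures are paraphrased from the following hunks and `execute_on_node` is an assumed RPC helper, so treat this as a sketch rather than the crate's actual API:

    async fn warm_cache(
        store: Arc<ChainStore>,
        logger: &Logger,
        req: call::Request,
        block: BlockPtr,
    ) -> Result<(), Error> {
        // Fast path: a result stored earlier for this (request, block) pair.
        if store.get_call(&req, block.clone()).await?.is_none() {
            // Slow path: ask the node, then remember the result for next time.
            let retval = execute_on_node(&req).await?; // assumed helper
            store.set_call(logger, req, block, retval).await?;
        }
        Ok(())
    }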
+#[async_trait]
 impl EthereumCallCache for ChainStore {
-    fn get_call(
+    async fn get_call(
         &self,
         req: &call::Request,
         block: BlockPtr,
     ) -> Result, Error> {
         let id = contract_call_id(req, &block);
-        let conn = &mut *self.get_conn()?;
-        let return_value = conn.transaction::<_, Error, _>(|conn| {
-            if let Some((return_value, update_accessed_at)) =
-                self.storage.get_call_and_access(conn, id.as_ref())?
-            {
-                if update_accessed_at {
-                    self.storage
-                        .update_accessed_at(conn, req.address.as_ref())?;
+        let conn = &mut self.pool.get().await?;
+        let return_value = conn
+            .transaction::<_, Error, _>(|conn| {
+                async {
+                    if let Some((return_value, update_accessed_at)) =
+                        self.storage.get_call_and_access(conn, id.as_ref()).await?
+                    {
+                        if update_accessed_at {
+                            self.storage
+                                .update_accessed_at(conn, req.address.as_ref())
+                                .await?;
+                        }
+                        Ok(Some(return_value))
+                    } else {
+                        Ok(None)
+                    }
                 }
-                Ok(Some(return_value))
-            } else {
-                Ok(None)
-            }
-        })?;
+                .scope_boxed()
+            })
+            .await?;

         Ok(return_value.map(|return_value| {
             req.cheap_clone()
                 .response(call::Retval::Value(return_value), call::Source::Store)
         }))
     }

-    fn get_calls(
+    async fn get_calls(
         &self,
         reqs: &[call::Request],
         block: BlockPtr,
@@ -2996,9 +3086,14 @@ impl EthereumCallCache for ChainStore {
             .collect();
         let id_refs: Vec<_> = ids.iter().map(|id| id.as_slice()).collect();

-        let conn = &mut *self.get_conn()?;
+        let conn = &mut self.pool.get().await?;
         let rows = conn
-            .transaction::<_, Error, _>(|conn| self.storage.get_calls_and_access(conn, &id_refs))?;
+            .transaction::<_, Error, _>(|conn| {
+                self.storage
+                    .get_calls_and_access(conn, &id_refs)
+                    .scope_boxed()
+            })
+            .await?;

         let mut found: Vec = Vec::new();
         let mut resps = Vec::new();
@@ -3024,13 +3119,16 @@ impl EthereumCallCache for ChainStore {
         Ok((resps, calls))
     }

-    fn get_calls_in_block(&self, block: BlockPtr) -> Result, Error> {
-        let conn = &mut *self.get_conn()?;
-        conn.transaction::<_, Error, _>(|conn| self.storage.get_calls_in_block(conn, block))
+    async fn get_calls_in_block(&self, block: BlockPtr) -> Result, Error> {
+        let conn = &mut self.pool.get().await?;
+        conn.transaction::<_, Error, _>(|conn| {
+            self.storage.get_calls_in_block(conn, block).scope_boxed()
+        })
+        .await
     }

-    fn set_call(
-        &self,
+    async fn set_call(
+        self: Arc,
         _: &Logger,
         call: call::Request,
         block: BlockPtr,
@@ -3051,16 +3149,20 @@ impl EthereumCallCache for ChainStore {
         };

         let id = contract_call_id(&call, &block);
-        let conn = &mut *self.get_conn()?;
-        conn.transaction(|conn| {
-            self.storage.set_call(
-                conn,
-                id.as_ref(),
-                call.address.as_ref(),
-                block.number,
-                &return_value,
-            )
+        let conn = &mut self.pool.get().await?;
+        conn.transaction::<_, anyhow::Error, _>(|conn| {
+            self.storage
+                .set_call(
+                    conn,
+                    id.as_ref(),
+                    call.address.as_ref(),
+                    block.number,
+                    &return_value,
+                )
+                .scope_boxed()
         })
+        .await?;
+        Ok(())
     }
 }
diff --git a/store/postgres/src/copy.rs b/store/postgres/src/copy.rs
index 9a8b4fd4328..80830b3e61b 100644
--- a/store/postgres/src/copy.rs
+++ b/store/postgres/src/copy.rs
@@ -23,22 +23,27 @@ use std::{
 };

 use diesel::{
-    connection::SimpleConnection as _,
-    dsl::sql,
-    insert_into,
-    r2d2::{ConnectionManager, PooledConnection},
-    select, sql_query, update, Connection as _, ExpressionMethods, OptionalExtension, PgConnection,
-    QueryDsl, RunQueryDsl,
+    dsl::sql, insert_into, select, sql_query, update, ExpressionMethods, OptionalExtension,
+    QueryDsl,
 };
+use diesel_async::{
+    scoped_futures::{ScopedBoxFuture, ScopedFutureExt},
+    AsyncConnection,
+};
+use diesel_async::{RunQueryDsl, SimpleAsyncConnection};
+use tokio;
+
 use graph::{
-    futures03::{future::select_all, FutureExt as _},
+    futures03::{
+        future::{select_all, BoxFuture},
+        FutureExt as _,
+    },
     internal_error,
     prelude::{
         info, lazy_static, o, warn, BlockNumber, BlockPtr, CheapClone, Logger, StoreError,
         ENV_VARS,
     },
     schema::EntityType,
     slog::error,
-    tokio,
 };
 use itertools::Itertools;
@@ -49,7 +54,7 @@ use crate::{
     relational::{index::IndexList, Layout, Table},
     relational_queries as rq,
     vid_batcher::{VidBatcher, VidRange},
-    ConnectionPool,
+    AsyncPgConnection, ConnectionPool,
 };

 const LOG_INTERVAL: Duration = Duration::from_secs(3 * 60);
@@ -117,8 +122,8 @@ struct CopyState {
 }

 impl CopyState {
-    fn new(
-        conn: &mut PgConnection,
+    async fn new(
+        conn: &mut AsyncPgConnection,
         primary: Primary,
         src: Arc,
         dst: Arc,
@@ -128,13 +133,14 @@ impl CopyState {
         let crosses_shards = dst.site.shard != src.site.shard;
         if crosses_shards {
-            src.import_schema(conn)?;
+            src.import_schema(conn).await?;
         }

         let state = match cs::table
             .filter(cs::dst.eq(dst.site.id))
             .select((cs::src, cs::target_block_hash, cs::target_block_number))
             .first::<(DeploymentId, Vec, BlockNumber)>(conn)
+            .await
             .optional()?
         {
             Some((src_id, hash, number)) => {
@@ -158,22 +164,22 @@ impl CopyState {
                         src.site.id
                     ));
                 }
-                Self::load(conn, primary, src, dst, target_block)
+                Self::load(conn, primary, src, dst, target_block).await
             }
-            None => Self::create(conn, primary.cheap_clone(), src, dst, target_block),
+            None => Self::create(conn, primary.cheap_clone(), src, dst, target_block).await,
         }?;

         Ok(state)
     }

-    fn load(
-        conn: &mut PgConnection,
+    async fn load(
+        conn: &mut AsyncPgConnection,
         primary: Primary,
         src: Arc,
         dst: Arc,
         target_block: BlockPtr,
     ) -> Result {
-        let tables = TableState::load(conn, primary, src.as_ref(), dst.as_ref())?;
+        let tables = TableState::load(conn, primary, src.as_ref(), dst.as_ref()).await?;

         let (finished, mut unfinished): (Vec<_>, Vec<_>) =
             tables.into_iter().partition(|table| table.finished());
         unfinished.sort_by_key(|table| table.dst.object.to_string());
@@ -186,8 +192,8 @@ impl CopyState {
         })
     }

-    fn create(
-        conn: &mut PgConnection,
+    async fn create(
+        conn: &mut AsyncPgConnection,
         primary: Primary,
         src: Arc,
         dst: Arc,
@@ -203,27 +209,26 @@ impl CopyState {
             cs::target_block_hash.eq(target_block.hash_slice()),
             cs::target_block_number.eq(target_block.number),
         ))
-        .execute(conn)?;
-
-        let mut unfinished: Vec<_> = dst
-            .tables
-            .values()
-            .filter_map(|dst_table| {
-                src.table_for_entity(&dst_table.object)
-                    .ok()
-                    .map(|src_table| {
-                        TableState::init(
-                            conn,
-                            primary.cheap_clone(),
-                            dst.site.clone(),
-                            &src,
-                            src_table.clone(),
-                            dst_table.clone(),
-                            &target_block,
-                        )
-                    })
-            })
-            .collect::>()?;
+        .execute(conn)
+        .await?;
+
+        let mut unfinished = Vec::new();
+        for dst_table in dst.tables.values() {
+            if let Some(src_table) = src.table_for_entity(&dst_table.object).ok() {
+                unfinished.push(
+                    TableState::init(
+                        conn,
+                        primary.cheap_clone(),
+                        dst.site.clone(),
+                        &src,
+                        src_table.clone(),
+                        dst_table.clone(),
+                        &target_block,
+                    )
+                    .await?,
+                );
+            }
+        }
         unfinished.sort_by_key(|table| table.dst.object.to_string());

         let values = unfinished
@@ -238,7 +243,7 @@ impl CopyState {
                 )
             })
             .collect::>();
-        insert_into(cts::table).values(values).execute(conn)?;
+        insert_into(cts::table).values(values).execute(conn).await?;

         Ok(CopyState {
             src,
@@ -253,12 +258,13 @@ impl CopyState {
         self.dst.site.shard != self.src.site.shard
     }

-    fn finished(&self, conn: &mut PgConnection) -> Result<(), StoreError> {
+    async fn finished(&self, conn: &mut AsyncPgConnection) -> Result<(), StoreError> {
         use copy_state as cs;

         update(cs::table.filter(cs::dst.eq(self.dst.site.id)))
             .set(cs::finished_at.eq(sql("now()")))
-            .execute(conn)?;
+            .execute(conn)
+            .await?;

         // If we imported the schema for `src`, and no other in-progress
         // copy is using it, get rid of it again
@@ -268,13 +274,14 @@ impl CopyState {
                 .filter(cs::src.eq(self.src.site.id))
                 .filter(cs::finished_at.is_null()),
         ))
-        .get_result::(conn)?;
+        .get_result::(conn)
+        .await?;

         if !has_active_copies {
             // This is a foreign schema that nobody is using anymore,
             // get rid of it. As a safety check (on top of the one that
             // drop_foreign_schema does), see that we do not have
             // metadata for `src`
-            if crate::deployment::exists(conn, &self.src.site)? {
+            if crate::deployment::exists(conn, &self.src.site).await? {
                 return Err(internal_error!(
                     "we think we are copying {}[{}] across shards from {} to {}, but the \
                      source subgraph is actually in this shard",
                     self.src.deployment,
                     self.src.site.id,
                     self.src.site.shard,
                     self.dst.site.shard
                 ));
             }
-            crate::catalog::drop_foreign_schema(conn, self.src.site.as_ref())?;
+            crate::catalog::drop_foreign_schema(conn, self.src.site.as_ref()).await?;
         }
         Ok(())
@@ -295,8 +302,8 @@ impl CopyState {
     }
 }

-pub(crate) fn source(
-    conn: &mut PgConnection,
+pub(crate) async fn source(
+    conn: &mut AsyncPgConnection,
     dst: &Site,
 ) -> Result, StoreError> {
     use copy_state as cs;

     cs::table
         .filter(cs::dst.eq(dst.id))
         .select(cs::src)
         .get_result::(conn)
+        .await
         .optional()
         .map_err(StoreError::from)
 }

@@ -325,8 +333,8 @@
 struct TableState {
 }

 impl TableState {
-    fn init(
-        conn: &mut PgConnection,
+    async fn init(
+        conn: &mut AsyncPgConnection,
         primary: Primary,
         dst_site: Arc,
         src_layout: &Layout,
@@ -334,8 +342,9 @@ impl TableState {
         dst: Arc,
         target_block: &BlockPtr,
     ) -> Result {
-        let vid_range = VidRange::for_copy(conn, &src, target_block)?;
-        let batcher = VidBatcher::load(conn, &src_layout.site.namespace, src.as_ref(), vid_range)?;
+        let vid_range = VidRange::for_copy(conn, &src, target_block).await?;
+        let batcher =
+            VidBatcher::load(conn, &src_layout.site.namespace, src.as_ref(), vid_range).await?;
         Ok(Self {
             primary,
             src,
@@ -350,8 +359,8 @@ impl TableState {
         self.batcher.finished()
     }

-    fn load(
-        conn: &mut PgConnection,
+    async fn load(
+        conn: &mut AsyncPgConnection,
         primary: Primary,
         src_layout: &Layout,
         dst_layout: &Layout,
@@ -380,7 +389,8 @@ impl TableState {
                 .map(|table| table.clone())
         }

-        cts::table
+        let mut states = Vec::new();
+        for (id, entity_type, current_vid, target_vid, size, duration_ms) in cts::table
             .filter(cts::dst.eq(dst_layout.site.id))
             .select((
                 cts::id,
@@ -391,50 +401,44 @@ impl TableState {
                 cts::duration_ms,
             ))
             .order_by(cts::entity_type)
-            .load::<(i32, String, i64, i64, i64, i64)>(conn)?
+            .load::<(i32, String, i64, i64, i64, i64)>(conn)
+            .await?
             .into_iter()
-            .map(
-                |(id, entity_type, current_vid, target_vid, size, duration_ms)| {
-                    let entity_type = src_layout.input_schema.entity_type(&entity_type)?;
-                    let src =
-                        resolve_entity(src_layout, "source", &entity_type, dst_layout.site.id, id);
-                    let dst = resolve_entity(
-                        dst_layout,
-                        "destination",
-                        &entity_type,
-                        dst_layout.site.id,
-                        id,
-                    );
-                    match (src, dst) {
-                        (Ok(src), Ok(dst)) => {
-                            let batcher = VidBatcher::load(
-                                conn,
-                                &src_layout.site.namespace,
-                                &src,
-                                VidRange::new(current_vid, target_vid),
-                            )?
-                            .with_batch_size(size as usize);
-
-                            Ok(TableState {
-                                primary: primary.cheap_clone(),
-                                src,
-                                dst,
-                                dst_site: dst_layout.site.clone(),
-                                batcher,
-                                duration_ms,
-                            })
-                        }
-                        (Err(e), _) => Err(e),
-                        (_, Err(e)) => Err(e),
-                    }
-                },
+        {
+            let entity_type = src_layout.input_schema.entity_type(&entity_type)?;
+            let src = resolve_entity(src_layout, "source", &entity_type, dst_layout.site.id, id)?;
+            let dst = resolve_entity(
+                dst_layout,
+                "destination",
+                &entity_type,
+                dst_layout.site.id,
+                id,
+            )?;
+            let batcher = VidBatcher::load(
+                conn,
+                &src_layout.site.namespace,
+                &src,
+                VidRange::new(current_vid, target_vid),
             )
-            .collect()
+            .await?
+            .with_batch_size(size as usize);
+
+            let state = TableState {
+                primary: primary.cheap_clone(),
+                src,
+                dst,
+                dst_site: dst_layout.site.clone(),
+                batcher,
+                duration_ms,
+            };
+            states.push(state);
+        }
+        Ok(states)
     }
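`TableState::load` above illustrates a mechanical consequence of the migration: `.await` is not allowed inside an iterator `.map(...)` closure, so the old `map`/`collect` pipeline becomes a plain `for` loop that awaits row by row. A generic sketch of the same shape (`Row` and `load_one` are placeholders):

    use diesel_async::AsyncPgConnection;

    struct Row(i32);

    async fn load_one(_conn: &mut AsyncPgConnection, id: i32) -> anyhow::Result<Row> {
        Ok(Row(id)) // stand-in for a real query
    }

    async fn load_all(conn: &mut AsyncPgConnection, ids: Vec<i32>) -> anyhow::Result<Vec<Row>> {
        let mut out = Vec::with_capacity(ids.len());
        for id in ids {
            // Sequential awaits: one statement at a time on this connection.
            out.push(load_one(conn, id).await?);
        }
        Ok(out)
    }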
-    fn record_progress(
+    async fn record_progress(
         &mut self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         elapsed: Duration,
     ) -> Result<(), StoreError> {
         use copy_table_state as cts;
@@ -453,7 +457,8 @@ impl TableState {
                 .filter(cts::duration_ms.eq(0)),
         )
         .set(cts::started_at.eq(sql("now()")))
-        .execute(conn)?;
+        .execute(conn)
+        .await?;
         let values = (
             cts::next_vid.eq(self.batcher.next_vid()),
             cts::batch_size.eq(self.batcher.batch_size() as i64),
@@ -465,11 +470,12 @@ impl TableState {
                 .filter(cts::entity_type.eq(self.dst.object.as_str())),
         )
         .set(values)
-        .execute(conn)?;
+        .execute(conn)
+        .await?;
         Ok(())
     }

-    fn record_finished(&self, conn: &mut PgConnection) -> Result<(), StoreError> {
+    async fn record_finished(&self, conn: &mut AsyncPgConnection) -> Result<(), StoreError> {
         use copy_table_state as cts;

         update(
@@ -478,46 +484,57 @@ impl TableState {
                 .filter(cts::entity_type.eq(self.dst.object.as_str())),
         )
         .set(cts::finished_at.eq(sql("now()")))
-        .execute(conn)?;
+        .execute(conn)
+        .await?;
         Ok(())
     }

-    fn is_cancelled(&self, conn: &mut PgConnection) -> Result {
+    async fn is_cancelled(&self, conn: &mut AsyncPgConnection) -> Result {
         let dst = self.dst_site.as_ref();
-        let canceled = self.primary.is_copy_cancelled(dst)?;
+        let canceled = self.primary.is_copy_cancelled(dst).await?;
         if canceled {
             use copy_state as cs;

             update(cs::table.filter(cs::dst.eq(dst.id)))
                 .set(cs::cancelled_at.eq(sql("now()")))
-                .execute(conn)?;
+                .execute(conn)
+                .await?;
         }
         Ok(canceled)
     }

-    fn copy_batch(&mut self, conn: &mut PgConnection) -> Result {
-        let (duration, count) = self.batcher.step(|start, end| {
-            let count = rq::CopyEntityBatchQuery::new(self.dst.as_ref(), &self.src, start, end)?
-                .count_current()
-                .get_result::(conn)
-                .optional()?;
-            Ok(count.unwrap_or(0) as i32)
-        })?;
+    async fn copy_batch(&mut self, conn: &mut AsyncPgConnection) -> Result {
+        let (duration, count) = self
+            .batcher
+            .step(async |start, end| {
+                let count =
+                    rq::CopyEntityBatchQuery::new(self.dst.as_ref(), &self.src, start, end)?
+                        .count_current()
+                        .get_result::(conn)
+                        .await
+                        .optional()?;
+                Ok(count.unwrap_or(0) as i32)
+            })
+            .await?;

         let count = count.unwrap_or(0);

-        deployment::update_entity_count(conn, &self.dst_site, count)?;
+        deployment::update_entity_count(conn, &self.dst_site, count).await?;

-        self.record_progress(conn, duration)?;
+        self.record_progress(conn, duration).await?;

         if self.finished() {
-            self.record_finished(conn)?;
+            self.record_finished(conn).await?;
         }
         Ok(Status::Finished)
     }

-    fn set_batch_size(&mut self, conn: &mut PgConnection, size: usize) -> Result<(), StoreError> {
+    async fn set_batch_size(
+        &mut self,
+        conn: &mut AsyncPgConnection,
+        size: usize,
+    ) -> Result<(), StoreError> {
         use copy_table_state as cts;

         self.batcher.set_batch_size(size);
@@ -528,7 +545,8 @@ impl TableState {
                 .filter(cts::entity_type.eq(self.dst.object.as_str())),
         )
         .set(cts::batch_size.eq(self.batcher.batch_size() as i64))
-        .execute(conn)?;
+        .execute(conn)
+        .await?;

         Ok(())
     }
@@ -615,7 +633,8 @@ impl CopyProgress {
             last_log
         });
         if last_log.elapsed() > LOG_INTERVAL {
-            let total_current_vid = self.current_vid.load(Ordering::SeqCst) + batcher.next_vid();
+            let total_current_vid =
+                AtomicI64::load(&self.current_vid, Ordering::SeqCst) + batcher.next_vid();
             info!(
                 self.logger,
                 "Copied {:.2}% of `{}` entities ({}/{} entity versions), {:.2}% of overall data",
@@ -646,7 +665,7 @@ impl CopyProgress {
     }

     fn is_cancelled(&self) -> bool {
-        self.cancelled.load(Ordering::SeqCst)
+        AtomicBool::load(&self.cancelled, Ordering::SeqCst)
     }
 }

@@ -674,26 +693,18 @@ impl From> for WorkerResult {
 /// This struct helps us with that. It wraps a connection and tracks whether
 /// the connection was used to acquire the copy lock
 struct LockTrackingConnection {
-    inner: PooledConnection>,
+    inner: AsyncPgConnection,
     has_lock: bool,
 }

 impl LockTrackingConnection {
-    fn new(inner: PooledConnection>) -> Self {
+    fn new(inner: AsyncPgConnection) -> Self {
         Self {
             inner,
             has_lock: false,
         }
     }

-    fn transaction(&mut self, f: F) -> Result
-    where
-        F: FnOnce(&mut PgConnection) -> Result,
-    {
-        let conn = &mut self.inner;
-        conn.transaction(|conn| f(conn))
-    }
-
     /// Put `self` into `other` if `self` has the lock.
     fn extract(self, other: &mut Option) {
         if self.has_lock {
@@ -701,17 +712,17 @@ impl LockTrackingConnection {
         }
     }

-    fn lock(&mut self, logger: &Logger, dst: &Site) -> Result<(), StoreError> {
+    async fn lock(&mut self, logger: &Logger, dst: &Site) -> Result<(), StoreError> {
         if self.has_lock {
             warn!(logger, "already acquired copy lock for {}", dst);
             return Ok(());
         }
-        advisory_lock::lock_copying(&mut self.inner, dst)?;
+        advisory_lock::lock_copying(&mut self.inner, dst).await?;
         self.has_lock = true;
         Ok(())
     }

-    fn unlock(&mut self, logger: &Logger, dst: &Site) -> Result<(), StoreError> {
+    async fn unlock(&mut self, logger: &Logger, dst: &Site) -> Result<(), StoreError> {
         if !self.has_lock {
             error!(
                 logger,
@@ -719,7 +730,7 @@ impl LockTrackingConnection {
             );
             return Ok(());
         }
-        advisory_lock::unlock_copying(&mut self.inner, dst)?;
+        advisory_lock::unlock_copying(&mut self.inner, dst).await?;
         self.has_lock = false;
         Ok(())
     }
@@ -746,7 +757,7 @@ impl CopyTableWorker {
     async fn run(mut self, logger: Logger, progress: Arc) -> WorkerResult {
         let object = self.table.dst.object.cheap_clone();
         graph::spawn_blocking_allow_panic(move || {
-            self.result = self.run_inner(logger, &progress);
+            self.result = graph::block_on(self.run_inner(logger, &progress));
             self
         })
         .await
@@ -754,7 +765,11 @@ impl CopyTableWorker {
         .into()
     }

-    fn run_inner(&mut self, logger: Logger, progress: &CopyProgress) -> Result {
+    async fn run_inner(
+        &mut self,
+        logger: Logger,
+        progress: &CopyProgress,
+    ) -> Result {
         use Status::*;

         let conn = &mut self.conn.inner;
@@ -763,14 +778,14 @@ impl CopyTableWorker {
         // It is important that this check happens outside the write
         // transaction so that we do not hold on to locks acquired
         // by the check
-        if self.table.is_cancelled(conn)? || progress.is_cancelled() {
+        if self.table.is_cancelled(conn).await? || progress.is_cancelled() {
             progress.cancel();
             return Ok(Cancelled);
         }

         // Pause copying if replication is lagging behind to avoid
         // overloading replicas
-        let mut lag = catalog::replication_lag(conn)?;
+        let mut lag = catalog::replication_lag(conn).await?;
         if lag > MAX_REPLICATION_LAG {
             loop {
                 info!(logger,
@@ -778,7 +793,7 @@ impl CopyTableWorker {
                        REPLICATION_SLEEP.as_secs();
                        "lag_s" => lag.as_secs());
                 std::thread::sleep(REPLICATION_SLEEP);
-                lag = catalog::replication_lag(conn)?;
+                lag = catalog::replication_lag(conn).await?;
                 if lag <= ACCEPTABLE_REPLICATION_LAG {
                     break;
                 }
@@ -791,12 +806,18 @@ impl CopyTableWorker {
                 break Cancelled;
             }

-            match conn.transaction(|conn| {
-                if let Some(timeout) = BATCH_STATEMENT_TIMEOUT.as_ref() {
-                    conn.batch_execute(timeout)?;
-                }
-                self.table.copy_batch(conn)
-            }) {
+            match conn
+                .transaction(|conn| {
+                    async {
+                        if let Some(timeout) = BATCH_STATEMENT_TIMEOUT.as_ref() {
+                            conn.batch_execute(timeout).await?;
+                        }
+                        self.table.copy_batch(conn).await
+                    }
+                    .scope_boxed()
+                })
+                .await
+            {
                 Ok(status) => {
                     break status;
                 }
@@ -832,7 +853,8 @@ impl CopyTableWorker {
                     // that is hard to predict. This mechanism ensures
                     // that if our estimation is wrong, the consequences
                     // aren't too severe.
-                    conn.transaction(|conn| self.table.set_batch_size(conn, 1))?;
+                    conn.transaction(|conn| self.table.set_batch_size(conn, 1).scope_boxed())
+                        .await?;
                 }
             };

@@ -847,6 +869,8 @@ impl CopyTableWorker {
     }
 }

+type WorkerFuture = Pin + Send>>;
+
 /// A helper to manage the workers that are copying data. Besides the actual
 /// workers it also keeps a worker that wakes us up periodically to give us
 /// a chance to create more workers if there are database connections
@@ -854,7 +878,7 @@ impl CopyTableWorker {
 struct Workers {
     /// The list of workers that are currently running. This will always
     /// include a future that wakes us up periodically
-    futures: Vec>>>,
+    futures: Vec,
 }

 impl Workers {
@@ -864,7 +888,7 @@ impl Workers {
         }
     }

-    fn add(&mut self, worker: Pin>>) {
+    fn add(&mut self, worker: WorkerFuture) {
         self.futures.push(worker);
     }

@@ -887,7 +911,7 @@ impl Workers {
         result
     }

-    fn waker() -> Pin>> {
+    fn waker() -> WorkerFuture {
         let sleep = tokio::time::sleep(ENV_VARS.store.batch_target_duration);
         Box::pin(sleep.map(|()| WorkerResult::Wake))
     }
@@ -931,7 +955,7 @@ impl Connection {
     /// will block until it was able to get a fdw connection. The overall
     /// effect is that new copy requests will not start until a connection
     /// is available.
-    pub fn new(
+    pub async fn new(
         logger: &Logger,
         primary: Primary,
         pool: ConnectionPool,
@@ -952,13 +976,15 @@ impl Connection {
         }

         let mut last_log = Instant::now();
-        let conn = pool.get_fdw(&logger, || {
-            if last_log.elapsed() > LOG_INTERVAL {
-                info!(&logger, "waiting for other copy operations to finish");
-                last_log = Instant::now();
-            }
-            false
-        })?;
+        let conn = pool
+            .get_fdw(&logger, || {
+                if last_log.elapsed() > LOG_INTERVAL {
+                    info!(&logger, "waiting for other copy operations to finish");
+                    last_log = Instant::now();
+                }
+                false
+            })
+            .await?;
         let src_manifest_idx_and_name = Arc::new(src_manifest_idx_and_name);
         let dst_manifest_idx_and_name = Arc::new(dst_manifest_idx_and_name);
         let conn = Some(LockTrackingConnection::new(conn));
@@ -976,34 +1002,53 @@ impl Connection {
         })
     }

-    fn transaction(&mut self, f: F) -> Result
+    /// Run `callback` in a transaction using the connection in `self.conn`.
+    /// This will return an error if `self.conn` is `None`, which happens
+    /// while a background task is copying a table.
+    fn transaction<'a, 'conn, R, F>(
+        &'conn mut self,
+        callback: F,
+    ) -> Result>, StoreError>
     where
-        F: FnOnce(&mut PgConnection) -> Result,
+        F: for<'r> FnOnce(
+                &'r mut AsyncPgConnection,
+            ) -> ScopedBoxFuture<'a, 'r, Result>
+            + Send
+            + 'a,
+        R: Send + 'a,
+        'a: 'conn,
     {
         let Some(conn) = self.conn.as_mut() else {
             return Err(internal_error!(
                 "copy connection has been handed to background task but not returned yet (transaction)"
             ));
         };
-        conn.transaction(|conn| f(conn))
+        let conn = &mut conn.inner;
+        Ok(conn.transaction(|conn| callback(conn).scope_boxed()))
     }
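The `for<'r>`/`ScopedBoxFuture` bound above mirrors the signature of `AsyncConnection::transaction` itself: the higher-ranked lifetime lets the callback borrow the transaction's connection for exactly the duration of the returned future. A standalone analogue, written against diesel-async directly:

    use diesel_async::scoped_futures::ScopedBoxFuture;
    use diesel_async::{AsyncConnection, AsyncPgConnection};

    async fn in_tx<'a, R, F>(
        conn: &mut AsyncPgConnection,
        callback: F,
    ) -> Result<R, diesel::result::Error>
    where
        F: for<'r> FnOnce(
                &'r mut AsyncPgConnection,
            ) -> ScopedBoxFuture<'a, 'r, Result<R, diesel::result::Error>>
            + Send
            + 'a,
        R: Send + 'a,
    {
        conn.transaction(|conn| callback(conn)).await
    }

The copy helper differs only in that it returns the un-awaited transaction future to the caller, so the `?` on `transaction(...)` and the `.await?` on its result are two separate failure points.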
     /// Copy private data sources if the source uses a schema version that
     /// has a private data sources table. The copying is done in its own
     /// transaction.
-    fn copy_private_data_sources(&mut self, state: &CopyState) -> Result<(), StoreError> {
+    async fn copy_private_data_sources(&mut self, state: &CopyState) -> Result<(), StoreError> {
         let src_manifest_idx_and_name = self.src_manifest_idx_and_name.cheap_clone();
         let dst_manifest_idx_and_name = self.dst_manifest_idx_and_name.cheap_clone();
         if state.src.site.schema_version.private_data_sources() {
             self.transaction(|conn| {
-                DataSourcesTable::new(state.src.site.namespace.clone()).copy_to(
-                    conn,
-                    &DataSourcesTable::new(state.dst.site.namespace.clone()),
-                    state.target_block.number,
-                    &src_manifest_idx_and_name,
-                    &dst_manifest_idx_and_name,
-                )
-            })?;
+                async {
+                    DataSourcesTable::new(state.src.site.namespace.clone())
+                        .copy_to(
+                            conn,
+                            &DataSourcesTable::new(state.dst.site.namespace.clone()),
+                            state.target_block.number,
+                            &src_manifest_idx_and_name,
+                            &dst_manifest_idx_and_name,
+                        )
+                        .await
+                }
+                .scope_boxed()
+            })?
+            .await?;
         }
         Ok(())
     }
@@ -1015,7 +1060,7 @@ impl Connection {
         &mut self,
         state: &mut CopyState,
         progress: &Arc,
-    ) -> Option>>> {
+    ) -> Option {
         let Some(conn) = self.conn.take() else {
             return None;
         };
@@ -1033,16 +1078,17 @@ impl Connection {
     /// Opportunistically create an extra worker if we have more tables to
     /// copy and there are idle fdw connections. If there are no more tables
     /// or no idle connections, this will return `None`.
-    fn extra_worker(
+    async fn extra_worker(
         &mut self,
         state: &mut CopyState,
         progress: &Arc,
-    ) -> Option>>> {
+    ) -> Option {
         // It's important that we get the connection before the table since
         // we remove the table from the state and could drop it otherwise
         let Some(conn) = self
             .pool
             .try_get_fdw(&self.logger, ENV_VARS.store.batch_worker_wait)
+            .await
         else {
             return None;
         };
@@ -1105,8 +1151,11 @@ impl Connection {
         let dst = self.dst.clone();
         let target_block = self.target_block.clone();
         let primary = self.primary.cheap_clone();
-        let mut state =
-            self.transaction(|conn| CopyState::new(conn, primary, src, dst, target_block))?;
+        let mut state = self
+            .transaction(|conn| {
+                CopyState::new(conn, primary, src, dst, target_block).scope_boxed()
+            })?
+            .await?;

         let progress = Arc::new(CopyProgress::new(self.logger.cheap_clone(), &state));
         progress.start();
@@ -1132,7 +1181,7 @@ impl Connection {
                 if workers.len() >= self.workers {
                     break;
                 }
-                let Some(worker) = self.extra_worker(&mut state, &progress) else {
+                let Some(worker) = self.extra_worker(&mut state, &progress).await else {
                     break;
                 };
                 workers.add(worker);
@@ -1203,7 +1252,10 @@ impl Connection {

             for (_, sql) in arr {
                 let query = sql_query(format!("{};", sql));
-                self.transaction(|conn| query.execute(conn).map_err(StoreError::from))?;
+                self.transaction(|conn| {
+                    async { query.execute(conn).await.map_err(StoreError::from) }.scope_boxed()
+                })?
+                .await?;
             }
         }

@@ -1222,13 +1274,17 @@ impl Connection {
                 .into_iter()
             {
                 let query = sql_query(sql);
-                self.transaction(|conn| query.execute(conn).map_err(StoreError::from))?;
+                self.transaction(|conn| {
+                    async { query.execute(conn).await.map_err(StoreError::from) }.scope_boxed()
+                })?
+                .await?;
             }
         }

-        self.copy_private_data_sources(&state)?;
+        self.copy_private_data_sources(&state).await?;

-        self.transaction(|conn| state.finished(conn))?;
+        self.transaction(|conn| state.finished(conn).scope_boxed())?
+            .await?;

         progress.finished();
         Ok(Status::Finished)
     }
@@ -1268,7 +1324,7 @@ impl Connection {
         let Some(conn) = self.conn.as_mut() else {
             return Err(internal_error!("copy connection went missing (copy_data)"));
         };
-        conn.lock(&self.logger, &dst_site)?;
+        conn.lock(&self.logger, &dst_site).await?;

         let res = self.copy_data_internal(index_list).await;

@@ -1284,7 +1340,7 @@ impl Connection {
                 );
             }
             Some(conn) => {
-                conn.unlock(&self.logger, &dst_site)?;
+                conn.unlock(&self.logger, &dst_site).await?;
             }
         }

diff --git a/store/postgres/src/deployment.rs b/store/postgres/src/deployment.rs
index 340d80d1184..1a8cf9586c8 100644
--- a/store/postgres/src/deployment.rs
+++ b/store/postgres/src/deployment.rs
@@ -1,18 +1,17 @@
 //! Utilities for dealing with deployment metadata. Any connection passed
 //! into these methods must be for the shard that holds the actual
 //! deployment data and metadata
-use crate::{advisory_lock, detail::GraphNodeVersion, primary::DeploymentId};
-use diesel::pg::PgConnection;
+use crate::{advisory_lock, detail::GraphNodeVersion, primary::DeploymentId, AsyncPgConnection};
 use diesel::{
-    connection::SimpleConnection,
     dsl::{count, delete, insert_into, now, select, sql, update},
     sql_types::{Bool, Integer},
 };
 use diesel::{
-    prelude::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl},
+    prelude::{ExpressionMethods, OptionalExtension, QueryDsl},
     sql_query,
     sql_types::{Nullable, Text},
 };
+use diesel_async::{RunQueryDsl, SimpleAsyncConnection};
 use graph::{
     blockchain::block_stream::FirehoseCursor,
     data::subgraph::schema::SubgraphError,
@@ -231,8 +230,8 @@ joinable!(head -> deployment(id));
 /// return it. If `pending_only` is `true`, only return `Some(_)` if the
 /// deployment has not progressed past the graft point, i.e., data has not
 /// been copied for the graft
-fn graft(
-    conn: &mut PgConnection,
+async fn graft(
+    conn: &mut AsyncPgConnection,
     id: &DeploymentHash,
     pending_only: bool,
 ) -> Result, StoreError> {
@@ -248,11 +247,13 @@ fn graft(
             .inner_join(h::table)
             .filter(h::block_number.is_null())
             .first(conn)
+            .await
             .optional()?
             .unwrap_or((None, None, None))
     } else {
         graft_query
             .first(conn)
+            .await
             .optional()?
             .unwrap_or((None, None, None))
     };
@@ -282,28 +283,28 @@ fn graft(
 /// return it. Returns `None` if the deployment does not have
 /// a graft or if the subgraph has already progressed past the graft point,
 /// indicating that the data copying for grafting has been performed
-pub fn graft_pending(
-    conn: &mut PgConnection,
+pub async fn graft_pending(
+    conn: &mut AsyncPgConnection,
     id: &DeploymentHash,
 ) -> Result, StoreError> {
-    graft(conn, id, true)
+    graft(conn, id, true).await
 }

 /// Look up the graft point for the given subgraph in the database and
 /// return it. Returns `None` if the deployment does not have
 /// a graft.
-pub fn graft_point(
-    conn: &mut PgConnection,
+pub async fn graft_point(
+    conn: &mut AsyncPgConnection,
     id: &DeploymentHash,
 ) -> Result, StoreError> {
-    graft(conn, id, false)
+    graft(conn, id, false).await
 }
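The `pending_only` flag is what distinguishes the two wrappers: `graft_pending` answers "does graft data still have to be copied", while `graft_point` answers "was this deployment ever grafted". A hypothetical call site for the first one, using the types from the hunks above:

    use graph::prelude::{DeploymentHash, StoreError};

    async fn needs_graft_copy(
        conn: &mut AsyncPgConnection,
        id: &DeploymentHash,
    ) -> Result<bool, StoreError> {
        // `Some(_)` only while the deployment has not yet progressed past
        // its graft point, i.e. the graft base's data still has to be copied.
        Ok(graft_pending(conn, id).await?.is_some())
    }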
 /// Look up the debug fork for the given subgraph in the database and
 /// return it. Returns `None` if the deployment does not have
 /// a debug fork.
-pub fn debug_fork(
-    conn: &mut PgConnection,
+pub async fn debug_fork(
+    conn: &mut AsyncPgConnection,
     id: &DeploymentHash,
 ) -> Result, StoreError> {
     use deployment as sd;

     let debug_fork: Option = sd::table
         .select(sd::debug_fork)
         .filter(sd::subgraph.eq(id.as_str()))
-        .first(conn)?;
+        .first(conn)
+        .await?;

     match debug_fork {
         Some(fork) => Ok(Some(DeploymentHash::new(fork.clone()).map_err(|_| {
@@ -324,12 +326,16 @@ pub fn debug_fork(
     }
 }

-pub fn schema(conn: &mut PgConnection, site: &Site) -> Result<(InputSchema, bool), StoreError> {
+pub async fn schema(
+    conn: &mut AsyncPgConnection,
+    site: &Site,
+) -> Result<(InputSchema, bool), StoreError> {
     use subgraph_manifest as sm;
     let (s, spec_ver, use_bytea_prefix) = sm::table
         .select((sm::schema, sm::spec_version, sm::use_bytea_prefix))
         .filter(sm::id.eq(site.id))
-        .first::<(String, String, bool)>(conn)?;
+        .first::<(String, String, bool)>(conn)
+        .await?;
     let spec_version =
         Version::parse(spec_ver.as_str()).map_err(|err| StoreError::Unknown(err.into()))?;
     InputSchema::parse(&spec_version, s.as_str(), site.deployment.clone())
@@ -345,7 +351,10 @@ pub struct ManifestInfo {
 }

 impl ManifestInfo {
-    pub fn load(conn: &mut PgConnection, site: &Site) -> Result {
+    pub async fn load(
+        conn: &mut AsyncPgConnection,
+        site: &Site,
+    ) -> Result {
         use subgraph_manifest as sm;
         let (description, repository, spec_version, features): (
             Option,
@@ -360,7 +369,8 @@ impl ManifestInfo {
             sm::features,
         ))
         .filter(sm::id.eq(site.id))
-        .first(conn)?;
+        .first(conn)
+        .await?;

         // Using the features field to store the instrument flag is a bit
         // backhanded, but since this will be used very rarely, should not
@@ -377,17 +387,21 @@ impl ManifestInfo {
 }

 // Return how many blocks of history this subgraph should keep
-pub fn history_blocks(conn: &mut PgConnection, site: &Site) -> Result {
+pub async fn history_blocks(
+    conn: &mut AsyncPgConnection,
+    site: &Site,
+) -> Result {
     use subgraph_manifest as sm;
     sm::table
         .select(sm::history_blocks)
         .filter(sm::id.eq(site.id))
         .first::(conn)
+        .await
         .map_err(StoreError::from)
 }

-pub fn set_history_blocks(
-    conn: &mut PgConnection,
+pub async fn set_history_blocks(
+    conn: &mut AsyncPgConnection,
     site: &Site,
     history_blocks: BlockNumber,
 ) -> Result<(), StoreError> {
@@ -396,13 +410,14 @@ pub fn set_history_blocks(
     update(sm::table.filter(sm::id.eq(site.id)))
         .set(sm::history_blocks.eq(history_blocks))
         .execute(conn)
+        .await
         .map(|_| ())
         .map_err(StoreError::from)
 }

 /// This migrates subgraphs that existed before the raw_yaml column was added.
-pub fn set_manifest_raw_yaml(
-    conn: &mut PgConnection,
+pub async fn set_manifest_raw_yaml(
+    conn: &mut AsyncPgConnection,
     site: &Site,
     raw_yaml: &str,
 ) -> Result<(), StoreError> {
@@ -412,24 +427,26 @@ pub fn set_manifest_raw_yaml(
         .filter(sm::raw_yaml.is_null())
         .set(sm::raw_yaml.eq(raw_yaml))
         .execute(conn)
+        .await
         .map(|_| ())
         .map_err(|e| e.into())
 }

 /// Most of the time, this will be a noop; the only time we actually modify
 /// the deployment table is the first forward block after a reorg
-fn reset_reorg_count(conn: &mut PgConnection, site: &Site) -> StoreResult<()> {
+async fn reset_reorg_count(conn: &mut AsyncPgConnection, site: &Site) -> StoreResult<()> {
     use deployment as d;

     update(d::table.filter(d::id.eq(site.id)))
         .filter(d::current_reorg_depth.gt(0))
         .set(d::current_reorg_depth.eq(0))
-        .execute(conn)?;
+        .execute(conn)
+        .await?;
     Ok(())
 }

-pub fn transact_block(
-    conn: &mut PgConnection,
+pub async fn transact_block(
+    conn: &mut AsyncPgConnection,
     site: &Site,
     ptr: &BlockPtr,
     firehose_cursor: &FirehoseCursor,
@@ -445,7 +462,7 @@ pub fn transact_block(
     // Performance note: This costs us an extra DB query on every update. We used to put this in the
     // `where` clause of the `update` statement, but that caused Postgres to use bitmap scans instead
     // of a simple primary key lookup. So a separate query it is.
-    let block_ptr = block_ptr(conn, &site)?;
+    let block_ptr = block_ptr(conn, &site).await?;
     if let Some(block_ptr_from) = block_ptr {
         if block_ptr_from.number >= ptr.number {
             return Err(StoreError::DuplicateBlockProcessing(
@@ -455,7 +472,7 @@ pub fn transact_block(
         }
     }

-    reset_reorg_count(conn, site)?;
+    reset_reorg_count(conn, site).await?;

     let rows = update(h::table.filter(h::id.eq(site.id)))
         .set((
@@ -465,6 +482,7 @@ pub fn transact_block(
             h::entity_count.eq(sql(&count_sql)),
         ))
         .execute(conn)
+        .await
         .map_err(StoreError::from)?;

     match rows {
@@ -479,6 +497,7 @@ pub fn transact_block(
                 .filter(d::id.eq(site.id))
                 .select(d::earliest_block_number)
                 .get_result::(conn)
+                .await
                 .map_err(StoreError::from)
         }

@@ -495,15 +514,15 @@ pub fn transact_block(
     }
 }

-pub fn forward_block_ptr(
-    conn: &mut PgConnection,
+pub async fn forward_block_ptr(
+    conn: &mut AsyncPgConnection,
     site: &Site,
     ptr: &BlockPtr,
 ) -> Result<(), StoreError> {
     use crate::diesel::BoolExpressionMethods;
     use head as h;

-    reset_reorg_count(conn, site)?;
+    reset_reorg_count(conn, site).await?;

     let row_count = update(h::table.filter(h::id.eq(site.id)).filter(
         // Asserts that the processing direction is forward.
@@ -514,6 +533,7 @@ pub fn forward_block_ptr(
         h::block_hash.eq(ptr.hash_slice()),
     ))
     .execute(conn)
+    .await
     .map_err(StoreError::from)?;

     match row_count {
@@ -522,7 +542,7 @@ pub fn forward_block_ptr(

         // No matching rows were found. This is an error. By the filter conditions, this can only be
         // due to a missing deployment (which `block_ptr` catches) or duplicate block processing.
-        0 => match block_ptr(conn, &site)? {
+        0 => match block_ptr(conn, &site).await? {
             Some(block_ptr_from) if block_ptr_from.number >= ptr.number => Err(
                 StoreError::DuplicateBlockProcessing(site.deployment.clone(), ptr.number),
             ),
@@ -538,8 +558,8 @@ pub fn forward_block_ptr(
     }
 }

-pub fn get_subgraph_firehose_cursor(
-    conn: &mut PgConnection,
+pub async fn get_subgraph_firehose_cursor(
+    conn: &mut AsyncPgConnection,
     site: Arc,
 ) -> Result, StoreError> {
     use head as h;

     let res = h::table
         .filter(h::id.eq(site.id))
         .select(h::firehose_cursor)
         .first::>(conn)
+        .await
         .map_err(StoreError::from);
     res
 }

-pub fn revert_block_ptr(
-    conn: &mut PgConnection,
+pub async fn revert_block_ptr(
+    conn: &mut AsyncPgConnection,
     site: &Site,
     ptr: BlockPtr,
     firehose_cursor: &FirehoseCursor,
@@ -575,7 +596,8 @@ pub fn revert_block_ptr(
             d::current_reorg_depth.eq(d::current_reorg_depth + 1),
             d::max_reorg_depth.eq(sql("greatest(current_reorg_depth + 1, max_reorg_depth)")),
         ))
-        .execute(conn)?;
+        .execute(conn)
+        .await?;

     update(h::table.filter(h::id.eq(site.id)))
         .set((
@@ -583,7 +605,8 @@ pub fn revert_block_ptr(
             h::block_hash.eq(ptr.hash_slice()),
             h::firehose_cursor.eq(firehose_cursor.as_ref()),
         ))
-        .execute(conn)?;
+        .execute(conn)
+        .await?;

     match affected_rows {
         1 => Ok(()),
@@ -597,13 +620,17 @@ pub fn revert_block_ptr(
     }
 }

-pub fn block_ptr(conn: &mut PgConnection, site: &Site) -> Result, StoreError> {
+pub async fn block_ptr(
+    conn: &mut AsyncPgConnection,
+    site: &Site,
+) -> Result, StoreError> {
     use head as h;

     let (number, hash) = h::table
         .filter(h::id.eq(site.id))
         .select((h::block_number, h::block_hash))
         .first::<(Option, Option>)>(conn)
+        .await
         .map_err(|e| match e {
             diesel::result::Error::NotFound => {
                 StoreError::DeploymentNotFound(site.deployment.to_string())
@@ -624,14 +651,17 @@ pub fn block_ptr(conn: &mut PgConnection, site: &Site) -> Result Result<(), StoreError> {
+pub async fn initialize_block_ptr(
+    conn: &mut AsyncPgConnection,
+    site: &Site,
+) -> Result<(), StoreError> {
     use head as h;
     use subgraph_manifest as m;

     let needs_init = h::table
         .filter(h::id.eq(site.id))
         .select(h::block_hash)
-        .first::>>(conn)
+        .first::>>(conn).await
         .map_err(|e| {
             internal_error!(
                 "deployment sgd{} must have been created before calling initialize_block_ptr but we got {}",
@@ -644,11 +674,13 @@ pub fn initialize_block_ptr(conn: &mut PgConnection, site: &Site) -> Result<(),
     if let (Some(hash), Some(number)) = m::table
         .filter(m::id.eq(site.id))
         .select((m::start_block_hash, m::start_block_number))
-        .first::<(Option>, Option)>(conn)?
+        .first::<(Option>, Option)>(conn)
+        .await?
     {
         update(h::table.filter(h::id.eq(site.id)))
             .set((h::block_hash.eq(&hash), h::block_number.eq(number)))
             .execute(conn)
+            .await
             .map(|_| ())
             .map_err(|e| e.into())
     } else {
@@ -674,7 +706,10 @@ fn convert_to_u32(number: Option, field: &str, subgraph: &str) -> Result {
+pub async fn state(
+    conn: &mut AsyncPgConnection,
+    site: &Site,
+) -> Result {
     use deployment as d;
     use head as h;
     use subgraph_error as e;
@@ -702,6 +737,7 @@ pub fn state(conn: &mut PgConnection, site: &Site) -> Result(conn)
+        .await
         .optional()?
     {
         None => Err(StoreError::QueryExecutionError(format!(
@@ -743,7 +779,8 @@ pub fn state(conn: &mut PgConnection, site: &Site) -> Result>("min(lower(block_range))"))
-                .first::>(conn)?
+                .first::>(conn)
+                .await?
     } else {
         None
     };
@@ -760,8 +797,8 @@ pub fn state(conn: &mut PgConnection, site: &Site) -> Result
 }

-pub fn set_synced(
-    conn: &mut PgConnection,
+pub async fn set_synced(
+    conn: &mut AsyncPgConnection,
     site: &Site,
     block_ptr: BlockPtr,
 ) -> Result<(), StoreError> {
@@ -776,37 +813,43 @@ pub fn set_synced(
             d::synced_at.eq(now),
             d::synced_at_block_number.eq(block_ptr.number),
         ))
-        .execute(conn)?;
+        .execute(conn)
+        .await?;
     Ok(())
 }

 /// Returns `true` if the deployment (as identified by `site.id`) exists
-pub fn exists(conn: &mut PgConnection, site: &Site) -> Result {
+pub async fn exists(conn: &mut AsyncPgConnection, site: &Site) -> Result {
     use deployment as d;

     let exists = d::table
         .filter(d::id.eq(site.id))
         .count()
-        .get_result::(conn)?
+        .get_result::(conn)
+        .await?
         > 0;
     Ok(exists)
 }

 /// Returns `true` if the deployment `id` exists and is synced
-pub fn exists_and_synced(conn: &mut PgConnection, id: &str) -> Result {
+pub async fn exists_and_synced(conn: &mut AsyncPgConnection, id: &str) -> Result {
     use deployment as d;

     let synced = d::table
         .filter(d::subgraph.eq(id))
         .select(d::synced_at.is_not_null())
         .first(conn)
+        .await
         .optional()?
         .unwrap_or(false);
     Ok(synced)
 }

 // Does nothing if the error already exists. Returns the error id.
-fn insert_subgraph_error(conn: &mut PgConnection, error: &SubgraphError) -> anyhow::Result {
+async fn insert_subgraph_error(
+    conn: &mut AsyncPgConnection,
+    error: &SubgraphError,
+) -> anyhow::Result {
     use subgraph_error as e;

     let error_id = hex::encode(stable_hash_legacy::utils::stable_hash::(
@@ -836,25 +879,26 @@ fn insert_subgraph_error(conn: &mut PgConnection, error: &SubgraphError) -> anyh
             e::block_range.eq((Bound::Included(block_num), Bound::Unbounded)),
         ))
         .on_conflict_do_nothing()
-        .execute(conn)?;
+        .execute(conn)
+        .await?;

     Ok(error_id)
 }

-pub fn fail(
-    conn: &mut PgConnection,
+pub async fn fail(
+    conn: &mut AsyncPgConnection,
     id: &DeploymentHash,
     error: &SubgraphError,
 ) -> Result<(), StoreError> {
-    let error_id = insert_subgraph_error(conn, error)?;
+    let error_id = insert_subgraph_error(conn, error).await?;

-    update_deployment_status(conn, id, SubgraphHealth::Failed, Some(error_id), None)?;
+    update_deployment_status(conn, id, SubgraphHealth::Failed, Some(error_id), None).await?;

     Ok(())
 }

-pub fn update_non_fatal_errors(
-    conn: &mut PgConnection,
+pub async fn update_non_fatal_errors(
+    conn: &mut AsyncPgConnection,
     deployment_id: &DeploymentHash,
     health: SubgraphHealth,
     non_fatal_errors: Option<&[SubgraphError]>,
@@ -870,14 +914,14 @@ pub fn update_non_fatal_errors(
             .collect::>()
     });

-    update_deployment_status(conn, deployment_id, health, None, error_ids)?;
+    update_deployment_status(conn, deployment_id, health, None, error_ids).await?;

     Ok(())
 }

 /// If `block` is `None`, assumes the latest block.
-pub(crate) fn has_deterministic_errors(
-    conn: &mut PgConnection,
+pub(crate) async fn has_deterministic_errors(
+    conn: &mut AsyncPgConnection,
     id: &DeploymentHash,
     block: BlockNumber,
 ) -> Result {
@@ -889,11 +933,12 @@ pub(crate) fn has_deterministic_errors(
             .filter(sql::("block_range @> ").bind::(block)),
     ))
     .get_result(conn)
+    .await
     .map_err(|e| e.into())
 }

-pub fn update_deployment_status(
-    conn: &mut PgConnection,
+pub async fn update_deployment_status(
+    conn: &mut AsyncPgConnection,
     deployment_id: &DeploymentHash,
     health: SubgraphHealth,
     fatal_error: Option,
@@ -909,6 +954,7 @@ pub fn update_deployment_status(
             d::non_fatal_errors.eq::>(non_fatal_errors.unwrap_or(vec![])),
         ))
         .execute(conn)
+        .await
         .map(|_| ())
         .map_err(StoreError::from)
 }

@@ -917,9 +963,9 @@
 /// unhealthy. The `latest_block` is only used to check whether the subgraph
 /// is healthy as of that block; errors are inserted according to the
 /// `block_ptr` they contain
-pub(crate) fn insert_subgraph_errors(
+pub(crate) async fn insert_subgraph_errors(
     logger: &Logger,
-    conn: &mut PgConnection,
+    conn: &mut AsyncPgConnection,
     id: &DeploymentHash,
     deterministic_errors: &[SubgraphError],
     latest_block: BlockNumber,
@@ -932,15 +978,15 @@ pub(crate) fn insert_subgraph_errors(
     );

     for error in deterministic_errors {
-        insert_subgraph_error(conn, error)?;
+        insert_subgraph_error(conn, error).await?;
     }

-    check_health(logger, conn, id, latest_block)
+    check_health(logger, conn, id, latest_block).await
 }

 #[cfg(debug_assertions)]
-pub(crate) fn error_count(
-    conn: &mut PgConnection,
+pub(crate) async fn error_count(
+    conn: &mut AsyncPgConnection,
     id: &DeploymentHash,
 ) -> Result {
     use subgraph_error as e;

     Ok(e::table
         .filter(e::subgraph_id.eq(id.as_str()))
         .count()
-        .get_result::(conn)? as usize)
+        .get_result::(conn)
+        .await? as usize)
 }

 /// Checks if the subgraph is healthy or unhealthy as of the given block, or the subgraph latest
 /// block if `None`, based on the presence of deterministic errors. Has no effect on failed subgraphs.
-fn check_health(
+async fn check_health(
     logger: &Logger,
-    conn: &mut PgConnection,
+    conn: &mut AsyncPgConnection,
     id: &DeploymentHash,
     block: BlockNumber,
 ) -> Result<(), StoreError> {
     use deployment as d;

-    let has_errors = has_deterministic_errors(conn, id, block)?;
+    let has_errors = has_deterministic_errors(conn, id, block).await?;

     let (new, old) = match has_errors {
         true => {
@@ -983,12 +1030,13 @@ fn check_health(
     )
     .set(d::health.eq(new))
     .execute(conn)
+    .await
     .map(|_| ())
     .map_err(|e| e.into())
 }

-pub(crate) fn health(
-    conn: &mut PgConnection,
+pub(crate) async fn health(
+    conn: &mut AsyncPgConnection,
     id: DeploymentId,
 ) -> Result {
     use deployment as d;
@@ -997,11 +1045,12 @@ pub(crate) fn health(
         .filter(d::id.eq(id))
         .select(d::health)
         .get_result(conn)
+        .await
         .map_err(|e| e.into())
 }

-pub(crate) fn entities_with_causality_region(
-    conn: &mut PgConnection,
+pub(crate) async fn entities_with_causality_region(
+    conn: &mut AsyncPgConnection,
     id: DeploymentId,
     schema: &InputSchema,
 ) -> Result, StoreError> {
@@ -1011,6 +1060,7 @@ pub(crate) fn entities_with_causality_region(
         .filter(sm::id.eq(id))
         .select(sm::entities_with_causality_region)
         .get_result::>(conn)
+        .await
         .map_err(|e| e.into())
         .map(|ents| {
             // It is possible to have entity types in
@@ -1023,9 +1073,9 @@ pub(crate) fn entities_with_causality_region(
 }

 /// Reverts the errors and updates the subgraph health if necessary.
-pub(crate) fn revert_subgraph_errors(
+pub(crate) async fn revert_subgraph_errors(
     logger: &Logger,
-    conn: &mut PgConnection,
+    conn: &mut AsyncPgConnection,
     id: &DeploymentHash,
     reverted_block: BlockNumber,
 ) -> Result<(), StoreError> {
@@ -1038,12 +1088,13 @@ pub(crate) fn revert_subgraph_errors(
             .filter(e::subgraph_id.eq(id.as_str()))
             .filter(sql::(&lower_geq).bind::(reverted_block)),
     )
-    .execute(conn)?;
+    .execute(conn)
+    .await?;

     // The result will be the same at `reverted_block` or `reverted_block - 1` since the errors at
     // `reverted_block` were just deleted, but semantically we care about `reverted_block - 1` which
     // is the block being reverted to.
-    check_health(&logger, conn, id, reverted_block - 1)?;
+    check_health(&logger, conn, id, reverted_block - 1).await?;

     // If the deployment is failed in both `failed` and `status` columns,
     // update both values respectively to `false` and `healthy`. Basically
@@ -1056,22 +1107,27 @@ pub(crate) fn revert_subgraph_errors(
     )
     .set((d::failed.eq(false), d::health.eq(SubgraphHealth::Healthy)))
     .execute(conn)
+    .await
     .map(|_| ())
     .map_err(StoreError::from)
 }

-pub(crate) fn delete_error(conn: &mut PgConnection, error_id: &str) -> Result<(), StoreError> {
+pub(crate) async fn delete_error(
+    conn: &mut AsyncPgConnection,
+    error_id: &str,
+) -> Result<(), StoreError> {
     use subgraph_error as e;
     delete(e::table.filter(e::id.eq(error_id)))
         .execute(conn)
+        .await
         .map(|_| ())
         .map_err(StoreError::from)
 }

 /// Copy the errors for `src` to `dst`. All errors that
 /// were created up to and including `target_block` will be copied.
-pub(crate) fn copy_errors(
-    conn: &mut PgConnection,
+pub(crate) async fn copy_errors(
+    conn: &mut AsyncPgConnection,
     src: &Site,
     dst: &Site,
     target_block: &BlockPtr,
@@ -1085,7 +1141,8 @@ pub(crate) fn copy_errors(
     let count = e::table
         .filter(e::subgraph_id.eq(dst.deployment.as_str()))
         .select(count(e::vid))
-        .get_result::(conn)?;
+        .get_result::(conn)
+        .await?;
     if count > 0 {
         return Ok(count as usize);
     }
@@ -1115,7 +1172,8 @@ pub(crate) fn copy_errors(
         .bind::(src.deployment.as_str())
         .bind::(dst.deployment.as_str())
         .bind::(target_block.number)
-        .execute(conn)?)
+        .execute(conn)
+        .await?)
 }

 /// Drop the schema `namespace`. This deletes all data for the subgraph, and
@@ -1126,28 +1184,30 @@ pub(crate) fn copy_errors(
 /// schema, could block dropping the schema indefinitely, this operation
 /// will wait at most 2s to acquire all necessary locks, and fail if that is
 /// not possible.
-pub fn drop_schema(
-    conn: &mut PgConnection,
+pub async fn drop_schema(
+    conn: &mut AsyncPgConnection,
     namespace: &crate::primary::Namespace,
 ) -> Result<(), StoreError> {
     let query = format!(
         "set local lock_timeout=2000; drop schema if exists {} cascade",
         namespace
     );
-    Ok(conn.batch_execute(&query)?)
+    Ok(conn.batch_execute(&query).await?)
 }
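`drop_schema` relies on `set local lock_timeout` so that a busy schema makes the drop fail after two seconds instead of queueing behind other lock holders indefinitely. The same shape with diesel-async's `batch_execute`, which sends several statements in one round trip (the schema name here is illustrative):

    use diesel_async::{AsyncPgConnection, SimpleAsyncConnection};

    async fn drop_scratch_schema(conn: &mut AsyncPgConnection) -> Result<(), diesel::result::Error> {
        // `set local` only lasts until the end of the current transaction.
        conn.batch_execute("set local lock_timeout=2000; drop schema if exists scratch cascade")
            .await
    }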

-pub fn drop_metadata(conn: &mut PgConnection, site: &Site) -> Result<(), StoreError> {
+pub async fn drop_metadata(conn: &mut AsyncPgConnection, site: &Site) -> Result<(), StoreError> {
     use head as h;

     // We don't need to delete from `deployment`, `subgraph_manifest`, or
     // `subgraph_error` since that cascades from deleting `head`
-    delete(h::table.filter(h::id.eq(site.id))).execute(conn)?;
+    delete(h::table.filter(h::id.eq(site.id)))
+        .execute(conn)
+        .await?;
     Ok(())
 }

-pub fn create_deployment(
-    conn: &mut PgConnection,
+pub async fn create_deployment(
+    conn: &mut AsyncPgConnection,
     site: &Site,
     create: DeploymentCreate,
     exists: bool,
@@ -1212,7 +1272,7 @@ pub fn create_deployment(
         d::debug_fork.eq(debug_fork.as_ref().map(|s| s.as_str())),
     );

-    let graph_node_version_id = GraphNodeVersion::create_or_get(conn)?;
+    let graph_node_version_id = GraphNodeVersion::create_or_get(conn).await?;

     let manifest_values = (
         m::id.eq(site.id),
@@ -1235,25 +1295,33 @@ pub fn create_deployment(
     if exists && replace {
         update(h::table.filter(h::id.eq(site.id)))
             .set(head_values)
-            .execute(conn)?;
+            .execute(conn)
+            .await?;

         update(d::table.filter(d::subgraph.eq(site.deployment.as_str())))
             .set(deployment_values)
-            .execute(conn)?;
+            .execute(conn)
+            .await?;

         update(m::table.filter(m::id.eq(site.id)))
             .set(manifest_values)
-            .execute(conn)?;
+            .execute(conn)
+            .await?;
     } else {
-        insert_into(h::table).values(head_values).execute(conn)?;
+        insert_into(h::table)
+            .values(head_values)
+            .execute(conn)
+            .await?;

         insert_into(d::table)
             .values(deployment_values)
-            .execute(conn)?;
+            .execute(conn)
+            .await?;

         insert_into(m::table)
             .values(manifest_values)
-            .execute(conn)?;
+            .execute(conn)
+            .await?;
     }

     Ok(())
 }
@@ -1262,8 +1330,8 @@ fn entity_count_sql(count: i32) -> String {
     format!("entity_count + ({count})")
 }

-pub fn update_entity_count(
-    conn: &mut PgConnection,
+pub async fn update_entity_count(
+    conn: &mut AsyncPgConnection,
     site: &Site,
     count: i32,
 ) -> Result<(), StoreError> {
@@ -1276,17 +1344,22 @@ pub fn update_entity_count(
     let count_sql = entity_count_sql(count);
     update(h::table.filter(h::id.eq(site.id)))
         .set(h::entity_count.eq(sql(&count_sql)))
-        .execute(conn)?;
+        .execute(conn)
+        .await?;
     Ok(())
 }

 /// Set the deployment's entity count back to `0`
-pub fn clear_entity_count(conn: &mut PgConnection, site: &Site) -> Result<(), StoreError> {
+pub async fn clear_entity_count(
+    conn: &mut AsyncPgConnection,
+    site: &Site,
+) -> Result<(), StoreError> {
     use head as h;

     update(h::table.filter(h::id.eq(site.id)))
         .set(h::entity_count.eq(0))
-        .execute(conn)?;
+        .execute(conn)
+        .await?;
     Ok(())
 }

@@ -1295,8 +1368,8 @@ pub fn clear_entity_count(conn: &mut PgConnection, site: &Site) -> Result<(), St
 /// go backwards, only forward. This is important so that copying into
 /// `site` can not move the earliest block backwards if `site` was also
 /// pruned while the copy was running.
-pub fn set_earliest_block(
-    conn: &mut PgConnection,
+pub async fn set_earliest_block(
+    conn: &mut AsyncPgConnection,
     site: &Site,
     earliest_block: BlockNumber,
 ) -> Result<(), StoreError> {
@@ -1305,15 +1378,16 @@ pub fn set_earliest_block(
     update(d::table.filter(d::id.eq(site.id)))
         .set(d::earliest_block_number.eq(earliest_block))
         .filter(d::earliest_block_number.lt(earliest_block))
-        .execute(conn)?;
+        .execute(conn)
+        .await?;
     Ok(())
 }

 /// Copy the `earliest_block` attribute from `src` to `dst`. The copy might
The copy might /// go across shards and use the metadata tables mapped into the shard for /// `conn` which must be the shard for `dst` -pub fn copy_earliest_block( - conn: &mut PgConnection, +pub async fn copy_earliest_block( + conn: &mut AsyncPgConnection, src: &Site, dst: &Site, ) -> Result<(), StoreError> { @@ -1328,23 +1402,28 @@ pub fn copy_earliest_block( update(d::table.filter(d::id.eq(dst.id))) .set(d::earliest_block_number.eq(sql(&query))) - .execute(conn)?; + .execute(conn) + .await?; Ok(()) } -pub fn on_sync(conn: &mut PgConnection, id: impl Into) -> Result { +pub async fn on_sync( + conn: &mut AsyncPgConnection, + id: impl Into, +) -> Result { use subgraph_manifest as m; let s = m::table .filter(m::id.eq(id.into())) .select(m::on_sync) - .get_result::>(conn)?; + .get_result::>(conn) + .await?; OnSync::try_from(s.as_deref()) } -pub fn set_on_sync( - conn: &mut PgConnection, +pub async fn set_on_sync( + conn: &mut AsyncPgConnection, site: &Site, on_sync: OnSync, ) -> Result<(), StoreError> { @@ -1352,7 +1431,8 @@ pub fn set_on_sync( let n = update(m::table.filter(m::id.eq(site.id))) .set(m::on_sync.eq(on_sync.to_sql())) - .execute(conn)?; + .execute(conn) + .await?; match n { 0 => Err(StoreError::DeploymentNotFound(site.to_string())), @@ -1369,15 +1449,19 @@ pub fn set_on_sync( /// while other write activity for that deployment is locked out. Block the /// current thread until we can acquire the lock. // see also: deployment-lock-for-update -pub fn with_lock(conn: &mut PgConnection, site: &Site, f: F) -> Result +pub async fn with_lock( + conn: &mut AsyncPgConnection, + site: &Site, + f: F, +) -> Result where - F: FnOnce(&mut PgConnection) -> Result, + F: AsyncFnOnce(&mut AsyncPgConnection) -> Result, { let mut backoff = ExponentialBackoff::new(Duration::from_millis(100), Duration::from_secs(15)); - while !advisory_lock::lock_deployment_session(conn, site)? { + while !advisory_lock::lock_deployment_session(conn, site).await? 
{ backoff.sleep(); } - let res = f(conn); - advisory_lock::unlock_deployment_session(conn, site)?; + let res = f(conn).await; + advisory_lock::unlock_deployment_session(conn, site).await?; res } diff --git a/store/postgres/src/deployment_store.rs b/store/postgres/src/deployment_store.rs index 7aaedf12895..a537611130a 100644 --- a/store/postgres/src/deployment_store.rs +++ b/store/postgres/src/deployment_store.rs @@ -1,8 +1,9 @@ use detail::DeploymentDetail; -use diesel::connection::SimpleConnection; -use diesel::pg::PgConnection; -use diesel::r2d2::{ConnectionManager, PooledConnection}; -use diesel::{prelude::*, sql_query}; +use diesel::sql_query; +use diesel_async::scoped_futures::ScopedFutureExt; +use diesel_async::{AsyncConnection as _, RunQueryDsl, SimpleAsyncConnection}; +use tokio::task::JoinHandle; + use graph::anyhow::Context; use graph::blockchain::block_stream::{EntitySourceOperation, FirehoseCursor}; use graph::blockchain::BlockTime; @@ -18,18 +19,14 @@ use graph::data::subgraph::{status, SPEC_VERSION_0_0_6}; use graph::data_source::CausalityRegion; use graph::derive::CheapClone; use graph::futures03::FutureExt; -use graph::prelude::{ - ApiVersion, CancelHandle, CancelToken, CancelableError, EntityOperation, PoolWaitStats, - SubgraphDeploymentEntity, -}; +use graph::prelude::{ApiVersion, EntityOperation, PoolWaitStats, SubgraphDeploymentEntity}; use graph::semver::Version; -use graph::tokio::task::JoinHandle; use itertools::Itertools; use lru_time_cache::LruCache; use rand::{rng, seq::SliceRandom}; use std::collections::{BTreeMap, HashMap}; use std::convert::Into; -use std::ops::{Bound, DerefMut}; +use std::ops::Bound; use std::ops::{Deref, Range}; use std::str::FromStr; use std::sync::{atomic::AtomicUsize, Arc, Mutex}; @@ -55,7 +52,7 @@ use crate::primary::{DeploymentId, Primary}; use crate::relational::index::{CreateIndex, IndexList, Method}; use crate::relational::{self, Layout, LayoutCache, SqlName, Table}; use crate::relational_queries::FromEntityData; -use crate::{advisory_lock, catalog, retry}; +use crate::{advisory_lock, catalog, retry, AsyncPgConnection}; use crate::{detail, ConnectionPool}; use crate::{dynds, primary::Site}; @@ -181,7 +178,7 @@ impl DeploymentStore { // indexes are created later on, when the subgraph has synced. In case this parameter is None, all // indexes are created with the default creation strategy for a new subgraph, and also from the very // start. - pub(crate) fn create_deployment( + pub(crate) async fn create_deployment( &self, schema: &InputSchema, deployment: DeploymentCreate, @@ -191,108 +188,120 @@ impl DeploymentStore { on_sync: OnSync, index_def: Option, ) -> Result<(), StoreError> { - let mut conn = self.get_conn()?; - conn.transaction(|conn| -> Result<_, StoreError> { - let exists = deployment::exists(conn, &site)?; - - // Create (or update) the metadata. Update only happens in tests - let entities_with_causality_region = - deployment.manifest.entities_with_causality_region.clone(); - - // If `GRAPH_HISTORY_BLOCKS_OVERRIDE` is set, override the history_blocks - // setting with the value of the environment variable. - let deployment = - if let Some(history_blocks_global_override) = ENV_VARS.history_blocks_override { + let mut conn = self.pool.get().await?; + conn.transaction::<_, StoreError, _>(|conn| { + async { + let exists = deployment::exists(conn, &site).await?; + + // Create (or update) the metadata. 
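Aside: every transactional hunk in this migration follows the same shape, an `async` block pinned with `.scope_boxed()` and handed to `AsyncConnection::transaction`. A stripped-down sketch of the pattern, assuming only `diesel-async` and a throwaway query:

```rust
use diesel::sql_query;
use diesel_async::scoped_futures::ScopedFutureExt;
use diesel_async::{AsyncConnection, AsyncPgConnection, RunQueryDsl};

async fn tx_example(conn: &mut AsyncPgConnection) -> Result<(), diesel::result::Error> {
    // `transaction` takes a closure returning a scoped, boxed future;
    // `scope_boxed()` pins the async block so it can borrow `conn`.
    conn.transaction::<_, diesel::result::Error, _>(|conn| {
        async move {
            sql_query("select 1").execute(conn).await?;
            // Returning `Err` here rolls the whole transaction back.
            Ok(())
        }
        .scope_boxed()
    })
    .await
}
```

The `scope_boxed()` call is what lets the async block borrow `conn` for the duration of the transaction without requiring a `'static` bound.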
Update only happens in tests + let entities_with_causality_region = + deployment.manifest.entities_with_causality_region.clone(); + + // If `GRAPH_HISTORY_BLOCKS_OVERRIDE` is set, override the history_blocks + // setting with the value of the environment variable. + let deployment = if let Some(history_blocks_global_override) = + ENV_VARS.history_blocks_override + { deployment.with_history_blocks_override(history_blocks_global_override) } else { deployment }; - if replace || !exists { - deployment::create_deployment(conn, &site, deployment, exists, replace)?; - }; + if replace || !exists { + deployment::create_deployment(conn, &site, deployment, exists, replace).await?; + }; + + // Create the schema for the subgraph data + if !exists { + let query = format!("create schema {}", &site.namespace); + conn.batch_execute(&query).await?; - // Create the schema for the subgraph data - if !exists { - let query = format!("create schema {}", &site.namespace); - conn.batch_execute(&query)?; - - let layout = Layout::create_relational_schema( - conn, - site.clone(), - schema, - entities_with_causality_region.into_iter().collect(), - index_def, - )?; - // See if we are grafting and check that the graft is permissible - if let Some(base) = graft_base { - let errors = layout.can_copy_from(&base); - if !errors.is_empty() { - return Err(StoreError::Unknown(anyhow!( - "The subgraph `{}` cannot be used as the graft base \ + let layout = Layout::create_relational_schema( + conn, + site.clone(), + schema, + entities_with_causality_region.into_iter().collect(), + index_def, + ) + .await?; + // See if we are grafting and check that the graft is permissible + if let Some(base) = graft_base { + let errors = layout.can_copy_from(&base); + if !errors.is_empty() { + return Err(StoreError::Unknown(anyhow!( + "The subgraph `{}` cannot be used as the graft base \ for `{}` because the schemas are incompatible:\n - {}", - &base.catalog.site.namespace, - &layout.catalog.site.namespace, - errors.join("\n - ") - ))); + &base.catalog.site.namespace, + &layout.catalog.site.namespace, + errors.join("\n - ") + ))); + } } - } - // Create data sources table - if site.schema_version.private_data_sources() { - conn.batch_execute(&DataSourcesTable::new(site.namespace.clone()).as_ddl())?; + // Create data sources table + if site.schema_version.private_data_sources() { + conn.batch_execute(&DataSourcesTable::new(site.namespace.clone()).as_ddl()) + .await?; + } } - } - deployment::set_on_sync(conn, &site, on_sync)?; + deployment::set_on_sync(conn, &site, on_sync).await?; - Ok(()) + Ok(()) + } + .scope_boxed() }) + .await } - pub(crate) fn load_deployment( + pub(crate) async fn load_deployment( &self, site: Arc, ) -> Result { - let mut conn = self.get_conn()?; - let layout = self.layout(&mut conn, site.clone())?; + let mut conn = self.pool.get().await?; + let layout = self.layout(&mut conn, site.clone()).await?; Ok( detail::deployment_entity(&mut conn, &site, &layout.input_schema) + .await .with_context(|| format!("Deployment details not found for {}", site.deployment))?, ) } // Remove the data and metadata for the deployment `site`. 
This operation // is not reversible - pub(crate) fn drop_deployment(&self, site: &Site) -> Result<(), StoreError> { - let mut conn = self.get_conn()?; + pub(crate) async fn drop_deployment(&self, site: &Site) -> Result<(), StoreError> { + let mut conn = self.pool.get().await?; conn.transaction(|conn| { - crate::deployment::drop_schema(conn, &site.namespace)?; - if !site.schema_version.private_data_sources() { - crate::dynds::shared::drop(conn, &site.deployment)?; + async { + crate::deployment::drop_schema(conn, &site.namespace).await?; + if !site.schema_version.private_data_sources() { + crate::dynds::shared::drop(conn, &site.deployment).await?; + } + crate::deployment::drop_metadata(conn, site).await } - crate::deployment::drop_metadata(conn, site) + .scope_boxed() }) + .await } - pub(crate) fn execute_query( + pub(crate) async fn execute_query( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, site: Arc, query: EntityQuery, ) -> Result<(Vec, Trace), QueryExecutionError> { - let layout = self.layout(conn, site)?; + let layout = self.layout(conn, site).await?; let logger = query .logger .cheap_clone() .unwrap_or_else(|| self.logger.cheap_clone()); - layout.query(&logger, conn, query) + layout.query(&logger, conn, query).await } - fn check_intf_uniqueness( + async fn check_intf_uniqueness( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, layout: &Layout, group: &RowGroup, ) -> Result<(), StoreError> { @@ -301,8 +310,9 @@ impl DeploymentStore { return Ok(()); } - if let Some((conflicting_entity, id)) = - layout.conflicting_entities(conn, &types_with_shared_interface, group)? + if let Some((conflicting_entity, id)) = layout + .conflicting_entities(conn, &types_with_shared_interface, group) + .await? { return Err(StoreError::ConflictingId( group.entity_type.to_string(), @@ -313,9 +323,9 @@ impl DeploymentStore { Ok(()) } - fn apply_entity_modifications<'a>( + async fn apply_entity_modifications<'a>( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, layout: &Layout, groups: impl Iterator, stopwatch: &StopwatchMetrics, @@ -328,93 +338,33 @@ impl DeploymentStore { // Clamp entities before inserting them to avoid having versions // with overlapping block ranges let section = stopwatch.start_section("apply_entity_modifications_delete"); - layout.delete(conn, group, stopwatch)?; + layout.delete(conn, group, stopwatch).await?; section.end(); let section = stopwatch.start_section("check_interface_entity_uniqueness"); - self.check_intf_uniqueness(conn, layout, group)?; + self.check_intf_uniqueness(conn, layout, group).await?; section.end(); let section = stopwatch.start_section("apply_entity_modifications_insert"); - layout.insert(conn, group, stopwatch)?; + layout.insert(conn, group, stopwatch).await?; section.end(); } Ok(count) } - /// Execute a closure with a connection to the database. - /// - /// # API - /// The API of using a closure to bound the usage of the connection serves several - /// purposes: - /// - /// * Moves blocking database access out of the `Future::poll`. Within - /// `Future::poll` (which includes all `async` methods) it is illegal to - /// perform a blocking operation. This includes all accesses to the - /// database, acquiring of locks, etc. Calling a blocking operation can - /// cause problems with `Future` combinators (including but not limited - /// to select, timeout, and FuturesUnordered) and problems with - /// executors/runtimes. 
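Aside: the doc comment being deleted in this hunk explains why the callback-based `with_conn` existed at all: blocking diesel calls could not legally run inside `Future::poll`, so they were shipped to a dedicated thread with a cancel token. With `AsyncPgConnection` the query itself is a future, which is why the whole ceremony can go. A minimal sketch of the new style:

```rust
use diesel::dsl::sql;
use diesel::sql_types::Integer;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

// Old style (removed above): wrap the blocking call in `with_conn`, which
// moved it onto a separate thread and threaded a cancel token through.
// New style: the query is a future and can be awaited in place on the
// executor without blocking other tasks.
async fn two(conn: &mut AsyncPgConnection) -> Result<i32, diesel::result::Error> {
    diesel::select(sql::<Integer>("1 + 1")).get_result(conn).await
}
```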
This method moves the database work onto another - /// thread in a way which does not block `Future::poll`. - /// - /// * Limit the total number of connections. Because the supplied closure - /// takes a reference, we know the scope of the usage of all entity - /// connections and can limit their use in a non-blocking way. - /// - /// # Cancellation - /// The normal pattern for futures in Rust is drop to cancel. Once we - /// spawn the database work in a thread though, this expectation no longer - /// holds because the spawned task is the independent of this future. So, - /// this method provides a cancel token which indicates that the `Future` - /// has been dropped. This isn't *quite* as good as drop on cancel, - /// because a drop on cancel can do things like cancel http requests that - /// are in flight, but checking for cancel periodically is a significant - /// improvement. - /// - /// The implementation of the supplied closure should check for cancel - /// between every operation that is potentially blocking. This includes - /// any method which may interact with the database. The check can be - /// conveniently written as `token.check_cancel()?;`. It is low overhead - /// to check for cancel, so when in doubt it is better to have too many - /// checks than too few. - /// - /// # Panics: - /// * This task will panic if the supplied closure panics - /// * This task will panic if the supplied closure returns Err(Cancelled) - /// when the supplied cancel token is not cancelled. - pub(crate) async fn with_conn( - &self, - f: impl 'static - + Send - + FnOnce( - &mut PooledConnection>, - &CancelHandle, - ) -> Result>, - ) -> Result { - self.pool.with_conn(f).await - } - - /// Deprecated. Use `with_conn` instead. - fn get_conn(&self) -> Result>, StoreError> { - self.pool.get() - } - /// Panics if `idx` is not a valid index for a read only pool. - fn read_only_conn( - &self, - idx: usize, - ) -> Result>, Error> { - self.read_only_pools[idx].get().map_err(Error::from) + async fn read_only_conn(&self, idx: usize) -> Result { + self.read_only_pools[idx].get().await.map_err(Error::from) } - pub(crate) fn get_replica_conn( + pub(crate) async fn get_replica_conn( &self, replica: ReplicaId, - ) -> Result>, Error> { + ) -> Result { let conn = match replica { - ReplicaId::Main => self.get_conn()?, - ReplicaId::ReadOnly(idx) => self.read_only_conn(idx)?, + ReplicaId::Main => self.pool.get().await?, + ReplicaId::ReadOnly(idx) => self.read_only_conn(idx).await?, }; Ok(conn) } @@ -440,42 +390,43 @@ impl DeploymentStore { /// the Store. Layout objects with a pending migration can not be /// cached for longer than a transaction since they might change /// without us knowing - pub(crate) fn layout( + pub(crate) async fn layout( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, site: Arc, ) -> Result, StoreError> { - self.layout_cache.get(&self.logger, conn, site) + self.layout_cache.get(&self.logger, conn, site).await } /// Return the layout for a deployment. This might use a database /// connection for the lookup and should only be called if the caller /// does not have a connection currently. 
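Aside: `find_layout` just below is a cache-aside lookup: consult the in-memory `LayoutCache` first, and only pull a pooled connection on a miss. The shape, reduced to essentials (the generic names are placeholders, not the real `LayoutCache` API):

```rust
use std::collections::HashMap;
use std::future::Future;
use std::hash::Hash;
use std::sync::{Arc, Mutex};

struct Cache<K, V> {
    map: Mutex<HashMap<K, Arc<V>>>,
}

impl<K: Hash + Eq, V> Cache<K, V> {
    /// Return the cached value, or compute it with `load` (which may need
    /// an expensive resource such as a DB connection) and cache the result.
    async fn get_or_load<F, Fut, E>(&self, key: K, load: F) -> Result<Arc<V>, E>
    where
        F: FnOnce() -> Fut,
        Fut: Future<Output = Result<V, E>>,
    {
        // Fast path: no database connection needed at all.
        if let Some(v) = self.map.lock().unwrap().get(&key) {
            return Ok(v.clone());
        }
        // Slow path: load outside the lock, then publish the result.
        let v = Arc::new(load().await?);
        self.map.lock().unwrap().insert(key, v.clone());
        Ok(v)
    }
}
```

Note that the `MutexGuard` is dropped before the `.await`; holding a std mutex guard across an await point would make the future non-`Send`.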
If it does, use `layout` - pub(crate) fn find_layout(&self, site: Arc) -> Result, StoreError> { + pub(crate) async fn find_layout(&self, site: Arc) -> Result, StoreError> { if let Some(layout) = self.layout_cache.find(site.as_ref()) { return Ok(layout); } - let mut conn = self.get_conn()?; - self.layout(&mut conn, site) + let mut conn = self.pool.get().await?; + self.layout(&mut conn, site).await } - fn subgraph_info_with_conn( + async fn subgraph_info_with_conn( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, site: Arc, ) -> Result { if let Some(info) = self.subgraph_cache.lock().unwrap().get(&site.deployment) { return Ok(info.clone()); } - let layout = self.layout(conn, site.cheap_clone())?; - let manifest_info = deployment::ManifestInfo::load(conn, &site)?; + let layout = self.layout(conn, site.cheap_clone()).await?; + let manifest_info = deployment::ManifestInfo::load(conn, &site).await?; - let graft_block = - deployment::graft_point(conn, &site.deployment)?.map(|(_, ptr)| ptr.number); + let graft_block = deployment::graft_point(conn, &site.deployment) + .await? + .map(|(_, ptr)| ptr.number); - let debug_fork = deployment::debug_fork(conn, &site.deployment)?; + let debug_fork = deployment::debug_fork(conn, &site.deployment).await?; // Generate an API schema for the subgraph and make sure all types in the // API schema have a @subgraphId directive as well @@ -514,94 +465,94 @@ impl DeploymentStore { } } - pub(crate) fn subgraph_info(&self, site: Arc) -> Result { + pub(crate) async fn subgraph_info(&self, site: Arc) -> Result { if let Some(info) = self.subgraph_cache.lock().unwrap().get(&site.deployment) { return Ok(info.clone()); } - let mut conn = self.get_conn()?; - self.subgraph_info_with_conn(&mut conn, site) + let mut conn = self.pool.get().await?; + self.subgraph_info_with_conn(&mut conn, site).await } - fn block_ptr_with_conn( - conn: &mut PgConnection, + async fn block_ptr_with_conn( + conn: &mut AsyncPgConnection, site: Arc, ) -> Result, StoreError> { - deployment::block_ptr(conn, &site) + deployment::block_ptr(conn, &site).await } - pub(crate) fn deployment_details( + pub(crate) async fn deployment_details( &self, ids: Vec, ) -> Result, StoreError> { - let conn = &mut *self.get_conn()?; - conn.transaction(|conn| -> Result<_, StoreError> { detail::deployment_details(conn, ids) }) + let conn = &mut self.pool.get().await?; + detail::deployment_details(conn, ids).await } - pub fn deployment_details_for_id( + pub async fn deployment_details_for_id( &self, locator: &DeploymentLocator, ) -> Result { let id = DeploymentId::from(locator.clone()); - let conn = &mut *self.get_conn()?; - conn.transaction(|conn| -> Result<_, StoreError> { - detail::deployment_details_for_id(conn, &id) - }) + let conn = &mut self.pool.get().await?; + detail::deployment_details_for_id(conn, &id).await } - pub(crate) fn deployment_statuses( + pub(crate) async fn deployment_statuses( &self, sites: &[Arc], ) -> Result, StoreError> { - let conn = &mut *self.get_conn()?; - conn.transaction(|conn| -> Result, StoreError> { - detail::deployment_statuses(conn, sites) - }) + let conn = &mut self.pool.get().await?; + detail::deployment_statuses(conn, sites).await } - pub(crate) fn deployment_exists_and_synced( + pub(crate) async fn deployment_exists_and_synced( &self, id: &DeploymentHash, ) -> Result { - let mut conn = self.get_conn()?; - deployment::exists_and_synced(&mut conn, id.as_str()) + let mut conn = self.pool.get().await?; + deployment::exists_and_synced(&mut conn, id.as_str()).await } - 
pub(crate) fn deployment_synced( + pub(crate) async fn deployment_synced( &self, id: &DeploymentHash, block_ptr: BlockPtr, ) -> Result<(), StoreError> { - let mut conn = self.get_conn()?; - conn.transaction(|conn| deployment::set_synced(conn, id, block_ptr)) + let mut conn = self.pool.get().await?; + conn.transaction(|conn| deployment::set_synced(conn, id, block_ptr).scope_boxed()) + .await } /// Look up the on_sync action for this deployment - pub(crate) fn on_sync(&self, site: &Site) -> Result { - let mut conn = self.get_conn()?; - deployment::on_sync(&mut conn, site.id) + pub(crate) async fn on_sync(&self, site: &Site) -> Result { + let mut conn = self.pool.get().await?; + deployment::on_sync(&mut conn, site.id).await } /// Return the source if `site` or `None` if `site` is neither a graft /// nor a copy - pub(crate) fn source_of_copy(&self, site: &Site) -> Result, StoreError> { - let mut conn = self.get_conn()?; - crate::copy::source(&mut conn, site) + pub(crate) async fn source_of_copy( + &self, + site: &Site, + ) -> Result, StoreError> { + let mut conn = self.pool.get().await?; + crate::copy::source(&mut conn, site).await } // Only used for tests #[cfg(debug_assertions)] - pub(crate) fn drop_deployment_schema( + pub(crate) async fn drop_deployment_schema( &self, namespace: &crate::primary::Namespace, ) -> Result<(), StoreError> { - let mut conn = self.get_conn()?; - deployment::drop_schema(&mut conn, namespace) + let mut conn = self.pool.get().await?; + deployment::drop_schema(&mut conn, namespace).await } // Only used for tests #[cfg(debug_assertions)] - pub(crate) fn drop_all_metadata(&self) -> Result<(), StoreError> { + pub(crate) async fn drop_all_metadata(&self) -> Result<(), StoreError> { // Delete metadata entities in each shard // This needs to touch all the tables in the subgraphs schema @@ -617,55 +568,59 @@ impl DeploymentStore { delete from active_copies; "; - let mut conn = self.get_conn()?; - conn.batch_execute(QUERY)?; - conn.batch_execute("delete from deployment_schemas;")?; + let mut conn = self.pool.get().await?; + conn.batch_execute(QUERY).await?; + conn.batch_execute("delete from deployment_schemas;") + .await?; Ok(()) } pub(crate) async fn vacuum(&self) -> Result<(), StoreError> { - self.with_conn(|conn, _| { - conn.batch_execute("vacuum (analyze) subgraphs.head, subgraphs.deployment")?; - Ok(()) - }) - .await + let mut conn = self.pool.get().await?; + conn.batch_execute("vacuum (analyze) subgraphs.head, subgraphs.deployment") + .await?; + Ok(()) } /// Runs the SQL `ANALYZE` command in a table. - pub(crate) fn analyze(&self, site: Arc, entity: Option<&str>) -> Result<(), StoreError> { - let mut conn = self.get_conn()?; - let layout = self.layout(&mut conn, site)?; + pub(crate) async fn analyze( + &self, + site: Arc, + entity: Option<&str>, + ) -> Result<(), StoreError> { + let mut conn = self.pool.get().await?; + let layout = self.layout(&mut conn, site).await?; let tables = entity .map(|entity| resolve_table_name(&layout, entity)) .transpose()? 
.map(|table| vec![table]) .unwrap_or_else(|| layout.tables.values().map(Arc::as_ref).collect()); for table in tables { - table.analyze(&mut conn)?; + table.analyze(&mut conn).await?; } Ok(()) } - pub(crate) fn stats_targets( + pub(crate) async fn stats_targets( &self, site: Arc, ) -> Result<(i32, BTreeMap>), StoreError> { - let mut conn = self.get_conn()?; - let default = catalog::default_stats_target(&mut conn)?; - let targets = catalog::stats_targets(&mut conn, &site.namespace)?; + let mut conn = self.pool.get().await?; + let default = catalog::default_stats_target(&mut conn).await?; + let targets = catalog::stats_targets(&mut conn, &site.namespace).await?; Ok((default, targets)) } - pub(crate) fn set_stats_target( + pub(crate) async fn set_stats_target( &self, site: Arc, entity: Option<&str>, columns: Vec, target: i32, ) -> Result<(), StoreError> { - let mut conn = self.get_conn()?; - let layout = self.layout(&mut conn, site.clone())?; + let mut conn = self.pool.get().await?; + let layout = self.layout(&mut conn, site.clone()).await?; let tables = entity .map(|entity| resolve_table_name(&layout, entity)) @@ -674,27 +629,32 @@ impl DeploymentStore { .unwrap_or_else(|| layout.tables.values().map(Arc::as_ref).collect()); conn.transaction(|conn| { - for table in tables { - let (columns, _) = resolve_column_names_and_index_exprs(table, &columns)?; + async { + for table in tables { + let (columns, _) = resolve_column_names_and_index_exprs(table, &columns)?; - catalog::set_stats_target(conn, &site.namespace, &table.name, &columns, target)?; + catalog::set_stats_target(conn, &site.namespace, &table.name, &columns, target) + .await?; + } + Ok(()) } - Ok(()) + .scope_boxed() }) + .await } /// Runs the SQL `ANALYZE` command in a table, with a shared connection. - pub(crate) fn analyze_with_conn( + pub(crate) async fn analyze_with_conn( &self, site: Arc, entity_name: &str, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, ) -> Result<(), StoreError> { let store = self.clone(); let entity_name = entity_name.to_owned(); - let layout = store.layout(conn, site)?; + let layout = store.layout(conn, site).await?; let table = resolve_table_name(&layout, &entity_name)?; - table.analyze(conn) + table.analyze(conn).await } /// Creates a new index in the specified Entity table if it doesn't already exist. @@ -710,34 +670,27 @@ impl DeploymentStore { ) -> Result<(), StoreError> { let store = self.clone(); let entity_name = entity_name.to_owned(); - self.with_conn(move |conn, _| { - let schema_name = site.namespace.clone(); - let layout = store.layout(conn, site)?; - let (index_name, sql) = generate_index_creation_sql( - layout, - &entity_name, - field_names, - index_method, - after, - )?; - - // This might take a long time. - sql_query(sql).execute(conn)?; - // check if the index creation was successfull - let index_is_valid = - catalog::check_index_is_valid(conn, schema_name.as_str(), &index_name)?; - if index_is_valid { - Ok(()) - } else { - // Index creation falied. We should drop the index before returning. - let drop_index_sql = - format!("drop index concurrently if exists {schema_name}.{index_name}"); - sql_query(drop_index_sql).execute(conn)?; - Err(StoreError::Canceled) - } - .map_err(Into::into) - }) - .await + let mut conn = self.pool.get().await?; + let schema_name = site.namespace.clone(); + let layout = store.layout(&mut conn, site).await?; + let (index_name, sql) = + generate_index_creation_sql(layout, &entity_name, field_names, index_method, after)?; + + // This might take a long time. 
+ sql_query(sql).execute(&mut conn).await?; + // Check if the index creation was successful + let index_is_valid = + catalog::check_index_is_valid(&mut conn, schema_name.as_str(), &index_name).await?; + if index_is_valid { + Ok(()) + } else { + // Index creation failed. We should drop the index before returning. + let drop_index_sql = + format!("drop index concurrently if exists {schema_name}.{index_name}"); + sql_query(drop_index_sql).execute(&mut conn).await?; + Err(StoreError::Canceled) + } + .map_err(Into::into) } /// Returns a list of all existing indexes for the specified Entity table. @@ -748,24 +701,22 @@ ) -> Result<Vec<CreateIndex>, StoreError> { let store = self.clone(); let entity_name = entity_name.to_owned(); - self.with_conn(move |conn, _| { - let schema_name = site.namespace.clone(); - let layout = store.layout(conn, site)?; - let table = resolve_table_name(&layout, &entity_name)?; - let table_name = &table.name; - let indexes = - catalog::indexes_for_table(conn, schema_name.as_str(), table_name.as_str()) - .map_err(StoreError::from)?; - Ok(indexes.into_iter().map(CreateIndex::parse).collect()) - }) - .await + let mut conn = self.pool.get().await?; + let schema_name = site.namespace.clone(); + let layout = store.layout(&mut conn, site).await?; + let table = resolve_table_name(&layout, &entity_name)?; + let table_name = &table.name; + let indexes = + catalog::indexes_for_table(&mut conn, schema_name.as_str(), table_name.as_str()) + .await + .map_err(StoreError::from)?; + Ok(indexes.into_iter().map(CreateIndex::parse).collect()) } - pub(crate) fn load_indexes(&self, site: Arc<Site>) -> Result<IndexList, StoreError> { + pub(crate) async fn load_indexes(&self, site: Arc<Site>) -> Result<IndexList, StoreError> { let store = self.clone(); - let mut binding = self.get_conn()?; - let conn = binding.deref_mut(); - IndexList::load(conn, site, store) + let mut conn = self.pool.get().await?; + IndexList::load(&mut conn, site, store).await } /// Drops an index for a given deployment, concurrently.
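Aside: the rewritten block above keeps an important Postgres subtlety: `create index concurrently` can fail partway and leave an *invalid* index behind, so the code checks validity after the fact and drops the leftover. A standalone sketch of such a validity check against the system catalogs (the query shape is an assumption, not copied from `catalog::check_index_is_valid`):

```rust
use diesel::sql_query;
use diesel::sql_types::{Bool, Text};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

#[derive(diesel::QueryableByName)]
struct Valid {
    #[diesel(sql_type = Bool)]
    indisvalid: bool,
}

/// True if `schema.index` exists and Postgres marks it valid. A failed
/// `create index concurrently` leaves the index with `indisvalid = false`.
async fn index_is_valid(
    conn: &mut AsyncPgConnection,
    schema: &str,
    index: &str,
) -> Result<bool, diesel::result::Error> {
    let rows: Vec<Valid> = sql_query(
        "select i.indisvalid \
           from pg_index i \
           join pg_class c on c.oid = i.indexrelid \
           join pg_namespace n on n.oid = c.relnamespace \
          where n.nspname = $1 and c.relname = $2",
    )
    .bind::<Text, _>(schema)
    .bind::<Text, _>(index)
    .get_results(conn)
    .await?;
    Ok(rows.first().map(|v| v.indisvalid).unwrap_or(false))
}
```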
@@ -775,11 +726,9 @@ impl DeploymentStore { index_name: &str, ) -> Result<(), StoreError> { let index_name = String::from(index_name); - self.with_conn(move |mut conn, _| { - let schema_name = site.namespace.clone(); - catalog::drop_index(&mut conn, schema_name.as_str(), &index_name).map_err(Into::into) - }) - .await + let mut conn = self.pool.get().await?; + let schema_name = site.namespace.clone(); + catalog::drop_index(&mut conn, schema_name.as_str(), &index_name).await } pub(crate) async fn set_account_like( @@ -790,16 +739,13 @@ impl DeploymentStore { ) -> Result<(), StoreError> { let store = self.clone(); let table = table.to_string(); - self.with_conn(move |mut conn, _| { - let layout = store.layout(&mut conn, site.clone())?; - let table = resolve_table_name(&layout, &table)?; - catalog::set_account_like(&mut conn, &site, &table.name, is_account_like) - .map_err(Into::into) - }) - .await + let mut conn = self.pool.get().await?; + let layout = store.layout(&mut conn, site.clone()).await?; + let table = resolve_table_name(&layout, &table)?; + catalog::set_account_like(&mut conn, &site, &table.name, is_account_like).await } - pub(crate) fn set_history_blocks( + pub(crate) async fn set_history_blocks( &self, site: &Site, history_blocks: BlockNumber, @@ -818,8 +764,8 @@ impl DeploymentStore { // will use the updated value self.layout_cache.remove(site); - let mut conn = self.get_conn()?; - deployment::set_history_blocks(&mut conn, site, history_blocks) + let mut conn = self.pool.get().await?; + deployment::set_history_blocks(&mut conn, site, history_blocks).await } pub(crate) async fn prune( @@ -828,17 +774,15 @@ impl DeploymentStore { site: Arc, req: PruneRequest, ) -> Result, StoreError> { - fn do_prune( + async fn do_prune( store: Arc, - mut conn: &mut PooledConnection>, + mut conn: &mut AsyncPgConnection, site: Arc, - cancel: &CancelHandle, req: PruneRequest, mut reporter: Box, - ) -> Result, CancelableError> { - let layout = store.layout(&mut conn, site.clone())?; - cancel.check_cancel()?; - let state = deployment::state(&mut conn, &site)?; + ) -> Result, StoreError> { + let layout = store.layout(&mut conn, site.clone()).await?; + let state = deployment::state(&mut conn, &site).await?; if state.latest_block.number <= req.history_blocks { // We haven't accumulated enough history yet, nothing to prune @@ -853,39 +797,36 @@ impl DeploymentStore { conn.transaction(|conn| { deployment::set_earliest_block(conn, site.as_ref(), req.earliest_block) - })?; - - cancel.check_cancel()?; + .scope_boxed() + }) + .await?; - layout.prune(&store.logger, reporter.as_mut(), &mut conn, &req, cancel)?; + layout + .prune(&store.logger, reporter.as_mut(), &mut conn, &req) + .await?; Ok(reporter) } let store = self.clone(); - self.with_conn(move |conn, cancel| { - // We lock pruning for this deployment to make sure that if the - // deployment is reassigned to another node, that node won't - // kick off a pruning run while this node might still be pruning - if advisory_lock::try_lock_pruning(conn, &site)? { - let res = do_prune(store, conn, site.cheap_clone(), cancel, req, reporter); - advisory_lock::unlock_pruning(conn, &site)?; - res - } else { - Ok(reporter) - } - }) - .await + let mut conn = self.pool.get().await?; + // We lock pruning for this deployment to make sure that if the + // deployment is reassigned to another node, that node won't + // kick off a pruning run while this node might still be pruning + if advisory_lock::try_lock_pruning(&mut conn, &site).await? 
{ + let res = do_prune(store, &mut conn, site.cheap_clone(), req, reporter).await; + advisory_lock::unlock_pruning(&mut conn, &site).await?; + res + } else { + Ok(reporter) + } } pub(crate) async fn prune_viewer( self: &Arc, site: Arc, ) -> Result { - let store = self.cheap_clone(); - let layout = self - .pool - .with_conn(move |conn, _| store.layout(conn, site.clone()).map_err(|e| e.into())) - .await?; + let mut conn = self.pool.get().await?; + let layout = self.layout(&mut conn, site.clone()).await?; Ok(relational::prune::Viewer::new(self.pool.clone(), layout)) } @@ -896,33 +837,28 @@ impl DeploymentStore { pub(crate) async fn block_ptr(&self, site: Arc) -> Result, StoreError> { let site = site.cheap_clone(); - self.with_conn(|conn, cancel| { - cancel.check_cancel()?; - - Self::block_ptr_with_conn(conn, site).map_err(Into::into) - }) - .await + let mut conn = self.pool.get().await?; + Self::block_ptr_with_conn(&mut conn, site).await } pub(crate) async fn block_cursor(&self, site: Arc) -> Result { let site = site.cheap_clone(); - self.with_conn(|conn, cancel| { - cancel.check_cancel()?; - - deployment::get_subgraph_firehose_cursor(conn, site) - .map(FirehoseCursor::from) - .map_err(Into::into) - }) - .await + let mut conn = self.pool.get().await?; + deployment::get_subgraph_firehose_cursor(&mut conn, site) + .await + .map(FirehoseCursor::from) } - pub(crate) fn block_time(&self, site: Arc) -> Result, StoreError> { + pub(crate) async fn block_time( + &self, + site: Arc, + ) -> Result, StoreError> { let store = self.cheap_clone(); - let mut conn = self.get_conn()?; - let layout = store.layout(&mut conn, site.cheap_clone())?; - layout.last_rollup(&mut conn) + let mut conn = self.pool.get().await?; + let layout = store.layout(&mut conn, site.cheap_clone()).await?; + layout.last_rollup(&mut conn).await } pub(crate) async fn get_proof_of_indexing( @@ -934,65 +870,57 @@ impl DeploymentStore { let indexer = *indexer; let site2 = site.cheap_clone(); let store = self.cheap_clone(); - let layout = self.find_layout(site.cheap_clone())?; - let info = self.subgraph_info(site.cheap_clone())?; + let layout = self.find_layout(site.cheap_clone()).await?; + let info = self.subgraph_info(site.cheap_clone()).await?; let poi_digest = layout.input_schema.poi_digest(); - let entities: Option<(Vec, BlockPtr)> = self - .with_conn(move |conn, cancel| { - let site = site.clone(); - cancel.check_cancel()?; - - let layout = store.layout(conn, site.cheap_clone())?; + let mut conn = self.pool.get().await?; + let entities: Option<(Vec, BlockPtr)> = { + let site = site.clone(); - conn.transaction::<_, CancelableError, _>(move |conn| { - let mut block_ptr = block.cheap_clone(); - let latest_block_ptr = - match Self::block_ptr_with_conn(conn, site.cheap_clone())? { - Some(inner) => inner, - None => return Ok(None), - }; + let layout = store.layout(&mut conn, site.cheap_clone()).await?; - cancel.check_cancel()?; + let mut block_ptr = block.cheap_clone(); + let latest_block_ptr = + match Self::block_ptr_with_conn(&mut conn, site.cheap_clone()).await? { + Some(inner) => inner, + None => return Ok(None), + }; - // FIXME: (Determinism) - // - // It is vital to ensure that the block hash given in the query - // is a parent of the latest block indexed for the subgraph. - // Unfortunately the machinery needed to do this is not yet in place. - // The best we can do right now is just to make sure that the block number - // is high enough. 
- if latest_block_ptr.number < block.number { - // If a subgraph has failed deterministically then any blocks past head - // should return the same POI - let fatal_error = ErrorDetail::fatal(conn, &site.deployment)?; - block_ptr = match fatal_error { - Some(se) => TryInto::::try_into(se)? - .block_ptr - .unwrap_or(block_ptr), - None => return Ok(None), - }; - }; - - let query = EntityQuery::new( - site.deployment.cheap_clone(), - block_ptr.number, - EntityCollection::All(vec![( - layout.input_schema.poi_type().clone(), - AttributeNames::All, - )]), - ); - let entities = store - .execute_query::(conn, site, query) - .map(|(entities, _)| entities) - .map_err(anyhow::Error::from)?; - - Ok(Some((entities, block_ptr))) - }) - .map_err(Into::into) - }) - .await?; + // FIXME: (Determinism) + // + // It is vital to ensure that the block hash given in the query + // is a parent of the latest block indexed for the subgraph. + // Unfortunately the machinery needed to do this is not yet in place. + // The best we can do right now is just to make sure that the block number + // is high enough. + if latest_block_ptr.number < block.number { + // If a subgraph has failed deterministically then any blocks past head + // should return the same POI + let fatal_error = ErrorDetail::fatal(&mut conn, &site.deployment).await?; + block_ptr = match fatal_error { + Some(se) => TryInto::::try_into(se)? + .block_ptr + .unwrap_or(block_ptr), + None => return Ok(None), + }; + }; + let query = EntityQuery::new( + site.deployment.cheap_clone(), + block_ptr.number, + EntityCollection::All(vec![( + layout.input_schema.poi_type().clone(), + AttributeNames::All, + )]), + ); + let entities = store + .execute_query::(&mut conn, site, query) + .await + .map(|(entities, _)| entities) + .map_err(StoreError::from)?; + Some((entities, block_ptr)) + }; let (entities, block_ptr) = if let Some((entities, bp)) = entities { (entities, bp) } else { @@ -1026,20 +954,20 @@ impl DeploymentStore { /// Get the entity matching `key` from the deployment `site`. Only /// consider entities as of the given `block` - pub(crate) fn get( + pub(crate) async fn get( &self, site: Arc, key: &EntityKey, block: BlockNumber, ) -> Result, StoreError> { - let mut conn = self.get_conn()?; - let layout = self.layout(&mut conn, site)?; - layout.find(&mut conn, key, block) + let mut conn = self.pool.get().await?; + let layout = self.layout(&mut conn, site).await?; + layout.find(&mut conn, key, block).await } /// Retrieve all the entities matching `ids_for_type`, both the type and causality region, from /// the deployment `site`. 
Only consider entities as of the given `block` - pub(crate) fn get_many( + pub(crate) async fn get_many( &self, site: Arc, ids_for_type: &BTreeMap<(EntityType, CausalityRegion), IdList>, @@ -1048,61 +976,66 @@ impl DeploymentStore { if ids_for_type.is_empty() { return Ok(BTreeMap::new()); } - let mut conn = self.get_conn()?; - let layout = self.layout(&mut conn, site)?; + let mut conn = self.pool.get().await?; + let layout = self.layout(&mut conn, site).await?; - layout.find_many(&mut conn, ids_for_type, block) + layout.find_many(&mut conn, ids_for_type, block).await } - pub(crate) fn get_range( + pub(crate) async fn get_range( &self, site: Arc, entity_types: Vec, causality_region: CausalityRegion, block_range: Range, ) -> Result>, StoreError> { - let mut conn = self.get_conn()?; - let layout = self.layout(&mut conn, site)?; - layout.find_range(&mut conn, entity_types, causality_region, block_range) + let mut conn = self.pool.get().await?; + let layout = self.layout(&mut conn, site).await?; + layout + .find_range(&mut conn, entity_types, causality_region, block_range) + .await } - pub(crate) fn get_derived( + pub(crate) async fn get_derived( &self, site: Arc, derived_query: &DerivedEntityQuery, block: BlockNumber, excluded_keys: &Vec, ) -> Result, StoreError> { - let mut conn = self.get_conn()?; - let layout = self.layout(&mut conn, site)?; - layout.find_derived(&mut conn, derived_query, block, excluded_keys) + let mut conn = self.pool.get().await?; + let layout = self.layout(&mut conn, site).await?; + layout + .find_derived(&mut conn, derived_query, block, excluded_keys) + .await } - pub(crate) fn get_changes( + pub(crate) async fn get_changes( &self, site: Arc, block: BlockNumber, ) -> Result, StoreError> { - let mut conn = self.get_conn()?; - let layout = self.layout(&mut conn, site)?; - let changes = layout.find_changes(&mut conn, block)?; + let mut conn = self.pool.get().await?; + let layout = self.layout(&mut conn, site).await?; + let changes = layout.find_changes(&mut conn, block).await?; Ok(changes) } // Only used by tests #[cfg(debug_assertions)] - pub(crate) fn find( + pub(crate) async fn find( &self, site: Arc, query: EntityQuery, ) -> Result, QueryExecutionError> { - let mut conn = self.get_conn()?; + let mut conn = self.pool.get().await?; self.execute_query(&mut conn, site, query) + .await .map(|(entities, _)| entities) } - pub(crate) fn transact_block_operations( + pub(crate) async fn transact_block_operations( self: &Arc, logger: &Logger, site: Arc, @@ -1113,65 +1046,75 @@ impl DeploymentStore { ) -> Result<(), StoreError> { let mut conn = { let _section = stopwatch.start_section("transact_blocks_get_conn"); - self.get_conn()? + self.pool.get().await? 
}; - let (layout, earliest_block) = deployment::with_lock(&mut conn, &site, |conn| { - conn.transaction(|conn| -> Result<_, StoreError> { - // Make the changes - let layout = self.layout(conn, site.clone())?; + let (layout, earliest_block) = deployment::with_lock(&mut conn, &site, async |conn| { + conn.transaction(|conn| { + async { + // Make the changes + let layout = self.layout(conn, site.clone()).await?; - let section = stopwatch.start_section("apply_entity_modifications"); - let count = self.apply_entity_modifications( - conn, - layout.as_ref(), - batch.groups(), - stopwatch, - )?; - section.end(); + let section = stopwatch.start_section("apply_entity_modifications"); + let count = self + .apply_entity_modifications( + conn, + layout.as_ref(), + batch.groups(), + stopwatch, + ) + .await?; + section.end(); - layout.rollup(conn, last_rollup, &batch.block_times)?; + layout.rollup(conn, last_rollup, &batch.block_times).await?; - dynds::insert(conn, &site, &batch.data_sources, manifest_idx_and_name)?; + dynds::insert(conn, &site, &batch.data_sources, manifest_idx_and_name).await?; - dynds::update_offchain_status(conn, &site, &batch.offchain_to_remove)?; + dynds::update_offchain_status(conn, &site, &batch.offchain_to_remove).await?; - if !batch.deterministic_errors.is_empty() { - deployment::insert_subgraph_errors( - &self.logger, - conn, - &site.deployment, - &batch.deterministic_errors, - batch.block_ptr.number, - )?; - - if batch.is_non_fatal_errors_active { - debug!( - logger, - "Updating non-fatal errors for subgraph"; - "subgraph" => site.deployment.to_string(), - "block" => batch.block_ptr.number, - ); - deployment::update_non_fatal_errors( + if !batch.deterministic_errors.is_empty() { + deployment::insert_subgraph_errors( + &self.logger, conn, &site.deployment, - deployment::SubgraphHealth::Unhealthy, - Some(&batch.deterministic_errors), - )?; + &batch.deterministic_errors, + batch.block_ptr.number, + ) + .await?; + + if batch.is_non_fatal_errors_active { + debug!( + logger, + "Updating non-fatal errors for subgraph"; + "subgraph" => site.deployment.to_string(), + "block" => batch.block_ptr.number, + ); + deployment::update_non_fatal_errors( + conn, + &site.deployment, + deployment::SubgraphHealth::Unhealthy, + Some(&batch.deterministic_errors), + ) + .await?; + } } - } - - let earliest_block = deployment::transact_block( - conn, - &site, - &batch.block_ptr, - &batch.firehose_cursor, - count, - )?; - Ok((layout, earliest_block)) + let earliest_block = deployment::transact_block( + conn, + &site, + &batch.block_ptr, + &batch.firehose_cursor, + count, + ) + .await?; + + Ok((layout, earliest_block)) + } + .scope_boxed() }) - })?; + .await + }) + .await?; if batch.block_ptr.number as f64 > earliest_block as f64 @@ -1249,7 +1192,7 @@ impl DeploymentStore { req: PruneRequest, ) -> Result<(), StoreError> { { - if store.is_source(&site)? { + if store.is_source(&site).await? 
{ debug!( logger, "Skipping pruning since this deployment is being copied" @@ -1258,7 +1201,7 @@ impl DeploymentStore { } } let logger2 = logger.cheap_clone(); - retry::forever_async(&logger2, "prune", move || { + retry::forever(&logger2, "prune", move || { let store = store.cheap_clone(); let reporter = OngoingPruneReporter::new(logger.cheap_clone()); let site = site.cheap_clone(); @@ -1287,53 +1230,59 @@ impl DeploymentStore { Ok(()) } - fn rewind_or_truncate_with_conn( + async fn rewind_or_truncate_with_conn( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, site: Arc, block_ptr_to: BlockPtr, firehose_cursor: &FirehoseCursor, truncate: bool, ) -> Result<(), StoreError> { let logger = self.logger.cheap_clone(); - deployment::with_lock(conn, &site, |conn| { - conn.transaction(|conn| -> Result<_, StoreError> { - // The revert functions want the number of the first block that we need to get rid of - let block = block_ptr_to.number + 1; - - deployment::revert_block_ptr(conn, &site, block_ptr_to, firehose_cursor)?; + deployment::with_lock(conn, &site, async |conn| { + conn.transaction(|conn| { + async { + // The revert functions want the number of the first block that we need to get rid of + let block = block_ptr_to.number + 1; + + deployment::revert_block_ptr(conn, &site, block_ptr_to, firehose_cursor) + .await?; + + // Revert the data + let layout = self.layout(conn, site.clone()).await?; + + if truncate { + layout.truncate_tables(conn).await?; + deployment::clear_entity_count(conn, site.as_ref()).await?; + } else { + let count = layout.revert_block(conn, block).await?; + deployment::update_entity_count(conn, site.as_ref(), count).await?; + } - // Revert the data - let layout = self.layout(conn, site.clone())?; + // Revert the meta data changes that correspond to this subgraph. + // Only certain meta data changes need to be reverted, most + // importantly creation of dynamic data sources. We ensure in the + // rest of the code that we only record history for those meta data + // changes that might need to be reverted + Layout::revert_metadata(&logger, conn, &site, block).await?; - if truncate { - layout.truncate_tables(conn)?; - deployment::clear_entity_count(conn, site.as_ref())?; - } else { - let count = layout.revert_block(conn, block)?; - deployment::update_entity_count(conn, site.as_ref(), count)?; + Ok(()) } - - // Revert the meta data changes that correspond to this subgraph. - // Only certain meta data changes need to be reverted, most - // importantly creation of dynamic data sources. 
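Aside: one off-by-one convention in `rewind_or_truncate_with_conn` deserves to be explicit: `block_ptr_to` is the block we rewind *to* and keep, while the revert helpers take the first block to *discard*. As a tiny sketch, using the `BlockNumber = i32` alias from `graph`:

```rust
type BlockNumber = i32;

/// Given the block we rewind to (and keep), return the first block whose
/// data must be discarded. Rewinding to block 100 discards 101 and later.
fn first_block_to_discard(rewind_to: BlockNumber) -> BlockNumber {
    rewind_to + 1
}

fn main() {
    assert_eq!(first_block_to_discard(100), 101);
}
```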
We ensure in the - // rest of the code that we only record history for those meta data - // changes that might need to be reverted - Layout::revert_metadata(&logger, conn, &site, block)?; - - Ok(()) + .scope_boxed() }) + .await }) + .await } - pub(crate) fn truncate( + pub(crate) async fn truncate( &self, site: Arc, block_ptr_to: BlockPtr, ) -> Result<(), StoreError> { - let mut conn = self.get_conn()?; + let mut conn = self.pool.get().await?; - let block_ptr_from = Self::block_ptr_with_conn(&mut conn, site.cheap_clone())?; + let block_ptr_from = Self::block_ptr_with_conn(&mut conn, site.cheap_clone()).await?; // Sanity check on block numbers let from_number = block_ptr_from.map(|ptr| ptr.number); @@ -1354,12 +1303,17 @@ impl DeploymentStore { &FirehoseCursor::None, true, ) + .await } - pub(crate) fn rewind(&self, site: Arc, block_ptr_to: BlockPtr) -> Result<(), StoreError> { - let mut conn = self.get_conn()?; + pub(crate) async fn rewind( + &self, + site: Arc, + block_ptr_to: BlockPtr, + ) -> Result<(), StoreError> { + let mut conn = self.pool.get().await?; - let block_ptr_from = Self::block_ptr_with_conn(&mut conn, site.cheap_clone())?; + let block_ptr_from = Self::block_ptr_with_conn(&mut conn, site.cheap_clone()).await?; // Sanity check on block numbers let from_number = block_ptr_from.map(|ptr| ptr.number); @@ -1380,17 +1334,20 @@ impl DeploymentStore { &FirehoseCursor::None, false, ) + .await } - pub(crate) fn revert_block_operations( + pub(crate) async fn revert_block_operations( &self, site: Arc, block_ptr_to: BlockPtr, firehose_cursor: &FirehoseCursor, ) -> Result<(), StoreError> { - let mut conn = self.get_conn()?; + let mut conn = self.pool.get().await?; // Unwrap: If we are reverting then the block ptr is not `None`. - let deployment_head = Self::block_ptr_with_conn(&mut conn, site.cheap_clone())?.unwrap(); + let deployment_head = Self::block_ptr_with_conn(&mut conn, site.cheap_clone()) + .await? 
+ .unwrap(); // Confidence check on revert to ensure we go backward only if block_ptr_to.number >= deployment_head.number { @@ -1398,7 +1355,9 @@ impl DeploymentStore { } // Don't revert past a graft point - let info = self.subgraph_info_with_conn(&mut conn, site.cheap_clone())?; + let info = self + .subgraph_info_with_conn(&mut conn, site.cheap_clone()) + .await?; if let Some(graft_block) = info.graft_block { if graft_block > block_ptr_to.number { return Err(internal_error!( @@ -1413,14 +1372,15 @@ impl DeploymentStore { } self.rewind_or_truncate_with_conn(&mut conn, site, block_ptr_to, firehose_cursor, false) + .await } pub(crate) async fn deployment_state( &self, site: Arc, ) -> Result { - self.with_conn(move |conn, _| deployment::state(conn, &site).map_err(|e| e.into())) - .await + let mut conn = self.pool.get().await?; + deployment::state(&mut conn, &site).await } pub(crate) async fn fail_subgraph( @@ -1428,12 +1388,9 @@ impl DeploymentStore { id: DeploymentHash, error: SubgraphError, ) -> Result<(), StoreError> { - self.with_conn(move |conn, _| { - conn.transaction(|conn| deployment::fail(conn, &id, &error)) - .map_err(Into::into) - }) - .await?; - Ok(()) + let mut conn = self.pool.get().await?; + conn.transaction(|conn| deployment::fail(conn, &id, &error).scope_boxed()) + .await } pub(crate) fn replica_for_query(&self) -> Result { @@ -1457,37 +1414,29 @@ impl DeploymentStore { block: BlockNumber, manifest_idx_and_name: Vec<(u32, String)>, ) -> Result, StoreError> { - self.with_conn(move |conn, _| { - conn.transaction(|conn| crate::dynds::load(conn, &site, block, manifest_idx_and_name)) - .map_err(Into::into) - }) - .await + let mut conn = self.pool.get().await?; + crate::dynds::load(&mut conn, &site, block, manifest_idx_and_name).await } pub(crate) async fn causality_region_curr_val( &self, site: Arc, ) -> Result, StoreError> { - self.with_conn(move |conn, _| { - Ok(conn.transaction(|conn| crate::dynds::causality_region_curr_val(conn, &site))?) - }) - .await + let mut conn = self.pool.get().await?; + crate::dynds::causality_region_curr_val(&mut conn, &site).await } pub(crate) async fn exists_and_synced(&self, id: DeploymentHash) -> Result { - self.with_conn(move |conn, _| { - conn.transaction(|conn| deployment::exists_and_synced(conn, &id)) - .map_err(Into::into) - }) - .await + let mut conn = self.pool.get().await?; + deployment::exists_and_synced(&mut conn, &id).await } - pub(crate) fn graft_pending( + pub(crate) async fn graft_pending( &self, id: &DeploymentHash, ) -> Result, StoreError> { - let mut conn = self.get_conn()?; - deployment::graft_pending(&mut conn, id) + let mut conn = self.pool.get().await?; + deployment::graft_pending(&mut conn, id).await } /// Bring the subgraph into a state where we can start or resume @@ -1506,7 +1455,7 @@ impl DeploymentStore { site: Arc, graft_src: Option<(Arc, BlockPtr, SubgraphDeploymentEntity, IndexList)>, ) -> Result<(), StoreError> { - let dst = self.find_layout(site.cheap_clone())?; + let dst = self.find_layout(site.cheap_clone()).await?; // If `graft_src` is `Some`, then there is a pending graft. if let Some((src, block, src_deployment, index_list)) = graft_src { @@ -1519,7 +1468,8 @@ impl DeploymentStore { let src_manifest_idx_and_name = src_deployment.manifest.template_idx_and_name()?; let dst_manifest_idx_and_name = self - .load_deployment(dst.site.clone())? + .load_deployment(dst.site.clone()) + .await? 
.manifest .template_idx_and_name()?; @@ -1537,90 +1487,103 @@ impl DeploymentStore { block.clone(), src_manifest_idx_and_name, dst_manifest_idx_and_name, - )?; + ) + .await?; let status = copy_conn.copy_data(index_list).await?; if status == crate::copy::Status::Cancelled { return Err(StoreError::Canceled); } - let mut conn = self.get_conn()?; - conn.transaction(|conn| -> Result<(), StoreError> { - // Copy shared dynamic data sources and adjust their ID; if - // the subgraph uses private data sources, that is done by - // `copy::Connection::copy_data` since it requires access to - // the source schema which in sharded setups is only - // available while that function runs - let start = Instant::now(); - let count = dynds::shared::copy(conn, &src.site, &dst.site, block.number)?; - info!(logger, "Copied {} dynamic data sources", count; + let mut conn = self.pool.get().await?; + conn.transaction::<(), StoreError, _>(|conn| { + async { + // Copy shared dynamic data sources and adjust their ID; if + // the subgraph uses private data sources, that is done by + // `copy::Connection::copy_data` since it requires access to + // the source schema which in sharded setups is only + // available while that function runs + let start = Instant::now(); + let count = + dynds::shared::copy(conn, &src.site, &dst.site, block.number).await?; + info!(logger, "Copied {} dynamic data sources", count; "time_ms" => start.elapsed().as_millis()); - // Copy errors across - let start = Instant::now(); - let count = deployment::copy_errors(conn, &src.site, &dst.site, &block)?; - info!(logger, "Copied {} existing errors", count; + // Copy errors across + let start = Instant::now(); + let count = deployment::copy_errors(conn, &src.site, &dst.site, &block).await?; + info!(logger, "Copied {} existing errors", count; "time_ms" => start.elapsed().as_millis()); - catalog::copy_account_like(conn, &src.site, &dst.site)?; + catalog::copy_account_like(conn, &src.site, &dst.site).await?; - // Analyze all tables for this deployment - info!(logger, "Analyzing all {} tables", dst.tables.len()); - for entity_name in dst.tables.keys() { - self.analyze_with_conn(site.cheap_clone(), entity_name.as_str(), conn)?; - } + // Analyze all tables for this deployment + info!(logger, "Analyzing all {} tables", dst.tables.len()); + for entity_name in dst.tables.keys() { + self.analyze_with_conn(site.cheap_clone(), entity_name.as_str(), conn) + .await?; + } - // Rewind the subgraph so that entity versions that are - // clamped in the future (beyond `block`) become valid for - // all blocks after `block`. `revert_block` gets rid of - // everything including the block passed to it. We want to - // preserve `block` and therefore revert `block+1` - let start = Instant::now(); - let block_to_revert: BlockNumber = block - .number - .checked_add(1) - .expect("block numbers fit into an i32"); - info!(logger, "Rewinding to block {}", block.number); - let count = dst.revert_block(conn, block_to_revert)?; - deployment::update_entity_count(conn, &dst.site, count)?; - - info!(logger, "Rewound subgraph to block {}", block.number; + // Rewind the subgraph so that entity versions that are + // clamped in the future (beyond `block`) become valid for + // all blocks after `block`. `revert_block` gets rid of + // everything including the block passed to it. 
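Aside: each grafting step here follows the same measure-and-log pattern: capture an `Instant`, do the work, then attach the elapsed time as a structured slog field. A minimal self-contained sketch (the discard drain is only there to make it runnable):

```rust
use slog::{info, o, Discard, Logger};
use std::time::Instant;

// Attach the elapsed time as a structured field so downstream log
// processing can aggregate step durations.
fn timed_step(logger: &Logger) {
    let start = Instant::now();
    // ... the step being measured goes here ...
    info!(logger, "Copied dynamic data sources";
        "time_ms" => start.elapsed().as_millis() as u64);
}

fn main() {
    timed_step(&Logger::root(Discard, o!()));
}
```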
We want to + // preserve `block` and therefore revert `block+1` + let start = Instant::now(); + let block_to_revert: BlockNumber = block + .number + .checked_add(1) + .expect("block numbers fit into an i32"); + info!(logger, "Rewinding to block {}", block.number); + let count = dst.revert_block(conn, block_to_revert).await?; + deployment::update_entity_count(conn, &dst.site, count).await?; + + info!(logger, "Rewound subgraph to block {}", block.number; "time_ms" => start.elapsed().as_millis()); - deployment::set_history_blocks( - conn, - &dst.site, - src_deployment.manifest.history_blocks, - )?; - - // The `earliest_block` for `src` might have changed while - // we did the copy if `src` was pruned while we copied; - // adjusting it very late in the copy process ensures that - // we truly do have all the data starting at - // `earliest_block` and do not inadvertently expose data - // that might be incomplete because a prune on the source - // removed data just before we copied it - deployment::copy_earliest_block(conn, &src.site, &dst.site)?; - - // Set the block ptr to the graft point to signal that we successfully - // performed the graft - crate::deployment::forward_block_ptr(conn, &dst.site, &block)?; - info!(logger, "Subgraph successfully initialized"; + deployment::set_history_blocks( + conn, + &dst.site, + src_deployment.manifest.history_blocks, + ) + .await?; + + // The `earliest_block` for `src` might have changed while + // we did the copy if `src` was pruned while we copied; + // adjusting it very late in the copy process ensures that + // we truly do have all the data starting at + // `earliest_block` and do not inadvertently expose data + // that might be incomplete because a prune on the source + // removed data just before we copied it + deployment::copy_earliest_block(conn, &src.site, &dst.site).await?; + + // Set the block ptr to the graft point to signal that we successfully + // performed the graft + crate::deployment::forward_block_ptr(conn, &dst.site, &block).await?; + info!(logger, "Subgraph successfully initialized"; "time_ms" => start.elapsed().as_millis()); - Ok(()) - })?; + Ok(()) + } + .scope_boxed() + }) + .await?; } - let mut conn = self.get_conn()?; + let mut conn = self.pool.get().await?; if ENV_VARS.postpone_attribute_index_creation { // check if all indexes are valid and recreate them if they aren't - self.load_indexes(site.clone())? - .recreate_invalid_indexes(&mut conn, &dst)?; + self.load_indexes(site.clone()) + .await? + .recreate_invalid_indexes(&mut conn, &dst) + .await?; } // Make sure the block pointer is set. This is important for newly // deployed subgraphs so that we respect the 'startBlock' setting // the first time the subgraph is started - conn.transaction(|conn| crate::deployment::initialize_block_ptr(conn, &dst.site))?; + conn.transaction(|conn| { + crate::deployment::initialize_block_ptr(conn, &dst.site).scope_boxed() + }) + .await?; Ok(()) } @@ -1634,18 +1597,19 @@ impl DeploymentStore { // // - There's no fatal error for the subgraph // - The error is NOT deterministic - pub(crate) fn unfail_deterministic_error( + pub(crate) async fn unfail_deterministic_error( &self, site: Arc, current_ptr: &BlockPtr, parent_ptr: &BlockPtr, ) -> Result { - let mut conn = self.get_conn()?; + let mut conn = self.pool.get().await?; let deployment_id = &site.deployment; conn.transaction(|conn| { + async { // We'll only unfail subgraphs that had fatal errors - let subgraph_error = match ErrorDetail::fatal(conn, deployment_id)? 
{ + let subgraph_error = match ErrorDetail::fatal(conn, deployment_id).await? { Some(fatal_error) => fatal_error, // If the subgraph is not failed then there is nothing to do. None => return Ok(UnfailOutcome::Noop), @@ -1659,7 +1623,7 @@ impl DeploymentStore { use deployment::SubgraphHealth::*; // Decide status based on if there are any errors for the previous/parent block let prev_health = - if deployment::has_deterministic_errors(conn, deployment_id, parent_ptr.number)? { + if deployment::has_deterministic_errors(conn, deployment_id, parent_ptr.number).await? { Unhealthy } else { Healthy @@ -1685,10 +1649,10 @@ impl DeploymentStore { // We reset the firehose cursor. That way, on resume, Firehose will start from // the block_ptr instead (with sanity checks to ensure it's resuming at the // correct block). - let _ = self.revert_block_operations(site.clone(), parent_ptr.clone(), &FirehoseCursor::None)?; + let _ = self.revert_block_operations(site.clone(), parent_ptr.clone(), &FirehoseCursor::None).await?; // Unfail the deployment. - deployment::update_deployment_status(conn, deployment_id, prev_health, None, None)?; + deployment::update_deployment_status(conn, deployment_id, prev_health, None, None).await?; Ok(UnfailOutcome::Unfailed) } @@ -1717,8 +1681,8 @@ impl DeploymentStore { Ok(UnfailOutcome::Noop) } - } - }) + } }.scope_boxed() + }).await } // If a non-deterministic error happens and the deployment head advances, @@ -1731,17 +1695,17 @@ impl DeploymentStore { // // - There's no fatal error for the subgraph // - The error IS deterministic - pub(crate) fn unfail_non_deterministic_error( + pub(crate) async fn unfail_non_deterministic_error( &self, site: Arc<Site>, current_ptr: &BlockPtr, ) -> Result<UnfailOutcome, StoreError> { - let mut conn = self.get_conn()?; + let mut conn = self.pool.get().await?; let deployment_id = &site.deployment; - conn.transaction(|conn| { + conn.transaction(|conn| async { // We'll only unfail subgraphs that had fatal errors - let subgraph_error = match ErrorDetail::fatal(conn, deployment_id)? { + let subgraph_error = match ErrorDetail::fatal(conn, deployment_id).await? { Some(fatal_error) => fatal_error, // If the subgraph is not failed then there is nothing to do. None => return Ok(UnfailOutcome::Noop), @@ -1772,10 +1736,10 @@ impl DeploymentStore { deployment::SubgraphHealth::Healthy, None, None, - )?; + ).await?; // Delete the fatal error.
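Every `conn.transaction(...)` call in these hunks follows the same diesel-async shape: the closure returns a future, and `scope_boxed` (re-exported from `diesel_async::scoped_futures`) pins that future so it may borrow the connection. A minimal, self-contained sketch of the pattern, not code from this PR; the table names and queries are hypothetical:

use diesel_async::scoped_futures::ScopedFutureExt;
use diesel_async::{AsyncConnection, AsyncPgConnection, RunQueryDsl};

async fn bump_counter(conn: &mut AsyncPgConnection) -> Result<(), diesel::result::Error> {
    conn.transaction::<(), diesel::result::Error, _>(|conn| {
        async move {
            // Both statements commit or roll back together
            diesel::sql_query("update counters set n = n + 1")
                .execute(conn)
                .await?;
            diesel::sql_query("insert into audit(what) values ('bump')")
                .execute(conn)
                .await?;
            Ok(())
        }
        .scope_boxed()
    })
    .await
}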
- deployment::delete_error(conn, &subgraph_error.id)?; + deployment::delete_error(conn, &subgraph_error.id).await?; Ok(UnfailOutcome::Unfailed) } @@ -1794,13 +1758,13 @@ impl DeploymentStore { Ok(UnfailOutcome::Noop) } } - }) + }.scope_boxed()).await } #[cfg(debug_assertions)] - pub fn error_count(&self, id: &DeploymentHash) -> Result { - let mut conn = self.get_conn()?; - deployment::error_count(&mut conn, id) + pub async fn error_count(&self, id: &DeploymentHash) -> Result { + let mut conn = self.pool.get().await?; + deployment::error_count(&mut conn, id).await } pub(crate) async fn mirror_primary_tables(&self, logger: &Logger) { @@ -1821,16 +1785,12 @@ impl DeploymentStore { "info.subgraph_sizes", "info.chain_sizes", ]; - store - .with_conn(|conn, cancel| { - for view in VIEWS { - let query = format!("refresh materialized view {}", view); - diesel::sql_query(&query).execute(conn)?; - cancel.check_cancel()?; - } - Ok(()) - }) - .await + let mut conn = store.pool.get().await?; + for view in VIEWS { + let query = format!("refresh materialized view {}", view); + diesel::sql_query(&query).execute(&mut conn).await?; + } + Ok(()) } run(self).await.unwrap_or_else(|e| { @@ -1845,8 +1805,8 @@ impl DeploymentStore { site: &Site, ) -> Result { let id = site.id; - self.with_conn(move |conn, _| deployment::health(conn, id).map_err(Into::into)) - .await + let mut conn = self.pool.get().await?; + deployment::health(&mut conn, id).await } pub(crate) async fn set_manifest_raw_yaml( @@ -1854,14 +1814,12 @@ impl DeploymentStore { site: Arc, raw_yaml: String, ) -> Result<(), StoreError> { - self.with_conn(move |conn, _| { - deployment::set_manifest_raw_yaml(conn, &site, &raw_yaml).map_err(Into::into) - }) - .await + let mut conn = self.pool.get().await?; + deployment::set_manifest_raw_yaml(&mut conn, &site, &raw_yaml).await } - fn is_source(&self, site: &Site) -> Result { - self.primary.is_source(site) + async fn is_source(&self, site: &Site) -> Result { + self.primary.is_source(site).await } } diff --git a/store/postgres/src/detail.rs b/store/postgres/src/detail.rs index 0be3909a2c9..9aa11298add 100644 --- a/store/postgres/src/detail.rs +++ b/store/postgres/src/detail.rs @@ -4,9 +4,10 @@ #![allow(unused_macros)] use diesel::dsl::sql; use diesel::prelude::{ - ExpressionMethods, JoinOnDsl, NullableExpressionMethods, OptionalExtension, PgConnection, - QueryDsl, RunQueryDsl, SelectableHelper as _, + ExpressionMethods, JoinOnDsl, NullableExpressionMethods, OptionalExtension, QueryDsl, + SelectableHelper as _, }; +use diesel_async::RunQueryDsl; use diesel_derives::Associations; use git_testament::{git_testament, git_testament_macros}; use graph::blockchain::BlockHash; @@ -29,6 +30,7 @@ use crate::deployment::{ subgraph_manifest, SubgraphHealth as HealthType, }; use crate::primary::{DeploymentId, Site}; +use crate::AsyncPgConnection; git_testament_macros!(version); git_testament!(TESTAMENT); @@ -153,8 +155,8 @@ pub(crate) struct ErrorDetail { impl ErrorDetail { /// Fetches the fatal error, if present, associated with the given /// [`DeploymentHash`]. 
- pub fn fatal( - conn: &mut PgConnection, + pub async fn fatal( + conn: &mut AsyncPgConnection, deployment_id: &DeploymentHash, ) -> Result, StoreError> { use subgraph_deployment as d; @@ -165,6 +167,7 @@ impl ErrorDetail { .inner_join(e::table.on(e::id.nullable().eq(d::fatal_error))) .select(ErrorDetail::as_select()) .get_result(conn) + .await .optional() .map_err(StoreError::from) } @@ -305,8 +308,8 @@ pub(crate) fn info_from_details( } /// Return the details for `deployments` -pub(crate) fn deployment_details( - conn: &mut PgConnection, +pub(crate) async fn deployment_details( + conn: &mut AsyncPgConnection, deployments: Vec, ) -> Result, StoreError> { use subgraph_deployment as d; @@ -319,13 +322,15 @@ pub(crate) fn deployment_details( d::table .inner_join(h::table) .select(cols) - .load::<(Deployment, Head)>(conn)? + .load::<(Deployment, Head)>(conn) + .await? } else { d::table .inner_join(h::table) .filter(d::subgraph.eq_any(&deployments)) .select(cols) - .load::<(Deployment, Head)>(conn)? + .load::<(Deployment, Head)>(conn) + .await? } .into_iter() .map(DeploymentDetail::from) @@ -334,8 +339,8 @@ pub(crate) fn deployment_details( } /// Return the details for `deployment` -pub(crate) fn deployment_details_for_id( - conn: &mut PgConnection, +pub(crate) async fn deployment_details_for_id( + conn: &mut AsyncPgConnection, deployment: &DeploymentId, ) -> Result { use subgraph_deployment as d; @@ -348,12 +353,13 @@ pub(crate) fn deployment_details_for_id( .filter(d::id.eq(&deployment)) .select(cols) .first::<(Deployment, Head)>(conn) + .await .map_err(StoreError::from) .map(DeploymentDetail::from) } -pub(crate) fn deployment_statuses( - conn: &mut PgConnection, +pub(crate) async fn deployment_statuses( + conn: &mut AsyncPgConnection, sites: &[Arc], ) -> Result, StoreError> { use subgraph_deployment as d; @@ -376,14 +382,16 @@ pub(crate) fn deployment_statuses( .inner_join(h::table) .left_outer_join(join) .select(cols) - .load::<(Deployment, Head, Option)>(conn)? + .load::<(Deployment, Head, Option)>(conn) + .await? } else { d::table .inner_join(h::table) .left_outer_join(join) .filter(d::id.eq_any(sites.iter().map(|site| site.id))) .select(cols) - .load::<(Deployment, Head, Option)>(conn)? + .load::<(Deployment, Head, Option)>(conn) + .await? } }; @@ -395,13 +403,15 @@ pub(crate) fn deployment_statuses( d::table .inner_join(join) .select((d::id, ErrorDetail::as_select())) - .load::<(DeploymentId, ErrorDetail)>(conn)? + .load::<(DeploymentId, ErrorDetail)>(conn) + .await? } else { d::table .inner_join(join) .filter(d::id.eq_any(sites.iter().map(|site| site.id))) .select((d::id, ErrorDetail::as_select())) - .load::<(DeploymentId, ErrorDetail)>(conn)? + .load::<(DeploymentId, ErrorDetail)>(conn) + .await? } .into_iter() .into_group_map() @@ -411,12 +421,14 @@ pub(crate) fn deployment_statuses( if sites.is_empty() { sm::table .select((sm::id, sm::history_blocks)) - .load::<(DeploymentId, i32)>(conn)? + .load::<(DeploymentId, i32)>(conn) + .await? } else { sm::table .filter(sm::id.eq_any(sites.iter().map(|site| site.id))) .select((sm::id, sm::history_blocks)) - .load::<(DeploymentId, i32)>(conn)? + .load::<(DeploymentId, i32)>(conn) + .await? 
} .into_iter() .collect() @@ -538,8 +550,8 @@ impl StoredDeploymentEntity { } } -pub fn deployment_entity( - conn: &mut PgConnection, +pub async fn deployment_entity( + conn: &mut AsyncPgConnection, site: &Site, schema: &InputSchema, ) -> Result { @@ -550,13 +562,15 @@ pub fn deployment_entity( let manifest = m::table .find(site.id) .select(StoredSubgraphManifest::as_select()) - .first::(conn)?; + .first::(conn) + .await?; let detail = d::table .inner_join(h::table) .filter(d::id.eq(site.id)) .select(<(Deployment, Head)>::as_select()) .first::<(Deployment, Head)>(conn) + .await .map(DeploymentDetail::from)?; StoredDeploymentEntity(detail, manifest).as_subgraph_deployment(schema) @@ -575,7 +589,7 @@ pub struct GraphNodeVersion { } impl GraphNodeVersion { - pub(crate) fn create_or_get(conn: &mut PgConnection) -> anyhow::Result { + pub(crate) async fn create_or_get(conn: &mut AsyncPgConnection) -> anyhow::Result { let git_commit_hash = version_commit_hash!(); let git_repository_dirty = !&TESTAMENT.modifications.is_empty(); let crate_version = CARGO_PKG_VERSION; @@ -603,7 +617,8 @@ impl GraphNodeVersion { g::patch.eq(&patch), )) .on_conflict_do_nothing() - .execute(conn)?; + .execute(conn) + .await?; // select the id for the row we just inserted g::graph_node_versions @@ -614,7 +629,8 @@ impl GraphNodeVersion { .filter(g::major.eq(&major)) .filter(g::minor.eq(&minor)) .filter(g::patch.eq(&patch)) - .get_result(conn)? + .get_result(conn) + .await? }; Ok(graph_node_version_id) } diff --git a/store/postgres/src/dynds/mod.rs b/store/postgres/src/dynds/mod.rs index 27ab4e78a10..a1155c96678 100644 --- a/store/postgres/src/dynds/mod.rs +++ b/store/postgres/src/dynds/mod.rs @@ -3,8 +3,7 @@ pub(crate) mod shared; pub(crate) use private::DataSourcesTable; -use crate::primary::Site; -use diesel::PgConnection; +use crate::{primary::Site, AsyncPgConnection}; use graph::{ components::store::{write, StoredDynamicDataSource}, data_source::CausalityRegion, @@ -12,43 +11,55 @@ use graph::{ prelude::{BlockNumber, StoreError}, }; -pub fn load( - conn: &mut PgConnection, +pub async fn load( + conn: &mut AsyncPgConnection, site: &Site, block: BlockNumber, manifest_idx_and_name: Vec<(u32, String)>, ) -> Result, StoreError> { match site.schema_version.private_data_sources() { - true => DataSourcesTable::new(site.namespace.clone()).load(conn, block), - false => shared::load(conn, site.deployment.as_str(), block, manifest_idx_and_name), + true => { + DataSourcesTable::new(site.namespace.clone()) + .load(conn, block) + .await + } + false => shared::load(conn, site.deployment.as_str(), block, manifest_idx_and_name).await, } } -pub(crate) fn insert( - conn: &mut PgConnection, +pub(crate) async fn insert( + conn: &mut AsyncPgConnection, site: &Site, data_sources: &write::DataSources, manifest_idx_and_name: &[(u32, String)], ) -> Result { match site.schema_version.private_data_sources() { - true => DataSourcesTable::new(site.namespace.clone()).insert(conn, data_sources), - false => shared::insert(conn, &site.deployment, data_sources, manifest_idx_and_name), + true => { + DataSourcesTable::new(site.namespace.clone()) + .insert(conn, data_sources) + .await + } + false => shared::insert(conn, &site.deployment, data_sources, manifest_idx_and_name).await, } } -pub(crate) fn revert( - conn: &mut PgConnection, +pub(crate) async fn revert( + conn: &mut AsyncPgConnection, site: &Site, block: BlockNumber, ) -> Result<(), StoreError> { match site.schema_version.private_data_sources() { - true => 
DataSourcesTable::new(site.namespace.clone()).revert(conn, block), - false => shared::revert(conn, &site.deployment, block), + true => { + DataSourcesTable::new(site.namespace.clone()) + .revert(conn, block) + .await + } + false => shared::revert(conn, &site.deployment, block).await, } } -pub(crate) fn update_offchain_status( - conn: &mut PgConnection, +pub(crate) async fn update_offchain_status( + conn: &mut AsyncPgConnection, site: &Site, data_sources: &write::DataSources, ) -> Result<(), StoreError> { @@ -58,7 +69,9 @@ pub(crate) fn update_offchain_status( match site.schema_version.private_data_sources() { true => { - DataSourcesTable::new(site.namespace.clone()).update_offchain_status(conn, data_sources) + DataSourcesTable::new(site.namespace.clone()) + .update_offchain_status(conn, data_sources) + .await } false => Err(internal_error!( "shared schema does not support data source offchain_found", @@ -67,12 +80,16 @@ pub(crate) fn update_offchain_status( } /// The maximum assigned causality region. Any higher number is therefore free to be assigned. -pub(crate) fn causality_region_curr_val( - conn: &mut PgConnection, +pub(crate) async fn causality_region_curr_val( + conn: &mut AsyncPgConnection, site: &Site, ) -> Result, StoreError> { match site.schema_version.private_data_sources() { - true => DataSourcesTable::new(site.namespace.clone()).causality_region_curr_val(conn), + true => { + DataSourcesTable::new(site.namespace.clone()) + .causality_region_curr_val(conn) + .await + } // Subgraphs on the legacy shared table do not use offchain data sources. false => Ok(None), diff --git a/store/postgres/src/dynds/private.rs b/store/postgres/src/dynds/private.rs index d4d21ad39c1..874db77e788 100644 --- a/store/postgres/src/dynds/private.rs +++ b/store/postgres/src/dynds/private.rs @@ -2,12 +2,12 @@ use std::{collections::HashMap, i32, ops::Bound}; use diesel::{ pg::{sql_types, Pg}, - prelude::*, query_builder::{AstPass, QueryFragment, QueryId}, sql_query, sql_types::{Binary, Bool, Integer, Jsonb, Nullable}, - PgConnection, QueryDsl, RunQueryDsl, + ExpressionMethods, OptionalExtension, QueryDsl, QueryResult, }; +use diesel_async::RunQueryDsl; use graph::{ anyhow::{anyhow, Context}, @@ -17,7 +17,7 @@ use graph::{ prelude::{serde_json, BlockNumber, StoreError}, }; -use crate::{primary::Namespace, relational_queries::POSTGRES_MAX_PARAMETERS}; +use crate::{primary::Namespace, relational_queries::POSTGRES_MAX_PARAMETERS, AsyncPgConnection}; type DynTable = diesel_dynamic_schema::Table; type DynColumn = diesel_dynamic_schema::Column; @@ -83,9 +83,9 @@ impl DataSourcesTable { // Query to load the data sources which are live at `block`. Ordering by the creation block and // `vid` makes sure they are in insertion order which is important for the correctness of // reverts and the execution order of triggers. See also 8f1bca33-d3b7-4035-affc-fd6161a12448. 
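`DataSourcesTable` cannot use the `table!` macro because every deployment has its own `sgdNNN.data_sources$` table, so it builds table and column handles at runtime with diesel-dynamic-schema. A standalone sketch of that style of query (schema and column names assumed), ordered by `vid` to preserve insertion order as the comment above requires:

use diesel::{ExpressionMethods, QueryDsl};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

async fn load_vids(
    conn: &mut AsyncPgConnection,
    namespace: &str, // e.g. "sgd42"; assumed shape of a deployment schema
) -> Result<Vec<i32>, diesel::result::Error> {
    let table = diesel_dynamic_schema::schema(namespace).table("data_sources$");
    let vid = table.column::<diesel::sql_types::Integer, _>("vid");
    table
        .select(&vid)
        .order_by(&vid) // insertion order, as the comment above explains
        .load::<i32>(conn)
        .await
}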
- pub(super) fn load( + pub(super) async fn load( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, block: BlockNumber, ) -> Result, StoreError> { type Tuple = ( @@ -109,7 +109,8 @@ impl DataSourcesTable { &self.done_at, )) .order_by(&self.vid) - .load::(conn)?; + .load::(conn) + .await?; let mut dses: Vec<_> = tuples .into_iter() @@ -142,9 +143,9 @@ impl DataSourcesTable { Ok(dses) } - pub(crate) fn insert( + pub(crate) async fn insert( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, data_sources: &write::DataSources, ) -> Result { let mut inserted_total = 0; @@ -187,15 +188,15 @@ impl DataSourcesTable { .bind::(causality_region) .bind::, _>(done_at); - inserted_total += query.execute(conn)?; + inserted_total += query.execute(conn).await?; } } Ok(inserted_total) } - pub(crate) fn revert( + pub(crate) async fn revert( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, block: BlockNumber, ) -> Result<(), StoreError> { // Use the 'does not extend to the left of' operator `&>` to leverage the gist index, this @@ -207,22 +208,25 @@ impl DataSourcesTable { "delete from {} where block_range &> int4range($1, null)", self.qname ); - sql_query(query).bind::(block).execute(conn)?; + sql_query(query) + .bind::(block) + .execute(conn) + .await?; Ok(()) } /// Copy the dynamic data sources from `self` to `dst`. All data sources that /// were created up to and including `target_block` will be copied. - pub(crate) fn copy_to( + pub(crate) async fn copy_to( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, dst: &DataSourcesTable, target_block: BlockNumber, src_manifest_idx_and_name: &[(i32, String)], dst_manifest_idx_and_name: &[(i32, String)], ) -> Result { // Check if there are any data sources for dst which indicates we already copied - let count = dst.table.clone().count().get_result::(conn)?; + let count = dst.table.clone().count().get_result::(conn).await?; if count > 0 { return Ok(count as usize); } @@ -247,7 +251,8 @@ impl DataSourcesTable { &self.done_at, )) .order_by(&self.vid) - .load::(conn)? + .load::(conn) + .await? .into_iter() .map(|ds| ds.src_to_dst(target_block, &manifest_map, &self.namespace, &dst.namespace)) .collect::>()?; @@ -258,15 +263,20 @@ impl DataSourcesTable { let mut count = 0; for chunk in dss.chunks(chunk_size) { let query = CopyDsQuery::new(dst, chunk)?; - count += query.execute(conn)?; + count += query.execute(conn).await?; } // If the manifest idxes remained constant, we can test that both tables have the same // contents. if src_manifest_idx_and_name == dst_manifest_idx_and_name { debug_assert!( - self.load(conn, target_block).map_err(|e| e.to_string()) - == dst.load(conn, target_block).map_err(|e| e.to_string()) + self.load(conn, target_block) + .await + .map_err(|e| e.to_string()) + == dst + .load(conn, target_block) + .await + .map_err(|e| e.to_string()) ); } @@ -275,9 +285,9 @@ impl DataSourcesTable { // Remove offchain data sources by checking the causality region, which currently uniquely // identifies an offchain data source. 
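The `chunks(chunk_size)` loop in `copy_to` above exists because Postgres caps a single statement at 65535 bind parameters. The arithmetic, in a standalone sketch (generic over the row type, which is an assumption; the real code chunks stored data source rows):

const POSTGRES_MAX_PARAMETERS: usize = 65535;

// One INSERT per chunk keeps each statement under the bind-parameter cap.
// `binds_per_row` must be at least 1, or `chunks` would panic on size 0.
fn insert_in_chunks<T>(rows: &[T], binds_per_row: usize, mut run_insert: impl FnMut(&[T])) {
    let chunk_size = POSTGRES_MAX_PARAMETERS / binds_per_row;
    for chunk in rows.chunks(chunk_size) {
        run_insert(chunk);
    }
}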
- pub(super) fn update_offchain_status( + pub(super) async fn update_offchain_status( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, data_sources: &write::DataSources, ) -> Result<(), StoreError> { for (_, dss) in &data_sources.entries { @@ -290,7 +300,8 @@ impl DataSourcesTable { let count = sql_query(query) .bind::<Nullable<Integer>, _>(ds.done_at) .bind::<Integer, _>(ds.causality_region) - .execute(conn)?; + .execute(conn) + .await?; if count > 1 { return Err(internal_error!( @@ -307,9 +318,9 @@ impl DataSourcesTable { /// The current causality sequence according to the store, which is inferred to be the maximum /// value existing in the table. - pub(super) fn causality_region_curr_val( + pub(super) async fn causality_region_curr_val( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, ) -> Result<Option<CausalityRegion>, StoreError> { // Get the maximum `causality_region` leveraging the btree index. Ok(self .table .clone() .select(&self.causality_region) .order_by((&self.causality_region).desc()) .first::<CausalityRegion>(conn) + .await .optional()?) } } @@ -457,5 +469,3 @@ impl<'a> QueryId for CopyDsQuery<'a> { const HAS_STATIC_QUERY_ID: bool = false; } - -impl<'a, Conn> RunQueryDsl<Conn> for CopyDsQuery<'a> {} diff --git a/store/postgres/src/dynds/shared.rs b/store/postgres/src/dynds/shared.rs index 7fdec556ada..8835f449c35 100644 --- a/store/postgres/src/dynds/shared.rs +++ b/store/postgres/src/dynds/shared.rs @@ -1,13 +1,14 @@ //! SQL queries to load dynamic data sources +use diesel::insert_into; use diesel::{ delete, dsl::{count, sql}, - prelude::{ExpressionMethods, QueryDsl, RunQueryDsl}, + prelude::{ExpressionMethods, QueryDsl}, sql_query, sql_types::{Integer, Text}, }; -use diesel::{insert_into, pg::PgConnection}; +use diesel_async::RunQueryDsl; use graph::{ components::store::{write, StoredDynamicDataSource}, @@ -18,6 +19,7 @@ use graph::{ }; use crate::primary::Site; +use crate::AsyncPgConnection; use crate::ForeignServer; table! { @@ -35,8 +37,8 @@ table! { } } -pub(super) fn load( - conn: &mut PgConnection, +pub(super) async fn load( + conn: &mut AsyncPgConnection, id: &str, block: BlockNumber, manifest_idx_and_name: Vec<(u32, String)>, @@ -57,7 +59,8 @@ pub(super) fn load( )) .filter(decds::ethereum_block_number.le(sql(&format!("{}::numeric", block)))) .order_by((decds::ethereum_block_number, decds::vid)) - .load::<(i64, String, Option<String>, Vec<u8>, BigDecimal)>(conn)?; + .load::<(i64, String, Option<String>, Vec<u8>, BigDecimal)>(conn) + .await?; let mut data_sources: Vec<StoredDynamicDataSource> = Vec::new(); for (vid, name, context, address, creation_block) in dds.into_iter() { @@ -98,8 +101,8 @@ pub(super) fn load( Ok(data_sources) } -pub(super) fn insert( - conn: &mut PgConnection, +pub(super) async fn insert( + conn: &mut AsyncPgConnection, deployment: &DeploymentHash, data_sources: &write::DataSources, manifest_idx_and_name: &[(u32, String)], @@ -163,13 +166,14 @@ pub(super) fn insert( insert_into(decds::table) .values(dds) .execute(conn) + .await .map_err(|e| e.into()) } /// Copy the dynamic data sources for `src` to `dst`. All data sources that /// were created up to and including `target_block` will be copied.
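The edits to `shared.rs` above repeat the mechanical recipe used throughout this diff: keep the query-builder chain unchanged, import `RunQueryDsl` from `diesel_async` instead of `diesel`, take an `AsyncPgConnection`, and `.await` the terminal call. In isolation, with a hypothetical schema:

use diesel::prelude::{ExpressionMethods, QueryDsl};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

diesel::table! {
    deployments (id) {
        id -> Integer,
        failed -> Bool,
    }
}

async fn failed_deployment_ids(
    conn: &mut AsyncPgConnection,
) -> Result<Vec<i32>, diesel::result::Error> {
    deployments::table
        .filter(deployments::failed.eq(true))
        .select(deployments::id)
        .load::<i32>(conn) // same builder chain as the blocking version
        .await // the only structural change
}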
-pub(crate) fn copy( - conn: &mut PgConnection, +pub(crate) async fn copy( + conn: &mut AsyncPgConnection, src: &Site, dst: &Site, target_block: BlockNumber, @@ -189,7 +193,8 @@ pub(crate) fn copy( let count = decds::table .filter(decds::deployment.eq(dst.deployment.as_str())) .select(count(decds::vid)) - .get_result::(conn)?; + .get_result::(conn) + .await?; if count > 0 { return Ok(count as usize); } @@ -212,25 +217,32 @@ pub(crate) fn copy( .bind::(src.deployment.as_str()) .bind::(dst.deployment.as_str()) .bind::(target_block) - .execute(conn)?) + .execute(conn) + .await?) } -pub(super) fn revert( - conn: &mut PgConnection, +pub(super) async fn revert( + conn: &mut AsyncPgConnection, id: &DeploymentHash, block: BlockNumber, ) -> Result<(), StoreError> { use dynamic_ethereum_contract_data_source as decds; let dds = decds::table.filter(decds::deployment.eq(id.as_str())); - delete(dds.filter(decds::ethereum_block_number.ge(sql(&block.to_string())))).execute(conn)?; + delete(dds.filter(decds::ethereum_block_number.ge(sql(&block.to_string())))) + .execute(conn) + .await?; Ok(()) } -pub(crate) fn drop(conn: &mut PgConnection, id: &DeploymentHash) -> Result { +pub(crate) async fn drop( + conn: &mut AsyncPgConnection, + id: &DeploymentHash, +) -> Result { use dynamic_ethereum_contract_data_source as decds; delete(decds::table.filter(decds::deployment.eq(id.as_str()))) .execute(conn) + .await .map_err(|e| e.into()) } diff --git a/store/postgres/src/fork.rs b/store/postgres/src/fork.rs index 40457fb1739..6c9c340342c 100644 --- a/store/postgres/src/fork.rs +++ b/store/postgres/src/fork.rs @@ -4,8 +4,8 @@ use std::{ sync::Mutex, }; +use async_trait::async_trait; use graph::{ - block_on, components::store::SubgraphFork as SubgraphForkTrait, internal_error, prelude::{ @@ -44,8 +44,13 @@ pub(crate) struct SubgraphFork { logger: Logger, } +#[async_trait] impl SubgraphForkTrait for SubgraphFork { - fn fetch(&self, entity_type_name: String, id: String) -> Result, StoreError> { + async fn fetch( + &self, + entity_type_name: String, + id: String, + ) -> Result, StoreError> { { let mut fids = self.fetched_ids.lock().map_err(|e| { StoreError::ForkFailure(format!( @@ -76,7 +81,7 @@ impl SubgraphForkTrait for SubgraphFork { query: self.query_string(&entity_type_name, fields)?, variables: Variables { id }, }; - let raw_json = block_on(self.send(&query))?; + let raw_json = self.send(&query).await?; if !raw_json.contains("data") { return Err(StoreError::ForkFailure(format!( "the GraphQL query \"{:?}\" to `{}` failed with \"{}\"", diff --git a/store/postgres/src/graphman/mod.rs b/store/postgres/src/graphman/mod.rs index 4f538cd6e23..30fcd47fb58 100644 --- a/store/postgres/src/graphman/mod.rs +++ b/store/postgres/src/graphman/mod.rs @@ -1,6 +1,8 @@ use anyhow::Result; +use async_trait::async_trait; use chrono::Utc; use diesel::prelude::*; +use diesel_async::RunQueryDsl; use graphman_store::CommandKind; use graphman_store::Execution; use graphman_store::ExecutionId; @@ -23,9 +25,10 @@ impl GraphmanStore { } } +#[async_trait] impl graphman_store::GraphmanStore for GraphmanStore { - fn new_execution(&self, kind: CommandKind) -> Result { - let mut conn = self.primary_pool.get()?; + async fn new_execution(&self, kind: CommandKind) -> Result { + let mut conn = self.primary_pool.get().await?; let id: i64 = diesel::insert_into(gce::table) .values(( @@ -34,20 +37,21 @@ impl graphman_store::GraphmanStore for GraphmanStore { gce::created_at.eq(Utc::now()), )) .returning(gce::id) - .get_result(&mut conn)?; + .get_result(&mut 
conn) + .await?; Ok(ExecutionId(id)) } - fn load_execution(&self, id: ExecutionId) -> Result { - let mut conn = self.primary_pool.get()?; - let execution = gce::table.find(id).first(&mut conn)?; + async fn load_execution(&self, id: ExecutionId) -> Result { + let mut conn = self.primary_pool.get().await?; + let execution = gce::table.find(id).first(&mut conn).await?; Ok(execution) } - fn mark_execution_as_running(&self, id: ExecutionId) -> Result<()> { - let mut conn = self.primary_pool.get()?; + async fn mark_execution_as_running(&self, id: ExecutionId) -> Result<()> { + let mut conn = self.primary_pool.get().await?; diesel::update(gce::table) .set(( @@ -56,13 +60,14 @@ impl graphman_store::GraphmanStore for GraphmanStore { )) .filter(gce::id.eq(id)) .filter(gce::completed_at.is_null()) - .execute(&mut conn)?; + .execute(&mut conn) + .await?; Ok(()) } - fn mark_execution_as_failed(&self, id: ExecutionId, error_message: String) -> Result<()> { - let mut conn = self.primary_pool.get()?; + async fn mark_execution_as_failed(&self, id: ExecutionId, error_message: String) -> Result<()> { + let mut conn = self.primary_pool.get().await?; diesel::update(gce::table) .set(( @@ -71,13 +76,14 @@ impl graphman_store::GraphmanStore for GraphmanStore { gce::completed_at.eq(Utc::now()), )) .filter(gce::id.eq(id)) - .execute(&mut conn)?; + .execute(&mut conn) + .await?; Ok(()) } - fn mark_execution_as_succeeded(&self, id: ExecutionId) -> Result<()> { - let mut conn = self.primary_pool.get()?; + async fn mark_execution_as_succeeded(&self, id: ExecutionId) -> Result<()> { + let mut conn = self.primary_pool.get().await?; diesel::update(gce::table) .set(( @@ -85,7 +91,8 @@ impl graphman_store::GraphmanStore for GraphmanStore { gce::completed_at.eq(Utc::now()), )) .filter(gce::id.eq(id)) - .execute(&mut conn)?; + .execute(&mut conn) + .await?; Ok(()) } diff --git a/store/postgres/src/jobs.rs b/store/postgres/src/jobs.rs index d8177667183..0828c4f1944 100644 --- a/store/postgres/src/jobs.rs +++ b/store/postgres/src/jobs.rs @@ -4,7 +4,8 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use async_trait::async_trait; -use diesel::{prelude::RunQueryDsl, sql_query, sql_types::Double}; +use diesel::{sql_query, sql_types::Double}; +use diesel_async::RunQueryDsl; use graph::prelude::{error, Logger, MetricsRegistry, StoreError, ENV_VARS}; use graph::prometheus::Gauge; @@ -106,14 +107,12 @@ impl NotificationQueueUsage { usage: f64, } let usage_gauge = self.usage_gauge.clone(); - self.primary - .with_conn(move |conn, _| { - let res = sql_query("select pg_notification_queue_usage() as usage") - .get_result::(conn)?; - usage_gauge.set(res.usage); - Ok(()) - }) - .await + let mut conn = self.primary.get().await?; + let res = sql_query("select pg_notification_queue_usage() as usage") + .get_result::(&mut conn) + .await?; + usage_gauge.set(res.usage); + Ok(()) } } @@ -199,7 +198,7 @@ impl Job for UnusedJob { let start = Instant::now(); - if let Err(e) = self.store.record_unused_deployments() { + if let Err(e) = self.store.record_unused_deployments().await { error!(logger, "failed to record unused deployments"; "error" => e.to_string()); return; } @@ -208,7 +207,9 @@ impl Job for UnusedJob { .store .list_unused_deployments(unused::Filter::UnusedLongerThan( ENV_VARS.store.remove_unused_interval, - )) { + )) + .await + { Ok(remove) => remove, Err(e) => { error!(logger, "failed to list removable deployments"; "error" => e.to_string()); @@ -217,7 +218,7 @@ impl Job for UnusedJob { }; for deployment in remove { - match 
self.store.remove_deployment(deployment.id) { + match self.store.remove_deployment(deployment.id).await { Ok(()) => { /* ignore */ } Err(e) => { error!(logger, "failed to remove unused deployment"; diff --git a/store/postgres/src/lib.rs b/store/postgres/src/lib.rs index 0bbb261c154..b619eff2d63 100644 --- a/store/postgres/src/lib.rs +++ b/store/postgres/src/lib.rs @@ -61,7 +61,9 @@ pub use self::chain_store::{ChainStore, ChainStoreMetrics, Storage}; pub use self::detail::DeploymentDetail; pub use self::jobs::register as register_jobs; pub use self::notification_listener::NotificationSender; -pub use self::pool::{ConnectionPool, ForeignServer, PoolCoordinator, PoolRole}; +pub use self::pool::{ + AsyncPgConnection, ConnectionPool, ForeignServer, PoolCoordinator, PoolRole, ScopedFutureExt, +}; pub use self::primary::{db_version, UnusedDeployment}; pub use self::store::Store; pub use self::store_events::SubscriptionManager; diff --git a/store/postgres/src/notification_listener.rs b/store/postgres/src/notification_listener.rs index 583ef91479e..4f3864fc4f8 100644 --- a/store/postgres/src/notification_listener.rs +++ b/store/postgres/src/notification_listener.rs @@ -1,4 +1,3 @@ -use diesel::pg::PgConnection; use diesel::select; use diesel::sql_types::Text; use graph::prelude::tokio::sync::mpsc::error::SendTimeoutError; @@ -9,14 +8,17 @@ use postgres::Notification; use postgres::{fallible_iterator::FallibleIterator, Client}; use postgres_openssl::MakeTlsConnector; use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, Barrier, Mutex}; +use std::sync::{Arc, Barrier}; use std::thread; use std::time::{Duration, Instant}; use tokio::sync::mpsc::{channel, Receiver}; +use tokio::sync::Mutex; use graph::prelude::serde_json; use graph::prelude::*; +use crate::AsyncPgConnection; + #[cfg(debug_assertions)] lazy_static::lazy_static! { /// Tests set this to true so that `send_store_event` will store a copy @@ -404,15 +406,15 @@ impl NotificationSender { /// connection `conn` must be into the primary database as that's the /// only place where listeners connect. The `network` is only used for /// metrics gathering and does not affect how the notification is sent - pub fn notify( + pub async fn notify( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, channel: &str, network: Option<&str>, data: &serde_json::Value, ) -> Result<(), StoreError> { use diesel::ExpressionMethods; - use diesel::RunQueryDsl; + use diesel_async::RunQueryDsl; use public::large_notifications::dsl::*; define_sql_function! { @@ -422,16 +424,19 @@ impl NotificationSender { let msg = data.to_string(); if msg.len() <= LARGE_NOTIFICATION_THRESHOLD { - select(pg_notify(channel, &msg)).execute(conn)?; + select(pg_notify(channel, &msg)).execute(conn).await?; } else { // Write the notification payload to the large_notifications table let payload_id: i32 = diesel::insert_into(large_notifications) .values(payload.eq(&msg)) .returning(id) - .get_result(conn)?; + .get_result(conn) + .await?; // Use the large_notifications row ID as the payload for NOTIFY - select(pg_notify(channel, &payload_id.to_string())).execute(conn)?; + select(pg_notify(channel, &payload_id.to_string())) + .execute(conn) + .await?; // Prune old large_notifications. 
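For context on `notify` above: Postgres rejects NOTIFY payloads of roughly 8000 bytes or more, which is why oversized events are written to `large_notifications` and only the row id is broadcast. The `pg_notify` helper comes from `define_sql_function!`, which makes a SQL function callable from the query builder; a standalone sketch of the small-payload path:

use diesel::select;
use diesel::sql_types::Text;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

diesel::define_sql_function! {
    fn pg_notify(channel: Text, msg: Text)
}

async fn notify_small(
    conn: &mut AsyncPgConnection,
    channel: &str,
    msg: &str,
) -> Result<(), diesel::result::Error> {
    // Fine for payloads under the NOTIFY size limit; larger ones need the
    // indirection through a table, as in `NotificationSender::notify`.
    select(pg_notify(channel, msg)).execute(conn).await?;
    Ok(())
}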
We want to keep the size of the // table manageable, but there's a lot of latitude in how often @@ -456,7 +461,8 @@ impl NotificationSender { where created_at < current_timestamp - interval '{}s'", ENV_VARS.store.large_notification_cleanup_interval.as_secs(), )) - .execute(conn)?; + .execute(conn) + .await?; *last_check = Instant::now(); } } diff --git a/store/postgres/src/pool/coordinator.rs b/store/postgres/src/pool/coordinator.rs index f58a553b693..c16ba4d4b7a 100644 --- a/store/postgres/src/pool/coordinator.rs +++ b/store/postgres/src/pool/coordinator.rs @@ -76,15 +76,15 @@ impl PoolCoordinator { /// This tries to take the migration lock and must therefore be run from /// code that does _not_ hold the migration lock as it will otherwise /// deadlock - fn propagate(&self, pool: &PoolInner, count: MigrationCount) -> Result<(), StoreError> { + async fn propagate(&self, pool: &PoolInner, count: MigrationCount) -> Result<(), StoreError> { // We need to remap all these servers into `pool` if the list of // tables that are mapped have changed from the code of the previous // version. Since dropping and recreating the foreign table // definitions can slow the startup of other nodes down because of // locking, we try to only do this when it is actually needed for server in self.servers.iter() { - if pool.needs_remap(server)? { - pool.remap(server)?; + if pool.needs_remap(server).await? { + pool.remap(server).await?; } } @@ -95,9 +95,8 @@ impl PoolCoordinator { // we can be sure that these mappings use the correct schema if count.had_migrations() { let server = self.server(&pool.shard)?; - for pool in self.pools.lock().unwrap().values() { - let pool = pool.get_unready(); - let remap_res = pool.remap(server); + for pool in self.pools() { + let remap_res = pool.remap(&server).await; if let Err(e) = remap_res { error!(pool.logger, "Failed to map imports from {}", server.shard; "error" => e.to_string()); return Err(e); @@ -252,7 +251,7 @@ impl PoolCoordinator { .into_iter() .map(|(state, count)| async move { let pool = state.get_unready(); - let res = this.propagate(&pool, count); + let res = this.propagate(&pool, count).await; (state.cheap_clone(), res) }) .collect::>(); @@ -266,7 +265,7 @@ impl PoolCoordinator { let primary = self.primary()?; - let mut pconn = primary.get().map_err(|_| StoreError::DatabaseUnavailable)?; + let mut pconn = primary.get().await?; let states: Vec<_> = states .into_iter() @@ -294,13 +293,13 @@ impl PoolCoordinator { return Ok(0); } - primary.drop_cross_shard_views()?; + primary.drop_cross_shard_views().await?; let migrated = migrate(&states, self.servers.as_ref()).await?; let propagated = propagate(&self, migrated).await?; - primary.create_cross_shard_views(&self.servers)?; + primary.create_cross_shard_views(&self.servers).await?; for state in &propagated { state.set_ready(); diff --git a/store/postgres/src/pool/foreign_server.rs b/store/postgres/src/pool/foreign_server.rs index 3f8daf64b54..9f9f9f60791 100644 --- a/store/postgres/src/pool/foreign_server.rs +++ b/store/postgres/src/pool/foreign_server.rs @@ -1,5 +1,4 @@ -use diesel::{connection::SimpleConnection, pg::PgConnection}; - +use diesel_async::SimpleAsyncConnection; use graph::{ prelude::{ anyhow::{self, anyhow, bail}, @@ -12,12 +11,13 @@ use std::fmt::Write; use postgres::config::{Config, Host}; -use crate::catalog; use crate::primary::NAMESPACE_PUBLIC; +use crate::{catalog, AsyncPgConnection}; use crate::{Shard, PRIMARY_SHARD}; use super::{PRIMARY_PUBLIC, PRIMARY_TABLES, SHARDED_TABLES}; +#[derive(Clone)] pub 
struct ForeignServer { pub name: String, pub shard: Shard, @@ -106,7 +106,7 @@ impl ForeignServer { /// Create a new foreign server and user mapping on `conn` for this foreign /// server - pub(super) fn create(&self, conn: &mut PgConnection) -> Result<(), StoreError> { + pub(super) async fn create(&self, conn: &mut AsyncPgConnection) -> Result<(), StoreError> { let query = format!( "\ create server \"{name}\" @@ -127,12 +127,12 @@ impl ForeignServer { remote_password = self.password, fetch_size = ENV_VARS.store.fdw_fetch_size, ); - Ok(conn.batch_execute(&query)?) + Ok(conn.batch_execute(&query).await?) } /// Update an existing user mapping with possibly new details - pub(super) fn update(&self, conn: &mut PgConnection) -> Result<(), StoreError> { - let options = catalog::server_options(conn, &self.name)?; + pub(super) async fn update(&self, conn: &mut AsyncPgConnection) -> Result<(), StoreError> { + let options = catalog::server_options(conn, &self.name).await?; let set_or_add = |option: &str| -> &'static str { if options.contains_key(option) { "set" @@ -161,13 +161,16 @@ impl ForeignServer { remote_password = self.password, fetch_size = ENV_VARS.store.fdw_fetch_size, ); - Ok(conn.batch_execute(&query)?) + Ok(conn.batch_execute(&query).await?) } /// Map key tables from the primary into our local schema. If we are the /// primary, set them up as views. - pub(super) fn map_primary(conn: &mut PgConnection, shard: &Shard) -> Result<(), StoreError> { - catalog::recreate_schema(conn, PRIMARY_PUBLIC)?; + pub(super) async fn map_primary( + conn: &mut AsyncPgConnection, + shard: &Shard, + ) -> Result<(), StoreError> { + catalog::recreate_schema(conn, PRIMARY_PUBLIC).await?; let mut query = String::new(); for table_name in PRIMARY_TABLES { @@ -184,31 +187,39 @@ impl ForeignServer { table_name, PRIMARY_PUBLIC, Self::name(&PRIMARY_SHARD).as_str(), - )? + ) + .await? }; write!(query, "{}", create_stmt)?; } - conn.batch_execute(&query)?; + conn.batch_execute(&query).await?; Ok(()) } /// Map the `subgraphs` schema from the foreign server `self` into the /// database accessible through `conn` - pub(super) fn map_metadata(&self, conn: &mut PgConnection) -> Result<(), StoreError> { + pub(super) async fn map_metadata( + &self, + conn: &mut AsyncPgConnection, + ) -> Result<(), StoreError> { let nsp = Self::metadata_schema(&self.shard); - catalog::recreate_schema(conn, &nsp)?; + catalog::recreate_schema(conn, &nsp).await?; let mut query = String::new(); for (src_nsp, src_tables) in SHARDED_TABLES { for src_table in src_tables { let create_stmt = - catalog::create_foreign_table(conn, src_nsp, src_table, &nsp, &self.name)?; + catalog::create_foreign_table(conn, src_nsp, src_table, &nsp, &self.name) + .await?; write!(query, "{}", create_stmt)?; } } - Ok(conn.batch_execute(&query)?) + Ok(conn.batch_execute(&query).await?) 
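DDL such as `create server` or the foreign-table definitions above takes no bind parameters, so these functions assemble a string and run it through `batch_execute` on the `SimpleAsyncConnection` trait rather than through the query builder. A minimal sketch; it assumes the schema name was validated upstream, since it is interpolated directly into the statement:

use diesel_async::{AsyncPgConnection, SimpleAsyncConnection};

async fn recreate_schema(
    conn: &mut AsyncPgConnection,
    nsp: &str,
) -> Result<(), diesel::result::Error> {
    // Multiple statements in one round trip; no binds are possible here.
    let ddl = format!("drop schema if exists {nsp} cascade; create schema {nsp};");
    conn.batch_execute(&ddl).await
}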
} - pub(super) fn needs_remap(&self, conn: &mut PgConnection) -> Result { + pub(super) async fn needs_remap( + &self, + conn: &mut AsyncPgConnection, + ) -> Result { fn different(mut existing: Vec, mut needed: Vec) -> bool { existing.sort(); needed.sort(); @@ -216,7 +227,7 @@ impl ForeignServer { } if &self.shard == &*PRIMARY_SHARD { - let existing = catalog::foreign_tables(conn, PRIMARY_PUBLIC)?; + let existing = catalog::foreign_tables(conn, PRIMARY_PUBLIC).await?; let needed = PRIMARY_TABLES .into_iter() .map(String::from) @@ -226,7 +237,7 @@ impl ForeignServer { } } - let existing = catalog::foreign_tables(conn, &Self::metadata_schema(&self.shard))?; + let existing = catalog::foreign_tables(conn, &Self::metadata_schema(&self.shard)).await?; let needed = SHARDED_TABLES .iter() .flat_map(|(_, tables)| *tables) diff --git a/store/postgres/src/pool/manager.rs b/store/postgres/src/pool/manager.rs new file mode 100644 index 00000000000..6cff0f3c844 --- /dev/null +++ b/store/postgres/src/pool/manager.rs @@ -0,0 +1,300 @@ +//! Connection management for Postgres connection pools +//! +//! This module provides helpers for collecting metrics for a pool and +//! tracking availability of the underlying database + +use deadpool::managed::{Hook, RecycleError, RecycleResult}; +use diesel::IntoSql; + +use diesel_async::pooled_connection::{PoolError as DieselPoolError, PoolableConnection}; +use diesel_async::{AsyncConnection, RunQueryDsl}; +use graph::env::ENV_VARS; +use graph::prelude::error; +use graph::prelude::Counter; +use graph::prelude::Gauge; +use graph::prelude::MetricsRegistry; +use graph::prelude::MovingStats; +use graph::prelude::PoolWaitStats; +use graph::slog::info; +use graph::slog::Logger; + +use std::collections::HashMap; +use std::sync::atomic::AtomicBool; +use std::sync::atomic::Ordering; +use std::sync::Arc; +use std::sync::RwLock; +use std::time::Duration; + +use crate::pool::AsyncPool; + +/// Our own connection manager. It is pretty much the same as +/// `AsyncDieselConnectionManager` but makes it easier to instrument and +/// track connection errors +#[derive(Clone)] +pub struct ConnectionManager { + logger: Logger, + connection_url: String, + state_tracker: StateTracker, + error_counter: Counter, +} + +impl ConnectionManager { + pub(super) fn new( + logger: Logger, + connection_url: String, + state_tracker: StateTracker, + registry: &MetricsRegistry, + const_labels: HashMap, + ) -> Self { + let error_counter = registry + .global_counter( + "store_connection_error_count", + "The number of Postgres connections errors", + const_labels, + ) + .expect("failed to create `store_connection_error_count` counter"); + + Self { + logger, + connection_url, + state_tracker, + error_counter, + } + } + + fn handle_error(&self, error: &dyn std::error::Error) { + let msg = brief_error_msg(&error); + + // Don't count canceling statements for timeouts etc. as a + // connection error. Unfortunately, we only have the textual error + // and need to infer whether the error indicates that the database + // is down or if something else happened. When querying a replica, + // these messages indicate that a query was canceled because it + // conflicted with replication, but does not indicate that there is + // a problem with the database itself. + // + // This check will break if users run Postgres (or even graph-node) + // in a locale other than English. In that case, their database will + // be marked as unavailable even though it is perfectly fine. 
+ if msg.contains("canceling statement") + || msg.contains("terminating connection due to conflict with recovery") + { + return; + } + + self.error_counter.inc(); + if self.state_tracker.is_available() { + error!(self.logger, "Connection checkout"; "error" => msg); + } + self.state_tracker.mark_unavailable(Duration::from_secs(0)); + } +} + +impl deadpool::managed::Manager for ConnectionManager { + type Type = diesel_async::AsyncPgConnection; + + type Error = DieselPoolError; + + async fn create(&self) -> Result<Self::Type, Self::Error> { + let res = diesel_async::AsyncPgConnection::establish(&self.connection_url).await; + if let Err(ref e) = res { + self.handle_error(e); + } + res.map_err(DieselPoolError::ConnectionError) + } + + async fn recycle( + &self, + obj: &mut Self::Type, + _metrics: &deadpool::managed::Metrics, + ) -> RecycleResult<Self::Error> { + if std::thread::panicking() || obj.is_broken() { + return Err(RecycleError::Message("Broken connection".into())); + } + let res = diesel::select(67_i32.into_sql::<diesel::sql_types::Integer>()) + .execute(obj) + .await + .map(|_| ()); + if let Err(ref e) = res { + self.handle_error(e); + } + res.map_err(DieselPoolError::QueryError)?; + Ok(()) + } +} + +/// Track whether a database is available or not +#[derive(Clone)] +pub(super) struct StateTracker { + logger: Logger, + available: Arc<AtomicBool>, + ignore_timeout: Arc<AtomicBool>, +} + +impl StateTracker { + pub(super) fn new(logger: Logger) -> Self { + Self { + logger, + available: Arc::new(AtomicBool::new(true)), + ignore_timeout: Arc::new(AtomicBool::new(false)), + } + } + + pub(super) fn mark_available(&self) { + if !self.is_available() { + info!(self.logger, "Connection checkout"; "event" => "available"); + } + self.available.store(true, Ordering::Relaxed); + } + + pub(super) fn mark_unavailable(&self, waited: Duration) { + if self.is_available() { + if waited.as_nanos() > 0 { + error!(self.logger, "Connection checkout timed out"; + "event" => "unavailable", + "wait_ms" => waited.as_millis() + ) + } else { + error!(self.logger, "Connection checkout"; "event" => "unavailable"); + } + } + self.available.store(false, Ordering::Relaxed); + } + + pub(super) fn is_available(&self) -> bool { + AtomicBool::load(&self.available, Ordering::Relaxed) + } + + pub(super) fn timeout_is_ignored(&self) -> bool { + AtomicBool::load(&self.ignore_timeout, Ordering::Relaxed) + } + + /// Run the given async function while ignoring timeouts; if `f` causes + /// a timeout, the database is not marked as unavailable + pub(super) async fn ignore_timeout<F, R>(&self, f: F) -> R + where + F: AsyncFnOnce() -> R, + { + self.ignore_timeout.store(true, Ordering::Relaxed); + let res = f().await; + self.ignore_timeout.store(false, Ordering::Relaxed); + res + } + + /// Return a deadpool hook that marks the database as available + pub(super) fn mark_available_hook(&self) -> Hook<ConnectionManager> { + let state_tracker = self.clone(); + Hook::async_fn(move |_conn, _metrics| { + let state_tracker = state_tracker.clone(); + Box::pin(async move { + state_tracker.mark_available(); + Ok(()) + }) + }) + } +} + +fn brief_error_msg(error: &dyn std::error::Error) -> String { + // For 'Connection refused' errors, Postgres includes the IP and + // port number in the error message. We want to suppress that and + // only use the first line from the error message.
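The `Manager` implementation above reduces to a small surface: `create` opens a resource, `recycle` health-checks it before reuse. A toy version against the same deadpool 0.12 `managed` API; the connection type and error are stand-ins:

use deadpool::managed::{Manager, Metrics, Pool, RecycleError, RecycleResult};

struct FakeConn { broken: bool }
struct FakeManager;

impl Manager for FakeManager {
    type Type = FakeConn;
    type Error = std::io::Error;

    async fn create(&self) -> Result<FakeConn, Self::Error> {
        Ok(FakeConn { broken: false })
    }

    async fn recycle(&self, conn: &mut FakeConn, _: &Metrics) -> RecycleResult<Self::Error> {
        // Reject broken connections so the pool replaces them.
        if conn.broken {
            return Err(RecycleError::Message("broken connection".into()));
        }
        Ok(())
    }
}

async fn demo() -> Result<(), Box<dyn std::error::Error>> {
    let pool: Pool<FakeManager> = Pool::builder(FakeManager).max_size(4).build()?;
    let _conn = pool.get().await?; // recycled or freshly created
    Ok(())
}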
For more detailed + // analysis, 'Connection refused' manifests as a + // `ConnectionError(BadConnection("could not connect to server: + // Connection refused.."))` + error + .to_string() + .split('\n') + .next() + .unwrap_or("no error details provided") + .to_string() +} + +pub(crate) fn spawn_size_stat_collector( + pool: AsyncPool, + registry: &MetricsRegistry, + const_labels: HashMap, +) { + let count_gauge = registry + .global_gauge( + "store_connection_checkout_count", + "The number of Postgres connections currently checked out", + const_labels.clone(), + ) + .expect("failed to create `store_connection_checkout_count` counter"); + let size_gauge = registry + .global_gauge( + "store_connection_pool_size_count", + "Overall size of the connection pool", + const_labels, + ) + .expect("failed to create `store_connection_pool_size_count` counter"); + tokio::task::spawn(async move { + loop { + let status = pool.status(); + count_gauge.set((status.size - status.available) as f64); + size_gauge.set(status.size as f64); + tokio::time::sleep(Duration::from_secs(15)).await; + } + }); +} + +/// Reap connections that are too old (older than 30 minutes) or if there +/// are more than `connection_min_idle` connections in the pool that have +/// been idle for longer than `idle_timeout` +pub(crate) fn spawn_connection_reaper(pool: AsyncPool, idle_timeout: Duration) { + const MAX_LIFETIME: Duration = Duration::from_secs(30 * 60); + let Some(min_idle) = ENV_VARS.store.connection_min_idle else { + // If this is None, we will never reap anything + return; + }; + // What happens here isn't exactly what we would like to have: we would + // like to have at any point `min_idle` unused connections in the pool, + // but there is no way to achieve that with deadpool. Instead, we try to + // keep `min_idle` connections around if they exist + tokio::task::spawn(async move { + loop { + let mut idle_count = 0; + pool.retain(|_, metrics| { + if metrics.age() > MAX_LIFETIME { + return false; + } + if metrics.last_used() > idle_timeout { + idle_count += 1; + return idle_count <= min_idle; + } + true + }); + tokio::time::sleep(Duration::from_secs(30)).await; + } + }); +} + +pub(crate) struct WaitMeter { + wait_gauge: Gauge, + pub(crate) wait_stats: PoolWaitStats, +} + +impl WaitMeter { + pub(crate) fn new(registry: &MetricsRegistry, const_labels: HashMap) -> Self { + let wait_gauge = registry + .global_gauge( + "store_connection_wait_time_ms", + "Average connection wait time", + const_labels, + ) + .expect("failed to create `store_connection_wait_time_ms` counter"); + let wait_stats = Arc::new(RwLock::new(MovingStats::default())); + + Self { + wait_gauge, + wait_stats, + } + } + + pub(crate) fn add_conn_wait_time(&self, duration: Duration) { + self.wait_stats + .write() + .unwrap() + .add_and_register(duration, &self.wait_gauge); + } +} diff --git a/store/postgres/src/pool/mod.rs b/store/postgres/src/pool/mod.rs index a94238fd62f..b7d3bda1db9 100644 --- a/store/postgres/src/pool/mod.rs +++ b/store/postgres/src/pool/mod.rs @@ -1,21 +1,18 @@ -use diesel::r2d2::Builder; -use diesel::{connection::SimpleConnection, pg::PgConnection}; -use diesel::{ - r2d2::{ConnectionManager, Pool, PooledConnection}, - Connection, -}; -use diesel::{sql_query, RunQueryDsl}; - +use deadpool::managed::{PoolError, Timeouts}; +use deadpool::Runtime; +use diesel::sql_query; +use diesel_async::async_connection_wrapper::AsyncConnectionWrapper; +use diesel_async::{AsyncConnection as _, RunQueryDsl, SimpleAsyncConnection}; use 
diesel_migrations::{EmbeddedMigrations, HarnessWithOutput}; + use graph::cheap_clone::CheapClone; use graph::components::store::QueryPermit; use graph::derive::CheapClone; use graph::internal_error; use graph::prelude::tokio::time::Instant; use graph::prelude::{ - anyhow::anyhow, crit, debug, error, info, o, tokio::sync::Semaphore, CancelGuard, CancelHandle, - CancelToken as _, CancelableError, Gauge, Logger, MovingStats, PoolWaitStats, StoreError, - ENV_VARS, + anyhow::anyhow, crit, debug, error, info, o, Gauge, Logger, MovingStats, PoolWaitStats, + StoreError, ENV_VARS, }; use graph::prelude::{tokio, MetricsRegistry}; use graph::slog::warn; @@ -27,16 +24,23 @@ use std::time::Duration; use std::{collections::HashMap, sync::RwLock}; use crate::catalog; +use crate::pool::manager::{ConnectionManager, WaitMeter}; use crate::primary::{self, Mirror, Namespace}; use crate::{Shard, PRIMARY_SHARD}; mod coordinator; mod foreign_server; -mod state_tracker; +mod manager; + +pub use diesel_async::scoped_futures::ScopedFutureExt; pub use coordinator::PoolCoordinator; pub use foreign_server::ForeignServer; -use state_tracker::{ErrorHandler, EventHandler, StateTracker}; +use manager::StateTracker; + +type AsyncPool = deadpool::managed::Pool; +/// A database connection for asynchronous diesel operations +pub type AsyncPgConnection = deadpool::managed::Object; /// The namespace under which the `PRIMARY_TABLES` are mapped into each /// shard @@ -178,6 +182,7 @@ impl PoolState { // We just tried to set up the pool; if it is still not set up and // we didn't have an error, it means the database is not available if self.needs_setup() { + error!(self.logger, "Database is not available, setup did not work"); return Err(StoreError::DatabaseUnavailable); } else { Ok(pool) @@ -204,6 +209,7 @@ impl PoolState { } } } + #[derive(Clone)] pub struct ConnectionPool { inner: PoolState, @@ -255,10 +261,9 @@ impl ConnectionPool { registry: Arc, coord: Arc, ) -> ConnectionPool { - let state_tracker = StateTracker::new(); let shard = Shard::new(shard_name.to_string()).expect("shard_name is a valid name for a shard"); - let inner = { + let (inner, state_tracker) = { let pool = PoolInner::create( shard.clone(), pool_name.as_str(), @@ -267,12 +272,12 @@ impl ConnectionPool { fdw_pool_size, logger, registry, - state_tracker.clone(), ); + let state_tracker = pool.state_tracker.clone(); if pool_name.is_replica() { - PoolState::ready(Arc::new(pool)) + (PoolState::ready(Arc::new(pool)), state_tracker) } else { - PoolState::created(Arc::new(pool), coord) + (PoolState::created(Arc::new(pool), coord), state_tracker) } }; ConnectionPool { @@ -310,61 +315,8 @@ impl ConnectionPool { } } - /// Execute a closure with a connection to the database. - /// - /// # API - /// The API of using a closure to bound the usage of the connection serves several - /// purposes: - /// - /// * Moves blocking database access out of the `Future::poll`. Within - /// `Future::poll` (which includes all `async` methods) it is illegal to - /// perform a blocking operation. This includes all accesses to the - /// database, acquiring of locks, etc. Calling a blocking operation can - /// cause problems with `Future` combinators (including but not limited - /// to select, timeout, and FuturesUnordered) and problems with - /// executors/runtimes. This method moves the database work onto another - /// thread in a way which does not block `Future::poll`. - /// - /// * Limit the total number of connections. 
Because the supplied closure - /// takes a reference, we know the scope of the usage of all entity - /// connections and can limit their use in a non-blocking way. - /// - /// # Cancellation - /// The normal pattern for futures in Rust is drop to cancel. Once we - /// spawn the database work in a thread though, this expectation no longer - /// holds because the spawned task is the independent of this future. So, - /// this method provides a cancel token which indicates that the `Future` - /// has been dropped. This isn't *quite* as good as drop on cancel, - /// because a drop on cancel can do things like cancel http requests that - /// are in flight, but checking for cancel periodically is a significant - /// improvement. - /// - /// The implementation of the supplied closure should check for cancel - /// between every operation that is potentially blocking. This includes - /// any method which may interact with the database. The check can be - /// conveniently written as `token.check_cancel()?;`. It is low overhead - /// to check for cancel, so when in doubt it is better to have too many - /// checks than too few. - /// - /// # Panics: - /// * This task will panic if the supplied closure panics - /// * This task will panic if the supplied closure returns Err(Cancelled) - /// when the supplied cancel token is not cancelled. - pub(crate) async fn with_conn( - &self, - f: impl 'static - + Send - + FnOnce( - &mut PooledConnection>, - &CancelHandle, - ) -> Result>, - ) -> Result { - let pool = self.get_ready()?; - pool.with_conn(f).await - } - - pub fn get(&self) -> Result>, StoreError> { - self.get_ready()?.get() + pub async fn get(&self) -> Result { + self.get_ready()?.get().await } /// Get a connection from the pool for foreign data wrapper access; @@ -374,29 +326,30 @@ impl ConnectionPool { /// The `timeout` is called every time we time out waiting for a /// connection. If `timeout` returns `true`, `get_fdw` returns with that /// error, otherwise we try again to get a connection. - pub fn get_fdw( + pub async fn get_fdw( &self, logger: &Logger, timeout: F, - ) -> Result>, StoreError> + ) -> Result where F: FnMut() -> bool, { - self.get_ready()?.get_fdw(logger, timeout) + self.get_ready()?.get_fdw(logger, timeout).await } /// Get a connection from the pool for foreign data wrapper access if /// one is available - pub fn try_get_fdw( + pub async fn try_get_fdw( &self, logger: &Logger, timeout: Duration, - ) -> Option>> { + ) -> Option { let Ok(inner) = self.get_ready() else { return None; }; self.state_tracker .ignore_timeout(|| inner.try_get_fdw(logger, timeout)) + .await } pub(crate) async fn query_permit(&self) -> QueryPermit { @@ -410,7 +363,7 @@ impl ConnectionPool { } pub(crate) fn wait_stats(&self) -> PoolWaitStats { - self.inner.get_unready().wait_stats.cheap_clone() + self.inner.get_unready().wait_meter.wait_stats.cheap_clone() } /// Mirror key tables from the primary into our own schema. We do this @@ -424,11 +377,10 @@ impl ConnectionPool { } } -#[derive(Clone)] pub struct PoolInner { logger: Logger, pub shard: Shard, - pool: Pool>, + pool: AsyncPool, // A separate pool for connections that will use foreign data wrappers. // Once such a connection accesses a foreign table, Postgres keeps a // connection to the foreign server until the connection is closed. @@ -439,10 +391,11 @@ pub struct PoolInner { // this will no longer be needed since it will then be possible to // explicitly close connections to foreign servers when a connection is // returned to the pool. 
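The fdw pool configured below relies on one deadpool mechanism worth calling out: `Timeouts` can be overridden per checkout, so one pool can serve callers with different patience. A sketch, generic over the manager, with the wait duration left to the caller:

use std::time::Duration;
use deadpool::managed::{Manager, Object, Pool, PoolError, Timeouts};

async fn get_with_wait<M: Manager>(
    pool: &Pool<M>,
    wait: Duration,
) -> Result<Object<M>, PoolError<M::Error>> {
    let timeouts = Timeouts {
        wait: Some(wait), // only override how long we wait for a free slot
        ..Timeouts::default()
    };
    pool.timeout_get(&timeouts).await
}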
- fdw_pool: Option>>, - limiter: Arc, + fdw_pool: Option, postgres_url: String, - pub(crate) wait_stats: PoolWaitStats, + /// Measures how long we spend getting connections from the main `pool` + wait_meter: WaitMeter, + state_tracker: StateTracker, // Limits the number of graphql queries that may execute concurrently. Since one graphql query // may require multiple DB queries, it is useful to organize the queue at the graphql level so @@ -462,7 +415,6 @@ impl PoolInner { fdw_pool_size: Option, logger: &Logger, registry: Arc, - state_tracker: StateTracker, ) -> PoolInner { check_mirrored_tables(); @@ -474,64 +426,62 @@ impl PoolInner { map.insert("shard".to_string(), shard.to_string()); map }; - let error_counter = registry - .global_counter( - "store_connection_error_count", - "The number of Postgres connections errors", - const_labels.clone(), - ) - .expect("failed to create `store_connection_error_count` counter"); - let error_handler = Box::new(ErrorHandler::new( + + let state_tracker = StateTracker::new(logger_pool.cheap_clone()); + + // Connect to Postgres + let conn_manager = ConnectionManager::new( logger_pool.clone(), - error_counter, + postgres_url.clone(), state_tracker.clone(), - )); - let wait_stats = Arc::new(RwLock::new(MovingStats::default())); - let event_handler = Box::new(EventHandler::new( - logger_pool.clone(), - registry.cheap_clone(), - wait_stats.clone(), + ®istry, const_labels.clone(), - state_tracker, - )); + ); + + let timeouts = Timeouts { + wait: Some(ENV_VARS.store.connection_timeout), + create: Some(ENV_VARS.store.connection_timeout), + recycle: Some(ENV_VARS.store.connection_timeout), + }; + + // The post_create and post_recycle hooks are only called when + // create and recycle succeed; we can therefore mark the pool + // available + let pool = AsyncPool::builder(conn_manager.clone()) + .max_size(pool_size as usize) + .timeouts(timeouts) + .runtime(Runtime::Tokio1) + .post_create(state_tracker.mark_available_hook()) + .post_recycle(state_tracker.mark_available_hook()) + .build() + .expect("failed to create connection pool"); + + manager::spawn_size_stat_collector(pool.clone(), ®istry, const_labels.clone()); + + manager::spawn_connection_reaper(pool.clone(), ENV_VARS.store.connection_idle_timeout); + + let wait_meter = WaitMeter::new(®istry, const_labels.clone()); - // Connect to Postgres - let conn_manager = ConnectionManager::new(postgres_url.clone()); - let min_idle = ENV_VARS.store.connection_min_idle.filter(|min_idle| { - if *min_idle <= pool_size { - true - } else { - warn!( - logger_pool, - "Configuration error: min idle {} exceeds pool size {}, ignoring min idle", - min_idle, - pool_size - ); - false - } - }); - let builder: Builder> = Pool::builder() - .error_handler(error_handler.clone()) - .event_handler(event_handler.clone()) - .connection_timeout(ENV_VARS.store.connection_timeout) - .max_size(pool_size) - .min_idle(min_idle) - .idle_timeout(Some(ENV_VARS.store.connection_idle_timeout)); - let pool = builder.build_unchecked(conn_manager); let fdw_pool = fdw_pool_size.map(|pool_size| { - let conn_manager = ConnectionManager::new(postgres_url.clone()); - let builder: Builder> = Pool::builder() - .error_handler(error_handler) - .event_handler(event_handler) - .connection_timeout(ENV_VARS.store.connection_timeout) - .max_size(pool_size) - .min_idle(Some(1)) - .idle_timeout(Some(FDW_IDLE_TIMEOUT)); - builder.build_unchecked(conn_manager) + let fdw_timeouts = Timeouts { + wait: Some(ENV_VARS.store.connection_timeout), + create: None, + recycle: 
Some(FDW_IDLE_TIMEOUT), + }; + + let fdw_pool = AsyncPool::builder(conn_manager) + .max_size(pool_size as usize) + .timeouts(fdw_timeouts) + .runtime(Runtime::Tokio1) + .post_create(state_tracker.mark_available_hook()) + .post_recycle(state_tracker.mark_available_hook()) + .build() + .expect("failed to create fdw connection pool"); + + manager::spawn_connection_reaper(fdw_pool.clone(), FDW_IDLE_TIMEOUT); + fdw_pool }); - let max_concurrent_queries = pool_size as usize + ENV_VARS.store.extra_query_permits; - let limiter = Arc::new(Semaphore::new(max_concurrent_queries)); info!(logger_store, "Pool successfully connected to Postgres"); let semaphore_wait_gauge = registry @@ -541,6 +491,7 @@ impl PoolInner { const_labels, ) .expect("failed to create `query_effort_ms` counter"); + let max_concurrent_queries = pool_size as usize + ENV_VARS.store.extra_query_permits; let query_semaphore = Arc::new(tokio::sync::Semaphore::new(max_concurrent_queries)); PoolInner { logger: logger_pool, @@ -548,111 +499,63 @@ impl PoolInner { postgres_url, pool, fdw_pool, - limiter, - wait_stats, + wait_meter, + state_tracker, semaphore_wait_stats: Arc::new(RwLock::new(MovingStats::default())), query_semaphore, semaphore_wait_gauge, } } - /// Execute a closure with a connection to the database. - /// - /// # API - /// The API of using a closure to bound the usage of the connection serves several - /// purposes: + /// Helper so that getting a connection from the main pool and the + /// fdw_pool collect the same metrics. /// - /// * Moves blocking database access out of the `Future::poll`. Within - /// `Future::poll` (which includes all `async` methods) it is illegal to - /// perform a blocking operation. This includes all accesses to the - /// database, acquiring of locks, etc. Calling a blocking operation can - /// cause problems with `Future` combinators (including but not limited - /// to select, timeout, and FuturesUnordered) and problems with - /// executors/runtimes. This method moves the database work onto another - /// thread in a way which does not block `Future::poll`. + /// If `timeouts` is `None`, the default pool timeouts are used. /// - /// * Limit the total number of connections. Because the supplied closure - /// takes a reference, we know the scope of the usage of all entity - /// connections and can limit their use in a non-blocking way. - /// - /// # Cancellation - /// The normal pattern for futures in Rust is drop to cancel. Once we - /// spawn the database work in a thread though, this expectation no longer - /// holds because the spawned task is the independent of this future. So, - /// this method provides a cancel token which indicates that the `Future` - /// has been dropped. This isn't *quite* as good as drop on cancel, - /// because a drop on cancel can do things like cancel http requests that - /// are in flight, but checking for cancel periodically is a significant - /// improvement. - /// - /// The implementation of the supplied closure should check for cancel - /// between every operation that is potentially blocking. This includes - /// any method which may interact with the database. The check can be - /// conveniently written as `token.check_cancel()?;`. It is low overhead - /// to check for cancel, so when in doubt it is better to have too many - /// checks than too few. - /// - /// # Panics: - /// * This task will panic if the supplied closure panics - /// * This task will panic if the supplied closure returns Err(Cancelled) - /// when the supplied cancel token is not cancelled. 
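// --- Illustrative sketch (not part of the diff): the deadpool setup used
// above, reduced to stock diesel-async types. `AsyncPool` in this file is
// an alias for a deadpool-managed pool; here we assume the stock
// `AsyncDieselConnectionManager` instead of the crate's custom
// `ConnectionManager`, and the timeout values are invented.

use std::time::Duration;

use deadpool::{managed::Timeouts, Runtime};
use diesel_async::pooled_connection::{deadpool::Pool, AsyncDieselConnectionManager};
use diesel_async::AsyncPgConnection;

fn build_pool(postgres_url: &str, pool_size: usize) -> Pool<AsyncPgConnection> {
    let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new(postgres_url);
    // `wait` bounds how long a checkout may block; `create` and `recycle`
    // bound how long establishing or revalidating a connection may take.
    let timeouts = Timeouts {
        wait: Some(Duration::from_secs(5)),
        create: Some(Duration::from_secs(5)),
        recycle: Some(Duration::from_secs(5)),
    };
    Pool::builder(manager)
        .max_size(pool_size)
        .timeouts(timeouts)
        .runtime(Runtime::Tokio1)
        .build()
        .expect("failed to create connection pool")
}
// --- end sketch ---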
- pub(crate) async fn with_conn( + /// On error, returns `StoreError::DatabaseUnavailable` and marks the + /// pool as unavailable if we can tell that the error is due to the pool + /// being closed. Returns `StoreError::StatementTimeout` if the error is + /// due to a timeout. + async fn get_from_pool( &self, - f: impl 'static - + Send - + FnOnce( - &mut PooledConnection>, - &CancelHandle, - ) -> Result>, - ) -> Result { - let _permit = self.limiter.acquire().await; - let pool = self.clone(); - - let cancel_guard = CancelGuard::new(); - let cancel_handle = cancel_guard.handle(); - - let result = graph::spawn_blocking_allow_panic(move || { - // It is possible time has passed between scheduling on the - // threadpool and being executed. Time to check for cancel. - cancel_handle.check_cancel()?; - - // A failure to establish a connection is propagated as though the - // closure failed. - let mut conn = pool - .get() - .map_err(|_| CancelableError::Error(StoreError::DatabaseUnavailable))?; - - // It is possible time has passed while establishing a connection. - // Time to check for cancel. - cancel_handle.check_cancel()?; - - f(&mut conn, &cancel_handle) - }) - .await - .unwrap(); // Propagate panics, though there shouldn't be any. - - drop(cancel_guard); - - // Finding cancel isn't technically unreachable, since there is nothing - // stopping the supplied closure from returning Canceled even if the - // supplied handle wasn't canceled. That would be very unexpected, the - // doc comment for this function says we will panic in this scenario. - match result { - Ok(t) => Ok(t), - Err(CancelableError::Error(e)) => Err(e), - Err(CancelableError::Cancel) => panic!("The closure supplied to with_entity_conn must not return Err(Canceled) unless the supplied token was canceled."), + pool: &AsyncPool, + timeouts: Option, + ) -> Result { + let start = Instant::now(); + let res = match timeouts { + Some(timeouts) => pool.timeout_get(&timeouts).await, + None => pool.get().await, + }; + let elapsed = start.elapsed(); + self.wait_meter.add_conn_wait_time(elapsed); + match res { + Ok(conn) => { + self.state_tracker.mark_available(); + return Ok(conn); + } + Err(PoolError::Closed) | Err(PoolError::Backend(_)) => { + self.state_tracker.mark_unavailable(Duration::from_nanos(0)); + return Err(StoreError::DatabaseUnavailable); + } + Err(PoolError::Timeout(_)) => { + if !self.state_tracker.timeout_is_ignored() { + self.state_tracker.mark_unavailable(elapsed); + } + return Err(StoreError::StatementTimeout); + } + Err(PoolError::NoRuntimeSpecified) | Err(PoolError::PostCreateHook(_)) => { + let e = res.err().unwrap(); + unreachable!("impossible error {e}"); + } } } - pub fn get(&self) -> Result>, StoreError> { - self.pool.get().map_err(|_| StoreError::DatabaseUnavailable) + async fn get(&self) -> Result { + self.get_from_pool(&self.pool, None).await } /// Get the pool for fdw connections. It is an error if none is configured - fn fdw_pool( - &self, - logger: &Logger, - ) -> Result<&Pool>, StoreError> { + fn fdw_pool(&self, logger: &Logger) -> Result<&AsyncPool, StoreError> { let pool = match &self.fdw_pool { Some(pool) => pool, None => { @@ -672,21 +575,21 @@ impl PoolInner { /// The `timeout` is called every time we time out waiting for a /// connection. If `timeout` returns `true`, `get_fdw` returns with that /// error, otherwise we try again to get a connection. 
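// --- Illustrative before/after for callers (not part of the diff;
// `row_count` is a hypothetical query helper). The old closure API hopped
// onto a blocking thread and had to poll a cancel token between
// operations:
//
//     let n = pool.with_conn(|conn, cancel| {
//         cancel.check_cancel()?;
//         row_count(conn).map_err(CancelableError::from)
//     }).await?;
//
// With the async pool the checkout itself is awaited and the explicit
// cancel-token plumbing disappears:
//
//     let mut conn = pool.get().await?;
//     let n = row_count(&mut conn).await?;
// --- end sketch ---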
- pub fn get_fdw( + async fn get_fdw( &self, logger: &Logger, mut timeout: F, - ) -> Result>, StoreError> + ) -> Result where F: FnMut() -> bool, { let pool = self.fdw_pool(logger)?; loop { - match pool.get() { + match self.get_from_pool(&pool, None).await { Ok(conn) => return Ok(conn), Err(e) => { if timeout() { - return Err(e.into()); + return Err(anyhow!("timeout in get_fdw: {e}").into()); } } } @@ -696,11 +599,7 @@ impl PoolInner { /// Get a connection from the fdw pool if one is available. We wait for /// `timeout` for a connection which should be set just big enough to /// allow establishing a connection - pub fn try_get_fdw( - &self, - logger: &Logger, - timeout: Duration, - ) -> Option>> { + async fn try_get_fdw(&self, logger: &Logger, timeout: Duration) -> Option { // Any error trying to get a connection is treated as "couldn't get // a connection in time". If there is a serious error with the // database, e.g., because it's not available, the next database @@ -708,7 +607,12 @@ impl PoolInner { let Ok(fdw_pool) = self.fdw_pool(logger) else { return None; }; - let Ok(conn) = fdw_pool.get_timeout(timeout) else { + let timeouts = Timeouts { + wait: Some(timeout), + create: None, + recycle: None, + }; + let Ok(conn) = self.get_from_pool(fdw_pool, Some(timeouts)).await else { return None; }; Some(conn) @@ -719,22 +623,19 @@ impl PoolInner { } /// Check that we can connect to the database - pub fn check(&self) -> bool { - self.pool - .get() - .ok() - .map(|mut conn| sql_query("select 1").execute(&mut conn).is_ok()) - .unwrap_or(false) + pub async fn check(&self) -> bool { + let Ok(mut conn) = self.get().await else { + return false; + }; + + sql_query("select 1").execute(&mut conn).await.is_ok() } - fn locale_check( - &self, - logger: &Logger, - mut conn: PooledConnection>, - ) -> Result<(), StoreError> { + async fn locale_check(&self, logger: &Logger) -> Result<(), StoreError> { + let mut conn = self.get().await?; Ok( - if let Err(msg) = catalog::Locale::load(&mut conn)?.suitable() { - if &self.shard == &*PRIMARY_SHARD && primary::is_empty(&mut conn)? { + if let Err(msg) = catalog::Locale::load(&mut conn).await?.suitable() { + if &self.shard == &*PRIMARY_SHARD && primary::is_empty(&mut conn).await? { const MSG: &str = "Database does not use C locale. \ Please check the graph-node documentation for how to set up the database locale"; @@ -758,21 +659,26 @@ impl PoolInner { permit.unwrap() } - fn configure_fdw(&self, servers: &[ForeignServer]) -> Result<(), StoreError> { + async fn configure_fdw(&self, servers: &[ForeignServer]) -> Result<(), StoreError> { info!(&self.logger, "Setting up fdw"); - let mut conn = self.get()?; - conn.batch_execute("create extension if not exists postgres_fdw")?; + let mut conn = self.get().await?; + conn.batch_execute("create extension if not exists postgres_fdw") + .await?; conn.transaction(|conn| { - let current_servers: Vec = crate::catalog::current_servers(conn)?; - for server in servers.iter().filter(|server| server.shard != self.shard) { - if current_servers.contains(&server.name) { - server.update(conn)?; - } else { - server.create(conn)?; + async { + let current_servers: Vec = crate::catalog::current_servers(conn).await?; + for server in servers.iter().filter(|server| server.shard != self.shard) { + if current_servers.contains(&server.name) { + server.update(conn).await?; + } else { + server.create(conn).await?; + } } + Ok(()) } - Ok(()) + .scope_boxed() }) + .await } /// Do the part of database setup that only affects this pool. 
Those @@ -785,36 +691,48 @@ impl PoolInner { self: Arc, servers: &[ForeignServer], ) -> Result { - self.configure_fdw(servers)?; - let mut conn = self.get()?; - let (this, count) = conn.transaction(|conn| -> Result<_, StoreError> { - let count = migrate_schema(&self.logger, conn)?; - Ok((self, count)) - })?; + self.locale_check(&self.logger).await?; - this.locale_check(&this.logger, conn)?; + self.configure_fdw(servers).await?; - Ok(count) + // We use AsyncConnectionWrapper here since diesel_async doesn't + // offer a truly async way to run migrations, and we need to be very + // careful that block_on only gets called on a blocking thread to + // avoid errors from the tokio runtime + let logger = self.logger.cheap_clone(); + let mut conn = self.get().await.map(AsyncConnectionWrapper::from)?; + + tokio::task::spawn_blocking(move || { + diesel::Connection::transaction::<_, StoreError, _>(&mut conn, |conn| { + migrate_schema(&logger, conn) + }) + }) + .await + .expect("migration task panicked") } /// If this is the primary shard, drop the namespace `CROSS_SHARD_NSP` - fn drop_cross_shard_views(&self) -> Result<(), StoreError> { + async fn drop_cross_shard_views(&self) -> Result<(), StoreError> { if self.shard != *PRIMARY_SHARD { return Ok(()); } info!(&self.logger, "Dropping cross-shard views"); - let mut conn = self.get()?; + let mut conn = self.get().await?; conn.transaction(|conn| { - let query = format!("drop schema if exists {} cascade", CROSS_SHARD_NSP); - conn.batch_execute(&query)?; - Ok(()) + async { + let query = format!("drop schema if exists {} cascade", CROSS_SHARD_NSP); + conn.batch_execute(&query).await?; + Ok(()) + } + .scope_boxed() }) + .await } /// If this is the primary shard, create the namespace `CROSS_SHARD_NSP` /// and populate it with tables that union various imported tables - fn create_cross_shard_views(&self, servers: &[ForeignServer]) -> Result<(), StoreError> { + async fn create_cross_shard_views(&self, servers: &[ForeignServer]) -> Result<(), StoreError> { fn shard_nsp_pairs<'a>( current: &Shard, local_nsp: &str, @@ -837,9 +755,9 @@ impl PoolInner { return Ok(()); } - let mut conn = self.get()?; + let mut conn = self.get().await?; let sharded = Namespace::special(CROSS_SHARD_NSP); - if catalog::has_namespace(&mut conn, &sharded)? { + if catalog::has_namespace(&mut conn, &sharded).await? 
{ // We dropped the namespace before, but another node must have // recreated it in the meantime so we don't need to do anything return Ok(()); @@ -847,24 +765,29 @@ impl PoolInner { info!(&self.logger, "Creating cross-shard views"); conn.transaction(|conn| { - let query = format!("create schema {}", CROSS_SHARD_NSP); - conn.batch_execute(&query)?; - for (src_nsp, src_tables) in SHARDED_TABLES { - // Pairs of (shard, nsp) for all servers - let nsps = shard_nsp_pairs(&self.shard, src_nsp, servers); - for src_table in src_tables { - let create_view = catalog::create_cross_shard_view( - conn, - src_nsp, - src_table, - CROSS_SHARD_NSP, - &nsps, - )?; - conn.batch_execute(&create_view)?; + async { + let query = format!("create schema {}", CROSS_SHARD_NSP); + conn.batch_execute(&query).await?; + for (src_nsp, src_tables) in SHARDED_TABLES { + // Pairs of (shard, nsp) for all servers + let nsps = shard_nsp_pairs(&self.shard, src_nsp, servers); + for src_table in src_tables { + let create_view = catalog::create_cross_shard_view( + conn, + src_nsp, + src_table, + CROSS_SHARD_NSP, + &nsps, + ) + .await?; + conn.batch_execute(&create_view).await?; + } } + Ok(()) } - Ok(()) + .scope_boxed() }) + .await } /// Copy the data from key tables in the primary into our local schema @@ -873,22 +796,20 @@ impl PoolInner { if self.shard == *PRIMARY_SHARD { return Ok(()); } - self.with_conn(|conn, handle| { - conn.transaction(|conn| { - primary::Mirror::refresh_tables(conn, handle).map_err(CancelableError::from) - }) - }) - .await + let mut conn = self.get().await?; + conn.transaction(|conn| primary::Mirror::refresh_tables(conn).scope_boxed()) + .await } /// The foreign server `server` had schema changes, and we therefore /// need to remap anything that we are importing via fdw to make sure we /// are using this updated schema - pub fn remap(&self, server: &ForeignServer) -> Result<(), StoreError> { + pub async fn remap(&self, server: &ForeignServer) -> Result<(), StoreError> { if &server.shard == &*PRIMARY_SHARD { info!(&self.logger, "Mapping primary"); - let mut conn = self.get()?; - conn.transaction(|conn| ForeignServer::map_primary(conn, &self.shard))?; + let mut conn = self.get().await?; + conn.transaction(|conn| ForeignServer::map_primary(conn, &self.shard).scope_boxed()) + .await?; } if &server.shard != &self.shard { info!( @@ -896,19 +817,20 @@ impl PoolInner { "Mapping metadata from {}", server.shard.as_str() ); - let mut conn = self.get()?; - conn.transaction(|conn| server.map_metadata(conn))?; + let mut conn = self.get().await?; + conn.transaction(|conn| server.map_metadata(conn).scope_boxed()) + .await?; } Ok(()) } - pub fn needs_remap(&self, server: &ForeignServer) -> Result { + pub async fn needs_remap(&self, server: &ForeignServer) -> Result { if &server.shard == &self.shard { return Ok(false); } - let mut conn = self.get()?; - server.needs_remap(&mut conn) + let mut conn = self.get().await?; + server.needs_remap(&mut conn).await } } @@ -926,17 +848,16 @@ impl MigrationCount { } /// Run all schema migrations. -/// -/// When multiple `graph-node` processes start up at the same time, we ensure -/// that they do not run migrations in parallel by using `blocking_conn` to -/// serialize them. The `conn` is used to run the actual migration. 
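// --- Illustrative sketch (not part of the diff) of the pattern `migrate`
// now uses: diesel migrations are still synchronous, so the async
// connection is wrapped in an `AsyncConnectionWrapper` and driven from a
// dedicated blocking thread, keeping `block_on` off the async runtime.
// The `migrations` argument would come from diesel_migrations'
// `embed_migrations!` macro.

use diesel_async::async_connection_wrapper::AsyncConnectionWrapper;
use diesel_async::AsyncPgConnection;
use diesel_migrations::{EmbeddedMigrations, MigrationHarness};

async fn run_migrations(
    conn: AsyncPgConnection,
    migrations: EmbeddedMigrations,
) -> anyhow::Result<()> {
    let mut conn: AsyncConnectionWrapper<AsyncPgConnection> = conn.into();
    tokio::task::spawn_blocking(move || {
        // Blocking is fine here: we are on tokio's blocking thread pool.
        conn.run_pending_migrations(migrations)
            .map(|_| ())
            .map_err(|e| anyhow::anyhow!(e))
    })
    .await?
}
// --- end sketch ---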
-fn migrate_schema(logger: &Logger, conn: &mut PgConnection) -> Result { +fn migrate_schema( + logger: &Logger, + conn: &mut AsyncConnectionWrapper, +) -> Result { use diesel_migrations::MigrationHarness; // Collect migration logging output let mut output = vec![]; - let old_count = catalog::migration_count(conn)?; + let old_count = graph::block_on(catalog::migration_count(conn))?; let mut harness = HarnessWithOutput::new(conn, &mut output); info!(logger, "Running migrations"); @@ -961,7 +882,7 @@ fn migrate_schema(logger: &Logger, conn: &mut PgConnection) -> Result msg); } - let migrations = catalog::migration_count(conn)?; + let migrations = graph::block_on(catalog::migration_count(conn))?; Ok(MigrationCount { new: migrations, diff --git a/store/postgres/src/pool/state_tracker.rs b/store/postgres/src/pool/state_tracker.rs deleted file mode 100644 index 231a66a9292..00000000000 --- a/store/postgres/src/pool/state_tracker.rs +++ /dev/null @@ -1,224 +0,0 @@ -//! Event/error handlers for our r2d2 pools - -use diesel::r2d2::{self, event as e, HandleEvent}; - -use graph::prelude::error; -use graph::prelude::Counter; -use graph::prelude::Gauge; -use graph::prelude::MetricsRegistry; -use graph::prelude::PoolWaitStats; -use graph::slog::Logger; - -use std::collections::HashMap; -use std::fmt; -use std::sync::atomic::AtomicBool; -use std::sync::atomic::Ordering; -use std::sync::Arc; -use std::time::Duration; - -/// Track whether a database is available or not using the event and error -/// handlers from this module. The pool must be set up with these handlers -/// when it is created -#[derive(Clone)] -pub(super) struct StateTracker { - available: Arc, - ignore_timeout: Arc, -} - -impl StateTracker { - pub(super) fn new() -> Self { - Self { - available: Arc::new(AtomicBool::new(true)), - ignore_timeout: Arc::new(AtomicBool::new(false)), - } - } - - pub(super) fn mark_available(&self) { - self.available.store(true, Ordering::Relaxed); - } - - fn mark_unavailable(&self) { - self.available.store(false, Ordering::Relaxed); - } - - pub(super) fn is_available(&self) -> bool { - self.available.load(Ordering::Relaxed) - } - - fn timeout_is_ignored(&self) -> bool { - self.ignore_timeout.load(Ordering::Relaxed) - } - - pub(super) fn ignore_timeout(&self, f: F) -> R - where - F: FnOnce() -> R, - { - self.ignore_timeout.store(true, Ordering::Relaxed); - let res = f(); - self.ignore_timeout.store(false, Ordering::Relaxed); - res - } -} - -#[derive(Clone)] -pub(super) struct ErrorHandler { - logger: Logger, - counter: Counter, - state_tracker: StateTracker, -} - -impl ErrorHandler { - pub(super) fn new(logger: Logger, counter: Counter, state_tracker: StateTracker) -> Self { - Self { - logger, - counter, - state_tracker, - } - } -} -impl std::fmt::Debug for ErrorHandler { - fn fmt(&self, _f: &mut fmt::Formatter) -> fmt::Result { - fmt::Result::Ok(()) - } -} - -impl r2d2::HandleError for ErrorHandler { - fn handle_error(&self, error: r2d2::Error) { - let msg = brief_error_msg(&error); - - // Don't count canceling statements for timeouts etc. as a - // connection error. Unfortunately, we only have the textual error - // and need to infer whether the error indicates that the database - // is down or if something else happened. When querying a replica, - // these messages indicate that a query was canceled because it - // conflicted with replication, but does not indicate that there is - // a problem with the database itself. 
- // - // This check will break if users run Postgres (or even graph-node) - // in a locale other than English. In that case, their database will - // be marked as unavailable even though it is perfectly fine. - if msg.contains("canceling statement") - || msg.contains("terminating connection due to conflict with recovery") - { - return; - } - - self.counter.inc(); - if self.state_tracker.is_available() { - error!(self.logger, "Postgres connection error"; "error" => msg); - } - self.state_tracker.mark_unavailable(); - } -} - -#[derive(Clone)] -pub(super) struct EventHandler { - logger: Logger, - count_gauge: Gauge, - wait_gauge: Gauge, - size_gauge: Gauge, - wait_stats: PoolWaitStats, - state_tracker: StateTracker, -} - -impl EventHandler { - pub(super) fn new( - logger: Logger, - registry: Arc, - wait_stats: PoolWaitStats, - const_labels: HashMap, - state_tracker: StateTracker, - ) -> Self { - let count_gauge = registry - .global_gauge( - "store_connection_checkout_count", - "The number of Postgres connections currently checked out", - const_labels.clone(), - ) - .expect("failed to create `store_connection_checkout_count` counter"); - let wait_gauge = registry - .global_gauge( - "store_connection_wait_time_ms", - "Average connection wait time", - const_labels.clone(), - ) - .expect("failed to create `store_connection_wait_time_ms` counter"); - let size_gauge = registry - .global_gauge( - "store_connection_pool_size_count", - "Overall size of the connection pool", - const_labels, - ) - .expect("failed to create `store_connection_pool_size_count` counter"); - EventHandler { - logger, - count_gauge, - wait_gauge, - wait_stats, - size_gauge, - state_tracker, - } - } - - fn add_conn_wait_time(&self, duration: Duration) { - self.wait_stats - .write() - .unwrap() - .add_and_register(duration, &self.wait_gauge); - } -} - -impl std::fmt::Debug for EventHandler { - fn fmt(&self, _f: &mut fmt::Formatter) -> fmt::Result { - fmt::Result::Ok(()) - } -} - -impl HandleEvent for EventHandler { - fn handle_acquire(&self, _: e::AcquireEvent) { - self.size_gauge.inc(); - self.state_tracker.mark_available(); - } - - fn handle_release(&self, _: e::ReleaseEvent) { - self.size_gauge.dec(); - } - - fn handle_checkout(&self, event: e::CheckoutEvent) { - self.count_gauge.inc(); - self.add_conn_wait_time(event.duration()); - self.state_tracker.mark_available(); - } - - fn handle_timeout(&self, event: e::TimeoutEvent) { - if self.state_tracker.timeout_is_ignored() { - return; - } - self.add_conn_wait_time(event.timeout()); - if self.state_tracker.is_available() { - error!(self.logger, "Connection checkout timed out"; - "wait_ms" => event.timeout().as_millis() - ) - } - self.state_tracker.mark_unavailable(); - } - - fn handle_checkin(&self, _: e::CheckinEvent) { - self.count_gauge.dec(); - } -} - -fn brief_error_msg(error: &dyn std::error::Error) -> String { - // For 'Connection refused' errors, Postgres includes the IP and - // port number in the error message. We want to suppress that and - // only use the first line from the error message. 
For more detailed - // analysis, 'Connection refused' manifests as a - // `ConnectionError(BadConnection("could not connect to server: - // Connection refused.."))` - error - .to_string() - .split('\n') - .next() - .unwrap_or("no error details provided") - .to_string() -} diff --git a/store/postgres/src/primary.rs b/store/postgres/src/primary.rs index a92652b54aa..59e9fb4ec3d 100644 --- a/store/postgres/src/primary.rs +++ b/store/postgres/src/primary.rs @@ -6,10 +6,14 @@ use crate::{ detail::DeploymentDetail, pool::PRIMARY_PUBLIC, subgraph_store::{unused, Shard, PRIMARY_SHARD}, - ConnectionPool, ForeignServer, NotificationSender, + AsyncPgConnection, ConnectionPool, ForeignServer, NotificationSender, +}; +use diesel::dsl::{delete, insert_into, sql, update}; +use diesel::prelude::{ + BoolExpressionMethods, ExpressionMethods, JoinOnDsl, NullableExpressionMethods, + OptionalExtension, QueryDsl, }; use diesel::{ - connection::SimpleConnection, data_types::PgTimestamp, deserialize::FromSql, dsl::{exists, not, select}, @@ -17,26 +21,18 @@ use diesel::{ serialize::{Output, ToSql}, sql_types::{Array, BigInt, Bool, Integer, Text}, }; -use diesel::{ - dsl::{delete, insert_into, sql, update}, - r2d2::PooledConnection, -}; -use diesel::{pg::PgConnection, r2d2::ConnectionManager}; -use diesel::{ - prelude::{ - BoolExpressionMethods, ExpressionMethods, JoinOnDsl, NullableExpressionMethods, - OptionalExtension, QueryDsl, RunQueryDsl, - }, - Connection as _, +use diesel_async::{ + scoped_futures::{ScopedBoxFuture, ScopedFutureExt}, + RunQueryDsl, SimpleAsyncConnection as _, TransactionManager, }; use graph::{ - cheap_clone::CheapClone, components::store::DeploymentLocator, data::{ store::scalar::ToPrimitive, subgraph::{status, DeploymentFeatures}, }, derive::CheapClone, + futures03::{future::BoxFuture, FutureExt}, internal_error, prelude::{ anyhow, @@ -47,11 +43,10 @@ use graph::{ }; use graph::{ components::store::{DeploymentId as GraphDeploymentId, DeploymentSchemaVersion}, - prelude::{chrono, CancelHandle, CancelToken}, + prelude::chrono, }; use graph::{data::subgraph::schema::generate_entity_id, prelude::StoreEvent}; use itertools::Itertools; -use maybe_owned::MaybeOwnedMut; use std::{ borrow::Borrow, collections::HashMap, @@ -443,12 +438,12 @@ pub fn make_dummy_site(deployment: DeploymentHash, namespace: Namespace, network mod queries { use diesel::data_types::PgTimestamp; use diesel::dsl::{exists, sql}; - use diesel::pg::PgConnection; use diesel::prelude::{ BoolExpressionMethods, ExpressionMethods, JoinOnDsl, NullableExpressionMethods, - OptionalExtension, QueryDsl, RunQueryDsl, + OptionalExtension, QueryDsl, }; use diesel::sql_types::Text; + use diesel_async::RunQueryDsl; use graph::prelude::NodeId; use graph::{ components::store::DeploymentId as GraphDeploymentId, @@ -458,7 +453,7 @@ mod queries { }; use std::{collections::HashMap, convert::TryFrom, convert::TryInto}; - use crate::Shard; + use crate::{AsyncPgConnection, Shard}; use super::{DeploymentId, Schema, Site}; @@ -470,38 +465,40 @@ mod queries { use super::subgraph_deployment_assignment as a; use super::subgraph_version as v; - pub(super) fn find_active_site( - conn: &mut PgConnection, + pub(super) async fn find_active_site( + conn: &mut AsyncPgConnection, subgraph: &DeploymentHash, ) -> Result, StoreError> { let schema = ds::table .filter(ds::subgraph.eq(subgraph.to_string())) .filter(ds::active.eq(true)) .first::(conn) + .await .optional()?; schema.map(|schema| schema.try_into()).transpose() } - pub(super) fn find_site_by_ref( - conn: 
&mut PgConnection, + pub(super) async fn find_site_by_ref( + conn: &mut AsyncPgConnection, id: DeploymentId, ) -> Result, StoreError> { - let schema = ds::table.find(id).first::(conn).optional()?; + let schema = ds::table.find(id).first::(conn).await.optional()?; schema.map(|schema| schema.try_into()).transpose() } - pub(super) fn subgraph_exists( - conn: &mut PgConnection, + pub(super) async fn subgraph_exists( + conn: &mut AsyncPgConnection, name: &SubgraphName, ) -> Result { Ok( diesel::select(exists(s::table.filter(s::name.eq(name.as_str())))) - .get_result::(conn)?, + .get_result::(conn) + .await?, ) } - pub(super) fn current_deployment_for_subgraph( - conn: &mut PgConnection, + pub(super) async fn current_deployment_for_subgraph( + conn: &mut AsyncPgConnection, name: &SubgraphName, ) -> Result { let id = v::table @@ -509,6 +506,7 @@ mod queries { .filter(s::name.eq(name.as_str())) .select(v::deployment) .first::(conn) + .await .optional()?; match id { Some(id) => DeploymentHash::new(id) @@ -517,8 +515,8 @@ mod queries { } } - pub(super) fn deployments_for_subgraph( - conn: &mut PgConnection, + pub(super) async fn deployments_for_subgraph( + conn: &mut AsyncPgConnection, name: &str, ) -> Result, StoreError> { ds::table @@ -528,14 +526,15 @@ mod queries { .filter(ds::active) .order_by(v::created_at.asc()) .select(ds::all_columns) - .load::(conn)? + .load::(conn) + .await? .into_iter() .map(Site::try_from) .collect::, _>>() } - pub(super) fn subgraph_version( - conn: &mut PgConnection, + pub(super) async fn subgraph_version( + conn: &mut AsyncPgConnection, name: &str, use_current: bool, ) -> Result, StoreError> { @@ -547,6 +546,7 @@ mod queries { .filter(s::name.eq(&name)) .filter(ds::active) .first::(conn) + .await } else { ds::table .select(ds::all_columns) @@ -555,32 +555,35 @@ mod queries { .filter(s::name.eq(&name)) .filter(ds::active) .first::(conn) + .await }; deployment.optional()?.map(Site::try_from).transpose() } /// Find sites by their subgraph deployment hashes. If `ids` is empty, /// return all sites - pub(super) fn find_sites( - conn: &mut PgConnection, + pub(super) async fn find_sites( + conn: &mut AsyncPgConnection, ids: &[String], only_active: bool, ) -> Result, StoreError> { let schemas = if ids.is_empty() { if only_active { - ds::table.filter(ds::active).load::(conn)? + ds::table.filter(ds::active).load::(conn).await? } else { - ds::table.load::(conn)? + ds::table.load::(conn).await? } } else if only_active { ds::table .filter(ds::active) .filter(ds::subgraph.eq_any(ids)) - .load::(conn)? + .load::(conn) + .await? } else { ds::table .filter(ds::subgraph.eq_any(ids)) - .load::(conn)? + .load::(conn) + .await? }; schemas .into_iter() @@ -590,19 +593,22 @@ mod queries { /// Find sites by their subgraph deployment ids. 
If `ids` is empty, /// return no sites - pub(super) fn find_sites_by_id( - conn: &mut PgConnection, + pub(super) async fn find_sites_by_id( + conn: &mut AsyncPgConnection, ids: &[DeploymentId], ) -> Result, StoreError> { - let schemas = ds::table.filter(ds::id.eq_any(ids)).load::(conn)?; + let schemas = ds::table + .filter(ds::id.eq_any(ids)) + .load::(conn) + .await?; schemas .into_iter() .map(|schema| schema.try_into()) .collect() } - pub(super) fn find_site_in_shard( - conn: &mut PgConnection, + pub(super) async fn find_site_in_shard( + conn: &mut AsyncPgConnection, subgraph: &DeploymentHash, shard: &Shard, ) -> Result, StoreError> { @@ -610,27 +616,29 @@ mod queries { .filter(ds::subgraph.eq(subgraph.as_str())) .filter(ds::shard.eq(shard.as_str())) .first::(conn) + .await .optional()?; schema.map(|schema| schema.try_into()).transpose() } - pub(super) fn assignments( - conn: &mut PgConnection, + pub(super) async fn assignments( + conn: &mut AsyncPgConnection, node: &NodeId, ) -> Result, StoreError> { ds::table .inner_join(a::table.on(a::id.eq(ds::id))) .filter(a::node_id.eq(node.as_str())) .select(ds::all_columns) - .load::(conn)? + .load::(conn) + .await? .into_iter() .map(Site::try_from) .collect::, _>>() } // All assignments for a node that are currently not paused - pub(super) fn active_assignments( - conn: &mut PgConnection, + pub(super) async fn active_assignments( + conn: &mut AsyncPgConnection, node: &NodeId, ) -> Result, StoreError> { ds::table @@ -638,40 +646,39 @@ mod queries { .filter(a::node_id.eq(node.as_str())) .filter(a::paused_at.is_null()) .select(ds::all_columns) - .load::(conn)? + .load::(conn) + .await? .into_iter() .map(Site::try_from) .collect::, _>>() } - pub(super) fn fill_assignments( - conn: &mut PgConnection, - infos: &mut [status::Info], - ) -> Result<(), StoreError> { + pub(super) async fn fill_assignments( + conn: &mut AsyncPgConnection, + infos: &[status::Info], + ) -> Result, StoreError> { let ids: Vec<_> = infos.iter().map(|info| &info.id).collect(); let nodes: HashMap<_, _> = a::table .inner_join(ds::table.on(ds::id.eq(a::id))) .filter(ds::id.eq_any(ids)) .select((ds::id, a::node_id, a::paused_at.is_not_null())) - .load::<(GraphDeploymentId, String, bool)>(conn)? + .load::<(GraphDeploymentId, String, bool)>(conn) + .await? .into_iter() .map(|(id, node, paused)| (id, (node, paused))) .collect(); - for info in infos { - info.node = nodes.get(&info.id).map(|(node, _)| node.clone()); - info.paused = nodes.get(&info.id).map(|(_, paused)| *paused); - } - Ok(()) + Ok(nodes) } - pub(super) fn assigned_node( - conn: &mut PgConnection, + pub(super) async fn assigned_node( + conn: &mut AsyncPgConnection, site: &Site, ) -> Result, StoreError> { a::table .filter(a::id.eq(site.id)) .select(a::node_id) .first::(conn) + .await .optional()? .map(|node| { NodeId::new(&node).map_err(|()| { @@ -689,14 +696,15 @@ mod queries { /// the subgraph is assigned to, and `is_paused` is true if the /// subgraph is paused. /// Returns None if the deployment does not exist. - pub(super) fn assignment_status( - conn: &mut PgConnection, + pub(super) async fn assignment_status( + conn: &mut AsyncPgConnection, site: &Site, ) -> Result, StoreError> { a::table .filter(a::id.eq(site.id)) .select((a::node_id, a::paused_at)) .first::<(String, Option)>(conn) + .await .optional()? 
.map(|(node, ts)| { let node_id = NodeId::new(&node).map_err(|()| { @@ -715,32 +723,34 @@ mod queries { .transpose() } - pub(super) fn version_info( - conn: &mut PgConnection, + pub(super) async fn version_info( + conn: &mut AsyncPgConnection, version: &str, ) -> Result, StoreError> { Ok(v::table .select((v::deployment, sql::("created_at::text"))) .filter(v::id.eq(version)) .first::<(String, String)>(conn) + .await .optional()?) } - pub(super) fn versions_for_subgraph_id( - conn: &mut PgConnection, + pub(super) async fn versions_for_subgraph_id( + conn: &mut AsyncPgConnection, subgraph_id: &str, ) -> Result<(Option, Option), StoreError> { Ok(s::table .select((s::current_version.nullable(), s::pending_version.nullable())) .filter(s::id.eq(subgraph_id)) .first::<(Option, Option)>(conn) + .await .optional()? .unwrap_or((None, None))) } /// Returns all (subgraph_name, version) pairs for a given deployment hash. - pub fn subgraphs_by_deployment_hash( - conn: &mut PgConnection, + pub async fn subgraphs_by_deployment_hash( + conn: &mut AsyncPgConnection, deployment_hash: &str, ) -> Result, StoreError> { v::table @@ -761,52 +771,84 @@ mod queries { ), )) .get_results(conn) + .await .map_err(Into::into) } } /// A wrapper for a database connection that provides access to functionality /// that works only on the primary database -pub struct Connection<'a> { - conn: MaybeOwnedMut<'a, PooledConnection>>, +pub struct Connection { + conn: AsyncPgConnection, } -impl<'a> Connection<'a> { - pub fn new( - conn: impl Into>>>, - ) -> Self { - Self { conn: conn.into() } - } - - pub(crate) fn transaction(&mut self, f: F) -> Result +impl Connection { + pub fn new(conn: AsyncPgConnection) -> Self { + Self { conn } + } + + /// Run an async `callback` inside a database transaction on the + /// connection that `self` contains. If `callback` returns `Ok(T)`, the + /// transaction is committed and `Ok(T)` is returned. If `callback` + /// returns `Err(E)`, the transaction is rolled back and `Err(E)` is + /// returned. If committing or rolling back the transaction fails, + /// return an error + pub(crate) fn transaction<'a, 'conn, R, F>( + &'conn mut self, + callback: F, + ) -> BoxFuture<'conn, Result> where - F: FnOnce(&mut PooledConnection>) -> Result, - E: From, + F: for<'r> FnOnce(&'r mut Self) -> ScopedBoxFuture<'a, 'r, Result> + + Send + + 'a, + R: Send + 'a, + 'a: 'conn, { - self.conn.as_mut().transaction(f) + type TM = ::TransactionManager; + + async move { + TM::begin_transaction(&mut self.conn).await?; + match callback(self).await { + Ok(value) => { + TM::commit_transaction(&mut self.conn).await?; + Ok(value) + } + Err(user_error) => match TM::rollback_transaction(&mut self.conn).await { + Ok(()) => Err(user_error), + Err(diesel::result::Error::BrokenTransactionManager) => { + // In this case we are probably more interested by the + // original error, which likely caused this + Err(user_error) + } + Err(rollback_error) => Err(rollback_error.into()), + }, + } + } + .boxed() } /// Signal any copy process that might be copying into one of these /// deployments that it should stop. 
Copying is cancelled whenever we /// remove the assignment for a deployment - fn cancel_copies(&mut self, ids: Vec) -> Result<(), StoreError> { + async fn cancel_copies(&mut self, ids: Vec) -> Result<(), StoreError> { use active_copies as ac; update(ac::table.filter(ac::dst.eq_any(ids))) .set(ac::cancelled_at.eq(sql("now()"))) - .execute(self.conn.as_mut())?; + .execute(&mut self.conn) + .await?; Ok(()) } /// Delete all assignments for deployments that are neither the current nor the /// pending version of a subgraph and return the deployment id's - fn remove_unused_assignments(&mut self) -> Result, StoreError> { + async fn remove_unused_assignments(&mut self) -> Result, StoreError> { use deployment_schemas as ds; use subgraph as s; use subgraph_deployment_assignment as a; use subgraph_version as v; - let conn = self.conn.as_mut(); + let conn = &mut self.conn; let named = v::table .inner_join( s::table.on(v::id @@ -820,18 +862,20 @@ impl<'a> Connection<'a> { let removed = delete(a::table.filter(not(exists(named)))) .returning(a::id) - .load::(conn)?; + .load::(conn) + .await?; let removed: Vec<_> = ds::table .filter(ds::id.eq_any(removed)) .select((ds::id, ds::subgraph)) - .load::<(DeploymentId, String)>(conn)? + .load::<(DeploymentId, String)>(conn) + .await? .into_iter() .collect(); // Stop ongoing copies let removed_ids: Vec<_> = removed.iter().map(|(id, _)| *id).collect(); - self.cancel_copies(removed_ids)?; + self.cancel_copies(removed_ids).await?; let events = removed .into_iter() @@ -853,14 +897,14 @@ impl<'a> Connection<'a> { /// the pending version so far, and remove any assignments that are not needed /// any longer as a result. Return the changes that were made to assignments /// in the process - pub fn promote_deployment( + pub async fn promote_deployment( &mut self, id: &DeploymentHash, ) -> Result, StoreError> { use subgraph as s; use subgraph_version as v; - let conn = self.conn.as_mut(); + let conn = &mut self.conn; // Subgraphs where we need to promote the version let pending_subgraph_versions: Vec<(String, String)> = s::table @@ -868,7 +912,8 @@ impl<'a> Connection<'a> { .filter(v::deployment.eq(id.as_str())) .select((s::id, v::id)) .for_update() - .load(conn)?; + .load(conn) + .await?; // Switch the pending version to the current version for (subgraph, version) in &pending_subgraph_versions { @@ -877,7 +922,8 @@ impl<'a> Connection<'a> { s::current_version.eq(version), s::pending_version.eq::>(None), )) - .execute(conn)?; + .execute(conn) + .await?; } // Clean up assignments if we could possibly have changed any @@ -885,7 +931,7 @@ impl<'a> Connection<'a> { let changes = if pending_subgraph_versions.is_empty() { vec![] } else { - self.remove_unused_assignments()? + self.remove_unused_assignments().await? }; Ok(changes) } @@ -893,10 +939,10 @@ impl<'a> Connection<'a> { /// Create a new subgraph with the given name. If one already exists, use /// the existing one. 
Return the `id` of the newly created or existing /// subgraph - pub fn create_subgraph(&mut self, name: &SubgraphName) -> Result { + pub async fn create_subgraph(&mut self, name: &SubgraphName) -> Result { use subgraph as s; - let conn = self.conn.as_mut(); + let conn = &mut self.conn; let id = generate_entity_id(); let created_at = SystemTime::now() .duration_since(UNIX_EPOCH) @@ -912,19 +958,21 @@ impl<'a> Connection<'a> { )) .on_conflict(s::name) .do_nothing() - .execute(conn)?; + .execute(conn) + .await?; if inserted == 0 { let existing_id = s::table .filter(s::name.eq(name.as_str())) .select(s::id) - .first::(conn)?; + .first::(conn) + .await?; Ok(existing_id) } else { Ok(id) } } - pub fn create_subgraph_version( + pub async fn create_subgraph_version( &mut self, name: SubgraphName, site: &Site, @@ -933,7 +981,7 @@ impl<'a> Connection<'a> { exists_and_synced: F, ) -> Result, StoreError> where - F: Fn(&DeploymentHash) -> Result, + F: AsyncFn(&DeploymentHash) -> Result, { use subgraph as s; use subgraph_deployment_assignment as a; @@ -951,38 +999,38 @@ impl<'a> Connection<'a> { .left_outer_join(v::table.on(s::current_version.eq(v::id.nullable()))) .filter(s::name.eq(name.as_str())) .select((s::id, v::deployment.nullable())) - .first::<(String, Option)>(self.conn.as_mut()) + .first::<(String, Option)>(&mut self.conn) + .await .optional()?; let (subgraph_id, current_deployment) = match info { - Some((subgraph_id, current_deployment)) => (subgraph_id, current_deployment), - None => (self.create_subgraph(&name)?, None), + Some((subgraph_id, current_deployment)) => { + let current_deployment = current_deployment + .map(|d| DeploymentHash::new(d).map_err(StoreError::DeploymentNotFound)) + .transpose()?; + (subgraph_id, current_deployment) + } + None => (self.create_subgraph(&name).await?, None), }; let pending_deployment = s::table .left_outer_join(v::table.on(s::pending_version.eq(v::id.nullable()))) .filter(s::id.eq(&subgraph_id)) .select(v::deployment.nullable()) - .first::>(self.conn.as_mut())?; + .first::>(&mut self.conn) + .await?; // See if the current version of that subgraph is synced. If the subgraph // has no current version, we treat it the same as if it were not synced // The `optional` below only comes into play if data is corrupted/missing; // ignoring that via `optional` makes it possible to fix a missing version // or deployment by deploying over it. - let current_exists_and_synced = current_deployment - .as_deref() - .map(|id| { - DeploymentHash::new(id) - .map_err(StoreError::DeploymentNotFound) - .and_then(|id| exists_and_synced(&id)) - }) - .transpose()? 
- .unwrap_or(false); + let current_exists_and_synced = match current_deployment { + None => false, + Some(ref id) => exists_and_synced(id).await?, + }; // Check if we even need to make any changes let change_needed = match (mode, current_exists_and_synced) { - (Instant, _) | (Synced, false) => { - current_deployment.as_deref() != Some(site.deployment.as_str()) - } + (Instant, _) | (Synced, false) => current_deployment.as_ref() != Some(&site.deployment), (Synced, true) => pending_deployment.as_deref() != Some(site.deployment.as_str()), }; if !change_needed { @@ -1000,26 +1048,29 @@ impl<'a> Connection<'a> { v::created_at.eq(sql(&format!("{}", created_at))), v::block_range.eq(UNVERSIONED_RANGE), )) - .execute(self.conn.as_mut())?; + .execute(&mut self.conn) + .await?; // Create a subgraph assignment if there isn't one already let new_assignment = a::table .filter(a::id.eq(site.id)) .select(a::id) - .first::(self.conn.as_mut()) + .first::(&mut self.conn) + .await .optional()? .is_none(); if new_assignment { insert_into(a::table) .values((a::id.eq(site.id), a::node_id.eq(node_id.as_str()))) - .execute(self.conn.as_mut())?; + .execute(&mut self.conn) + .await?; } // See if we should make this the current or pending version let subgraph_row = update(s::table.filter(s::id.eq(&subgraph_id))); // When the new deployment is also synced already, we always want to // overwrite the current version - let new_exists_and_synced = exists_and_synced(&site.deployment)?; + let new_exists_and_synced = exists_and_synced(&site.deployment).await?; match (mode, current_exists_and_synced, new_exists_and_synced) { (Instant, _, _) | (Synced, false, _) | (Synced, true, true) => { subgraph_row @@ -1027,17 +1078,19 @@ impl<'a> Connection<'a> { s::current_version.eq(&version_id), s::pending_version.eq::>(None), )) - .execute(self.conn.as_mut())?; + .execute(&mut self.conn) + .await?; } (Synced, true, false) => { subgraph_row .set(s::pending_version.eq(&version_id)) - .execute(self.conn.as_mut())?; + .execute(&mut self.conn) + .await?; } } // Clean up any assignments we might have displaced - let mut changes = self.remove_unused_assignments()?; + let mut changes = self.remove_unused_assignments().await?; if new_assignment { let change = AssignmentChange::set(site.into()); changes.push(change); @@ -1045,14 +1098,14 @@ impl<'a> Connection<'a> { Ok(changes) } - pub fn remove_subgraph( + pub async fn remove_subgraph( &mut self, name: SubgraphName, ) -> Result, StoreError> { use subgraph as s; use subgraph_version as v; - let conn = self.conn.as_mut(); + let conn = &mut self.conn; // Get the id of the given subgraph. 
If no subgraph with the // name exists, there is nothing to do @@ -1060,24 +1113,33 @@ impl<'a> Connection<'a> { .filter(s::name.eq(name.as_str())) .select(s::id) .first(conn) + .await .optional()?; if let Some(subgraph) = subgraph { - delete(v::table.filter(v::subgraph.eq(&subgraph))).execute(conn)?; - delete(s::table.filter(s::id.eq(subgraph))).execute(conn)?; - self.remove_unused_assignments() + delete(v::table.filter(v::subgraph.eq(&subgraph))) + .execute(conn) + .await?; + delete(s::table.filter(s::id.eq(subgraph))) + .execute(conn) + .await?; + self.remove_unused_assignments().await } else { Ok(vec![]) } } - pub fn pause_subgraph(&mut self, site: &Site) -> Result, StoreError> { + pub async fn pause_subgraph( + &mut self, + site: &Site, + ) -> Result, StoreError> { use subgraph_deployment_assignment as a; - let conn = self.conn.as_mut(); + let conn = &mut self.conn; let updates = update(a::table.filter(a::id.eq(site.id))) .set(a::paused_at.eq(sql("now()"))) - .execute(conn)?; + .execute(conn) + .await?; match updates { 0 => Err(StoreError::DeploymentNotFound(site.deployment.to_string())), 1 => { @@ -1092,14 +1154,18 @@ impl<'a> Connection<'a> { } } - pub fn resume_subgraph(&mut self, site: &Site) -> Result, StoreError> { + pub async fn resume_subgraph( + &mut self, + site: &Site, + ) -> Result, StoreError> { use subgraph_deployment_assignment as a; - let conn = self.conn.as_mut(); + let conn = &mut self.conn; let updates = update(a::table.filter(a::id.eq(site.id))) .set(a::paused_at.eq(sql("null"))) - .execute(conn)?; + .execute(conn) + .await?; match updates { 0 => Err(StoreError::DeploymentNotFound(site.deployment.to_string())), 1 => { @@ -1114,17 +1180,18 @@ impl<'a> Connection<'a> { } } - pub fn reassign_subgraph( + pub async fn reassign_subgraph( &mut self, site: &Site, node: &NodeId, ) -> Result, StoreError> { use subgraph_deployment_assignment as a; - let conn = self.conn.as_mut(); + let conn = &mut self.conn; let updates = update(a::table.filter(a::id.eq(site.id))) .set(a::node_id.eq(node.as_str())) - .execute(conn)?; + .execute(conn) + .await?; match updates { 0 => Err(StoreError::DeploymentNotFound(site.deployment.to_string())), 1 => { @@ -1139,13 +1206,13 @@ impl<'a> Connection<'a> { } } - pub fn get_subgraph_features( + pub async fn get_subgraph_features( &mut self, id: String, ) -> Result, StoreError> { use subgraph_features as f; - let conn = self.conn.as_mut(); + let conn = &mut self.conn; let features = f::table .filter(f::id.eq(id)) .select(( @@ -1174,6 +1241,7 @@ impl<'a> Connection<'a> { bool, Vec, )>(conn) + .await .optional()?; let features = features.map( @@ -1209,7 +1277,7 @@ impl<'a> Connection<'a> { Ok(features) } - pub fn create_subgraph_features( + pub async fn create_subgraph_features( &mut self, features: DeploymentFeatures, ) -> Result<(), StoreError> { @@ -1229,7 +1297,7 @@ impl<'a> Connection<'a> { has_aggregations, } = features; - let conn = self.conn.as_mut(); + let conn = &mut self.conn; let changes = ( f::id.eq(id), f::spec_version.eq(spec_version), @@ -1247,33 +1315,40 @@ impl<'a> Connection<'a> { insert_into(f::table) .values(changes.clone()) .on_conflict_do_nothing() - .execute(conn)?; + .execute(conn) + .await?; Ok(()) } - pub fn assign_subgraph( + pub async fn assign_subgraph( &mut self, site: &Site, node: &NodeId, ) -> Result, StoreError> { use subgraph_deployment_assignment as a; - let conn = self.conn.as_mut(); + let conn = &mut self.conn; insert_into(a::table) .values((a::id.eq(site.id), a::node_id.eq(node.as_str()))) - .execute(conn)?; + 
.execute(conn) + .await?; let change = AssignmentChange::set(site.into()); Ok(vec![change]) } - pub fn unassign_subgraph(&mut self, site: &Site) -> Result, StoreError> { + pub async fn unassign_subgraph( + &mut self, + site: &Site, + ) -> Result, StoreError> { use subgraph_deployment_assignment as a; - let conn = self.conn.as_mut(); - let delete_count = delete(a::table.filter(a::id.eq(site.id))).execute(conn)?; + let conn = &mut self.conn; + let delete_count = delete(a::table.filter(a::id.eq(site.id))) + .execute(conn) + .await?; - self.cancel_copies(vec![site.id])?; + self.cancel_copies(vec![site.id]).await?; match delete_count { 0 => Ok(vec![]), @@ -1293,7 +1368,7 @@ impl<'a> Connection<'a> { /// function only performs the basic operations for creation, and the /// caller must check that other conditions (like whether there already /// is an active site for the deployment) are met - fn create_site( + async fn create_site( &mut self, shard: Shard, deployment: DeploymentHash, @@ -1303,7 +1378,7 @@ impl<'a> Connection<'a> { ) -> Result { use deployment_schemas as ds; - let conn = self.conn.as_mut(); + let conn = &mut self.conn; let schemas: Vec<(DeploymentId, String)> = diesel::insert_into(ds::table) .values(( @@ -1314,8 +1389,10 @@ impl<'a> Connection<'a> { ds::active.eq(active), )) .returning((ds::id, ds::name)) - .get_results(conn)?; + .get_results(conn) + .await?; let (id, namespace) = schemas + .as_slice() .first() .cloned() .ok_or_else(|| anyhow!("failed to read schema name for {} back", deployment))?; @@ -1339,22 +1416,22 @@ impl<'a> Connection<'a> { /// If it already exists, return the existing site /// and a boolean indicating whether a new site was created. /// `false` means the site already existed. - pub fn allocate_site( + pub async fn allocate_site( &mut self, shard: Shard, subgraph: &DeploymentHash, network: String, graft_base: Option<&DeploymentHash>, ) -> Result<(Site, bool), StoreError> { - let conn = self.conn.as_mut(); - if let Some(site) = queries::find_active_site(conn, subgraph)? { + let conn = &mut self.conn; + if let Some(site) = queries::find_active_site(conn, subgraph).await? { return Ok((site, false)); } let site_was_created = true; let schema_version = match graft_base { Some(graft_base) => { - let site = queries::find_active_site(conn, graft_base)?; + let site = queries::find_active_site(conn, graft_base).await?; site.map(|site| site.schema_version).ok_or_else(|| { StoreError::DeploymentNotFound("graft_base not found".to_string()) }) @@ -1363,27 +1440,31 @@ impl<'a> Connection<'a> { }?; self.create_site(shard, subgraph.clone(), network, schema_version, true) + .await .map(|site| (site, site_was_created)) } - pub fn assigned_node(&mut self, site: &Site) -> Result, StoreError> { - queries::assigned_node(self.conn.as_mut(), site) + pub async fn assigned_node(&mut self, site: &Site) -> Result, StoreError> { + queries::assigned_node(&mut self.conn, site).await } /// Returns Option<(node_id,is_paused)> where `node_id` is the node that /// the subgraph is assigned to, and `is_paused` is true if the /// subgraph is paused. /// Returns None if the deployment does not exist. - pub fn assignment_status(&mut self, site: &Site) -> Result, StoreError> { - queries::assignment_status(self.conn.as_mut(), site) + pub async fn assignment_status( + &mut self, + site: &Site, + ) -> Result, StoreError> { + queries::assignment_status(&mut self.conn, site).await } /// Create a copy of the site `src` in the shard `shard`, but mark it as /// not active. 
If there already is a site in `shard`, return that
     /// instead.
-    pub fn copy_site(&mut self, src: &Site, shard: Shard) -> Result<Site, StoreError> {
+    pub async fn copy_site(&mut self, src: &Site, shard: Shard) -> Result<Site, StoreError> {
         if let Some(site) =
-            queries::find_site_in_shard(self.conn.as_mut(), &src.deployment, &shard)?
+            queries::find_site_in_shard(&mut self.conn, &src.deployment, &shard).await?
         {
             return Ok(site);
         }
@@ -1395,88 +1476,113 @@ impl<'a> Connection<'a> {
             src.schema_version,
             false,
         )
+        .await
     }

-    pub(crate) fn activate(&mut self, deployment: &DeploymentLocator) -> Result<(), StoreError> {
+    pub(crate) async fn activate(
+        &mut self,
+        deployment: &DeploymentLocator,
+    ) -> Result<(), StoreError> {
         use deployment_schemas as ds;

-        let conn = self.conn.as_mut();
+        let conn = &mut self.conn;

         // We need to tread lightly so we do not violate the unique constraint on
         // `subgraph where active`
         update(ds::table.filter(ds::subgraph.eq(deployment.hash.as_str())))
             .set(ds::active.eq(false))
-            .execute(conn)?;
+            .execute(conn)
+            .await?;
         update(ds::table.filter(ds::id.eq(DeploymentId::from(deployment.id))))
             .set(ds::active.eq(true))
             .execute(conn)
+            .await
             .map_err(|e| e.into())
             .map(|_| ())
     }

     /// Remove all subgraph versions, the entry in `deployment_schemas` and the entry in
     /// `subgraph_features` for subgraph `id` in a transaction
-    pub fn drop_site(&mut self, site: &Site) -> Result<(), StoreError> {
+    pub async fn drop_site(&mut self, site: &Site) -> Result<(), StoreError> {
         use deployment_schemas as ds;
         use subgraph_features as f;
         use subgraph_version as v;
         use unused_deployments as u;

-        self.transaction(|conn| {
-            delete(ds::table.filter(ds::id.eq(site.id))).execute(conn)?;
+        self.transaction(|pconn| {
+            async {
+                let conn = &mut pconn.conn;

-            // If there is no site for this deployment any more, we can get
-            // rid of versions pointing to it
-            let exists = select(exists(
-                ds::table.filter(ds::subgraph.eq(site.deployment.as_str())),
-            ))
-            .get_result::<bool>(conn)?;
-            if !exists {
-                delete(v::table.filter(v::deployment.eq(site.deployment.as_str())))
-                    .execute(conn)?;
+                delete(ds::table.filter(ds::id.eq(site.id)))
+                    .execute(conn)
+                    .await?;

-                // Remove the entry in `subgraph_features`
-                delete(f::table.filter(f::id.eq(site.deployment.as_str()))).execute(conn)?;
-            }
+                // If there is no site for this deployment any more, we can get
+                // rid of versions pointing to it
+                let exists = select(exists(
+                    ds::table.filter(ds::subgraph.eq(site.deployment.as_str())),
+                ))
+                .get_result::<bool>(conn)
+                .await?;
+                if !exists {
+                    delete(v::table.filter(v::deployment.eq(site.deployment.as_str())))
+                        .execute(conn)
+                        .await?;
+
+                    // Remove the entry in `subgraph_features`
+                    delete(f::table.filter(f::id.eq(site.deployment.as_str())))
+                        .execute(conn)
+                        .await?;
+                }

-            update(u::table.filter(u::id.eq(site.id)))
-                .set(u::removed_at.eq(sql("now()")))
-                .execute(conn)?;
-            Ok(())
+                update(u::table.filter(u::id.eq(site.id)))
+                    .set(u::removed_at.eq(sql("now()")))
+                    .execute(conn)
+                    .await?;
+                Ok(())
+            }
+            .scope_boxed()
         })
+        .await
     }

-    pub fn locate_site(&mut self, locator: DeploymentLocator) -> Result<Option<Site>, StoreError> {
+    pub async fn locate_site(
+        &mut self,
+        locator: DeploymentLocator,
+    ) -> Result<Option<Site>, StoreError> {
         let schema = deployment_schemas::table
             .filter(deployment_schemas::id.eq::<DeploymentId>(locator.into()))
-            .first::<Schema>(self.conn.as_mut())
+            .first::<Schema>(&mut self.conn)
+            .await
             .optional()?;
         schema.map(|schema| schema.try_into()).transpose()
     }

-    pub fn find_sites_for_network(&mut self, network: &str) -> Result<Vec<Site>, StoreError> {
+    pub async fn find_sites_for_network(&mut self, network: &str) -> Result<Vec<Site>, StoreError> {
         use deployment_schemas as ds;

         ds::table
             .filter(ds::network.eq(network))
-            .load::<Schema>(self.conn.as_mut())?
+            .load::<Schema>(&mut self.conn)
+            .await?
             .into_iter()
             .map(|schema| schema.try_into())
             .collect()
     }

-    pub fn sites(&mut self) -> Result<Vec<Site>, StoreError> {
+    pub async fn sites(&mut self) -> Result<Vec<Site>, StoreError> {
         use deployment_schemas as ds;

         ds::table
             .filter(ds::name.ne("subgraphs"))
-            .load::<Schema>(self.conn.as_mut())?
+            .load::<Schema>(&mut self.conn)
+            .await?
             .into_iter()
             .map(|schema| schema.try_into())
             .collect()
     }

-    pub fn send_store_event(
+    pub async fn send_store_event(
         &mut self,
         sender: &NotificationSender,
         event: &StoreEvent,
@@ -1492,22 +1598,30 @@ impl<'a> Connection<'a> {
                 EVENT_TAP.lock().unwrap().push(event.clone());
             }
         }
-        sender.notify(&mut self.conn, "store_events", None, &v)
+        sender
+            .notify(&mut self.conn, "store_events", None, &v)
+            .await
     }

     /// Return the name of the node that has the fewest assignments out of the
     /// given `nodes`. If `nodes` is empty, return `None`
-    pub fn least_assigned_node(&mut self, nodes: &[NodeId]) -> Result<Option<NodeId>, StoreError> {
+    pub async fn least_assigned_node(
+        &mut self,
+        nodes: &[NodeId],
+    ) -> Result<Option<NodeId>, StoreError> {
         use subgraph_deployment_assignment as a;

         let nodes: Vec<_> = nodes.iter().map(|n| n.as_str()).collect();

+        let conn = &mut self.conn;
+
         let assigned = a::table
             .filter(a::node_id.eq_any(&nodes))
             .select((a::node_id, sql::<BigInt>("count(*)")))
             .group_by(a::node_id)
             .order_by(sql::<BigInt>("count(*)"))
-            .load::<(String, i64)>(self.conn.as_mut())?;
+            .load::<(String, i64)>(conn)
+            .await?;

         // Any nodes without assignments will be missing from `assigned`
         let missing = nodes
@@ -1535,17 +1649,23 @@ impl<'a> Connection<'a> {
     /// that are stored in it. Unassigned deployments are ignored; in
     /// particular, that ignores deployments that are going to be removed
     /// soon.
-    pub fn least_used_shard(&mut self, shards: &[Shard]) -> Result<Option<Shard>, StoreError> {
+    pub async fn least_used_shard(
+        &mut self,
+        shards: &[Shard],
+    ) -> Result<Option<Shard>, StoreError> {
         use deployment_schemas as ds;
         use subgraph_deployment_assignment as a;

+        let conn = &mut self.conn;
+
         let used = ds::table
             .inner_join(a::table.on(a::id.eq(ds::id)))
             .filter(ds::shard.eq_any(shards))
             .select((ds::shard, sql::<BigInt>("count(*)")))
             .group_by(ds::shard)
             .order_by(sql::<BigInt>("count(*)"))
-            .load::<(String, i64)>(self.conn.as_mut())?;
+            .load::<(String, i64)>(conn)
+            .await?;

         // Any shards that have no deployments in them will not be in
         // 'used'; add them in with a count of 0
@@ -1565,7 +1685,7 @@ impl<'a> Connection<'a> {
     }

     #[cfg(debug_assertions)]
-    pub fn versions_for_subgraph(
+    pub async fn versions_for_subgraph(
         &mut self,
         name: &str,
     ) -> Result<(Option<String>, Option<String>), StoreError> {
@@ -1574,26 +1694,31 @@ impl<'a> Connection<'a> {
         Ok(s::table
             .select((s::current_version.nullable(), s::pending_version.nullable()))
             .filter(s::name.eq(&name))
-            .first::<(Option<String>, Option<String>)>(self.conn.as_mut())
+            .first::<(Option<String>, Option<String>)>(&mut self.conn)
+            .await
             .optional()?
             .unwrap_or((None, None)))
     }

     #[cfg(debug_assertions)]
-    pub fn deployment_for_version(&mut self, name: &str) -> Result<Option<String>, StoreError> {
+    pub async fn deployment_for_version(
+        &mut self,
+        name: &str,
+    ) -> Result<Option<String>, StoreError> {
         use subgraph_version as v;

         Ok(v::table
             .select(v::deployment)
             .filter(v::id.eq(name))
-            .first::<String>(self.conn.as_mut())
+            .first::<String>(&mut self.conn)
+            .await
             .optional()?)
     }

     /// Find all deployments that are not in use and add them to the
     /// `unused_deployments` table. Only values that are available in the
     /// primary will be filled in `unused_deployments`
-    pub fn detect_unused_deployments(&mut self) -> Result<Vec<Site>, StoreError> {
+    pub async fn detect_unused_deployments(&mut self) -> Result<Vec<Site>, StoreError> {
         use active_copies as cp;
         use deployment_schemas as ds;
         use subgraph as s;
@@ -1601,7 +1726,7 @@ impl<'a> Connection<'a> {
         use subgraph_version as v;
         use unused_deployments as u;

-        let conn = self.conn.as_mut();
+        let conn = &mut self.conn;
         // Deployment is assigned
         let assigned = a::table.filter(a::id.eq(ds::id));
         // Deployment is current or pending version
@@ -1655,21 +1780,23 @@ impl<'a> Connection<'a> {
             .on_conflict(u::id)
             .do_nothing()
             .returning(u::id)
-            .get_results::<DeploymentId>(conn)?;
+            .get_results::<DeploymentId>(conn)
+            .await?;

         // We need to load again since we do not record the network in
         // unused_deployments
         ds::table
             .filter(ds::id.eq_any(ids))
             .select(ds::all_columns)
-            .load::<Schema>(conn)?
+            .load::<Schema>(conn)
+            .await?
             .into_iter()
             .map(Site::try_from)
             .collect()
     }

     /// Add details from the deployment shard to unused deployments
-    pub fn update_unused_deployments(
+    pub async fn update_unused_deployments(
         &mut self,
         details: &[DeploymentDetail],
     ) -> Result<(), StoreError> {
@@ -1697,7 +1824,8 @@ impl<'a> Connection<'a> {
                     u::synced_at.eq(detail.synced_at),
                     u::synced_at_block_number.eq(detail.synced_at_block_number.clone()),
                 ))
-                .execute(self.conn.as_mut())?;
+                .execute(&mut self.conn)
+                .await?;
         }
         Ok(())
     }
@@ -1705,28 +1833,30 @@ impl<'a> Connection<'a> {
     /// The deployment `site` that we marked as unused previously is in fact
     /// now used again, e.g., because it was redeployed in between recording
     /// it as unused and now. Remove it from the `unused_deployments` table
-    pub fn unused_deployment_is_used(&mut self, site: &Site) -> Result<(), StoreError> {
+    pub async fn unused_deployment_is_used(&mut self, site: &Site) -> Result<(), StoreError> {
         use unused_deployments as u;
         delete(u::table.filter(u::id.eq(site.id)))
-            .execute(self.conn.as_mut())
+            .execute(&mut self.conn)
+            .await
             .map(|_| ())
             .map_err(StoreError::from)
     }

-    pub fn list_unused_deployments(
+    pub async fn list_unused_deployments(
         &mut self,
         filter: unused::Filter,
     ) -> Result<Vec<UnusedDeployment>, StoreError> {
         use unused::Filter::*;
         use unused_deployments as u;

-        let conn = self.conn.as_mut();
+        let conn = &mut self.conn;
         match filter {
-            All => Ok(u::table.order_by(u::unused_at.desc()).load(conn)?),
+            All => Ok(u::table.order_by(u::unused_at.desc()).load(conn).await?),
             New => Ok(u::table
                 .filter(u::removed_at.is_null())
                 .order_by(u::entity_count)
-                .load(conn)?),
+                .load(conn)
+                .await?),
             UnusedLongerThan(duration) => {
                 let ts = chrono::offset::Local::now()
                     .checked_sub_signed(duration)
@@ -1737,7 +1867,8 @@ impl<'a> Connection<'a> {
                     .filter(u::removed_at.is_null())
                     .filter(u::unused_at.lt(ts))
                     .order_by(u::entity_count)
-                    .load(conn)?)
+                    .load(conn)
+                    .await?)
             }
             Name(name) => Ok(u::table
@@ -1748,21 +1879,27 @@ impl<'a> Connection<'a> {
                         .sql("] <@ subgraphs"),
                 )
                 .order_by(u::entity_count)
-                .load(conn)?),
+                .load(conn)
+                .await?),
             Hash(hash) => Ok(u::table
                 .filter(u::deployment.eq(hash))
                 .order_by(u::entity_count)
-                .load(conn)?),
+                .load(conn)
+                .await?),
             Deployment(id) => Ok(u::table
                 .filter(u::namespace.eq(id))
                 .order_by(u::entity_count)
-                .load(conn)?),
+                .load(conn)
+                .await?),
         }
     }

-    pub fn subgraphs_using_deployment(&mut self, site: &Site) -> Result<Vec<String>, StoreError> {
+    pub async fn subgraphs_using_deployment(
+        &mut self,
+        site: &Site,
+    ) -> Result<Vec<String>, StoreError> {
         use subgraph as s;
         use subgraph_version as v;

@@ -1776,33 +1913,36 @@ impl<'a> Connection<'a> {
             .filter(v::deployment.eq(site.deployment.as_str()))
             .select(s::name)
             .distinct()
-            .load(self.conn.as_mut())?)
+            .load(&mut self.conn)
+            .await?)
     }

-    pub fn find_ens_name(&mut self, hash: &str) -> Result<Option<String>, StoreError> {
+    pub async fn find_ens_name(&mut self, hash: &str) -> Result<Option<String>, StoreError> {
         use ens_names as dsl;

         dsl::table
             .select(dsl::name)
             .find(hash)
-            .get_result::<String>(self.conn.as_mut())
+            .get_result::<String>(&mut self.conn)
+            .await
             .optional()
             .map_err(|e| anyhow!("error looking up ens_name for hash {}: {}", hash, e).into())
     }

-    pub fn is_ens_table_empty(&mut self) -> Result<bool, StoreError> {
+    pub async fn is_ens_table_empty(&mut self) -> Result<bool, StoreError> {
         use ens_names as dsl;

         dsl::table
             .select(dsl::name)
             .limit(1)
-            .get_result::<String>(self.conn.as_mut())
+            .get_result::<String>(&mut self.conn)
+            .await
             .optional()
             .map(|r| r.is_none())
             .map_err(|e| anyhow!("error if ens table is empty: {}", e).into())
     }

-    pub fn record_active_copy(&mut self, src: &Site, dst: &Site) -> Result<(), StoreError> {
+    pub async fn record_active_copy(&mut self, src: &Site, dst: &Site) -> Result<(), StoreError> {
         use active_copies as cp;

         insert_into(cp::table)
@@ -1812,15 +1952,18 @@ impl<'a> Connection<'a> {
                 cp::queued_at.eq(sql("now()")),
             ))
             .on_conflict_do_nothing()
-            .execute(self.conn.as_mut())?;
+            .execute(&mut self.conn)
+            .await?;
         Ok(())
     }

-    pub fn copy_finished(&mut self, dst: &Site) -> Result<(), StoreError> {
+    pub async fn copy_finished(&mut self, dst: &Site) -> Result<(), StoreError> {
         use active_copies as cp;

-        delete(cp::table.filter(cp::dst.eq(dst.id))).execute(self.conn.as_mut())?;
+        delete(cp::table.filter(cp::dst.eq(dst.id)))
+            .execute(&mut self.conn)
+            .await?;
         Ok(())
     }
@@ -1845,10 +1988,10 @@ impl Primary {
     /// Return `true` if the site is the source of a copy operation. The copy
     /// operation might be just queued or in progress already. This method will
     /// block until a fdw connection becomes available.
-    pub fn is_source(&self, site: &Site) -> Result<bool, StoreError> {
+    pub async fn is_source(&self, site: &Site) -> Result<bool, StoreError> {
         use active_copies as ac;

-        let mut conn = self.pool.get()?;
+        let mut conn = self.pool.get().await?;

         select(diesel::dsl::exists(
             ac::table
@@ -1856,30 +1999,32 @@ impl Primary {
                 .filter(ac::cancelled_at.is_null()),
         ))
         .get_result::<bool>(&mut conn)
+        .await
         .map_err(StoreError::from)
     }

-    pub fn is_copy_cancelled(&self, dst: &Site) -> Result<bool, StoreError> {
+    pub async fn is_copy_cancelled(&self, dst: &Site) -> Result<bool, StoreError> {
         use active_copies as ac;

-        let mut conn = self.pool.get()?;
+        let mut conn = self.pool.get().await?;

         ac::table
             .filter(ac::dst.eq(dst.id))
             .select(ac::cancelled_at.is_not_null())
             .get_result::<bool>(&mut conn)
+            .await
             .map_err(StoreError::from)
     }
 }

 /// Return `true` if we deem this installation to be empty, defined as
 /// having no deployments and no subgraph names in the database
-pub fn is_empty(conn: &mut PgConnection) -> Result<bool, StoreError> {
+pub async fn is_empty(conn: &mut AsyncPgConnection) -> Result<bool, StoreError> {
     use deployment_schemas as ds;
     use subgraph as s;

-    let empty = ds::table.count().get_result::<i64>(conn)? == 0
-        && s::table.count().get_result::<i64>(conn)? == 0;
+    let empty = ds::table.count().get_result::<i64>(conn).await? == 0
+        && s::table.count().get_result::<i64>(conn).await? == 0;
     Ok(empty)
 }

@@ -1932,65 +2077,52 @@ impl Mirror {
         }
     }

-    /// Execute the function `f` with connections from each of our pools in
+    /// Execute the `callback` with connections from each of our pools in
     /// order until for one of them we get any result other than
     /// `Err(StoreError::DatabaseUnavailable)`. In other words, we try to
-    /// execute `f` against our pools in order until we can be sure that we
-    /// talked to a database that is up. The function `f` must only access
-    /// tables that are mirrored through `refresh_tables`
-    pub(crate) fn read<'a, T>(
-        &self,
-        mut f: impl 'a
-            + FnMut(&mut PooledConnection<ConnectionManager<PgConnection>>) -> Result<T, StoreError>,
-    ) -> Result<T, StoreError> {
-        for pool in self.pools.as_ref() {
-            let mut conn = match pool.get() {
-                Ok(conn) => conn,
-                Err(StoreError::DatabaseUnavailable) => continue,
-                Err(e) => return Err(e),
-            };
-            match f(&mut conn) {
-                Ok(v) => return Ok(v),
-                Err(StoreError::DatabaseUnavailable) => continue,
-                Err(e) => return Err(e),
-            }
-        }
-        Err(StoreError::DatabaseUnavailable)
-    }
-
-    /// An async version of `read` that spawns a blocking task to do the
-    /// actual work. This is useful when you want to call `read` from an
-    /// async context
-    pub(crate) async fn read_async<F, T>(&self, mut f: F) -> Result<T, StoreError>
+    /// execute `callback` against our pools in order until we can be sure
+    /// that we talked to a database that is up. The function `callback`
+    /// must only access tables that are mirrored through `refresh_tables`
+    ///
+    /// The function `callback` must not do any blocking work itself
+    pub(crate) fn read_async<'a, 's, R, F>(
+        &'s self,
+        callback: F,
+    ) -> BoxFuture<'s, Result<R, StoreError>>
     where
-        T: 'static + Send,
-        F: 'static
+        F: for<'r> Fn(&'r mut AsyncPgConnection) -> ScopedBoxFuture<'a, 'r, Result<R, StoreError>>
             + Send
-            + FnMut(&mut PooledConnection<ConnectionManager<PgConnection>>) -> Result<T, StoreError>,
+            + 'a,
+        R: Send + 'a,
+        'a: 's,
     {
-        let this = self.cheap_clone();
-        let res = graph::spawn_blocking(async move { this.read(|conn| f(conn)) }).await;
-        match res {
-            Ok(v) => v,
-            Err(e) => Err(internal_error!(
-                "spawn_blocking in read_async failed: {}",
-                e
-            )),
+        async move {
+            for pool in self.pools.as_ref() {
+                let mut conn = match pool.get().await {
+                    Ok(conn) => conn,
+                    Err(StoreError::DatabaseUnavailable) => continue,
+                    Err(e) => return Err(e),
+                };
+                match callback(&mut conn).await {
+                    Ok(v) => return Ok(v),
+                    Err(StoreError::DatabaseUnavailable) => continue,
+                    Err(e) => return Err(e),
+                }
+            }
+            Err(StoreError::DatabaseUnavailable)
         }
+        .boxed()
     }

     /// Refresh the contents of mirrored tables from the primary (through
     /// the fdw mapping that `ForeignServer` establishes)
-    pub(crate) fn refresh_tables(
-        conn: &mut PgConnection,
-        handle: &CancelHandle,
-    ) -> Result<(), StoreError> {
-        fn run_query(conn: &mut PgConnection, query: String) -> Result<(), StoreError> {
-            conn.batch_execute(&query).map_err(StoreError::from)
+    pub(crate) async fn refresh_tables(conn: &mut AsyncPgConnection) -> Result<(), StoreError> {
+        async fn run_query(conn: &mut AsyncPgConnection, query: String) -> Result<(), StoreError> {
+            conn.batch_execute(&query).await.map_err(StoreError::from)
         }

-        fn copy_table(
-            conn: &mut PgConnection,
+        async fn copy_table(
+            conn: &mut AsyncPgConnection,
             src_nsp: &str,
             dst_nsp: &str,
             table_name: &str,
@@ -2004,16 +2136,9 @@ impl Mirror {
                     table_name = table_name
                 ),
             )
+            .await
         }

-        let check_cancel = || {
-            if handle.is_canceled() {
-                Err(StoreError::Canceled)
-            } else {
-                Ok(())
-            }
-        };
-
         // Truncate all tables at once, otherwise truncation can fail
         // because of foreign key constraints
         let tables = Self::PUBLIC_TABLES
@@ -2027,13 +2152,11 @@ impl Mirror {
             .map(|(nsp, name)| format!("{}.{}", nsp, name))
             .join(", ");
         let query = format!("truncate table {};", tables);
-        conn.batch_execute(&query)?;
-        check_cancel()?;
+        conn.batch_execute(&query).await?;

         // Repopulate `PUBLIC_TABLES` by copying their data wholesale
         for table_name in Self::PUBLIC_TABLES {
-            copy_table(conn, PRIMARY_PUBLIC, NAMESPACE_PUBLIC, table_name)?;
-            check_cancel()?;
+            copy_table(conn, PRIMARY_PUBLIC, NAMESPACE_PUBLIC, table_name).await?;
         }

         // Repopulate `SUBGRAPHS_TABLES` but only copy the data we actually
@@ -2048,7 +2171,8 @@ impl Mirror {
                 select * from {src_nsp}.subgraph
                 where current_version is not null;"
             ),
-        )?;
+        )
+        .await?;
         run_query(
             conn,
             format!(
@@ -2056,8 +2180,9 @@ impl Mirror {
                 select v.* from {src_nsp}.subgraph_version v, {src_nsp}.subgraph s
                 where v.id = s.current_version;"
             ),
-        )?;
-        copy_table(conn, &src_nsp, dst_nsp, "subgraph_deployment_assignment")?;
+        )
+        .await?;
+        copy_table(conn, &src_nsp, dst_nsp, "subgraph_deployment_assignment").await?;

         Ok(())
     }
@@ -2070,18 +2195,19 @@ impl Mirror {
         &self.pools[0]
     }

-    pub fn assignments(&self, node: &NodeId) -> Result<Vec<Site>, StoreError> {
-        self.read(|conn| queries::assignments(conn, node))
+    pub async fn assignments(&self, node: &NodeId) -> Result<Vec<Site>, StoreError> {
+        self.read_async(|conn| queries::assignments(conn, node).scope_boxed())
+            .await
     }

     pub async fn active_assignments(&self, node: &NodeId) -> Result<Vec<Site>, StoreError> {
-        let node = node.clone();
-        self.read_async(move |conn| queries::active_assignments(conn, &node))
+        self.read_async(|conn| queries::active_assignments(conn, &node).scope_boxed())
             .await
     }

-    pub fn assigned_node(&self, site: &Site) -> Result<Option<NodeId>, StoreError> {
-        self.read(|conn| queries::assigned_node(conn, site))
+    pub async fn assigned_node(&self, site: &Site) -> Result<Option<NodeId>, StoreError> {
+        self.read_async(|conn| queries::assigned_node(conn, site).scope_boxed())
+            .await
     }

     /// Returns Option<(node_id,is_paused)> where `node_id` is the node that
@@ -2092,81 +2218,109 @@ impl Mirror {
         &self,
         site: Arc<Site>,
     ) -> Result<Option<(NodeId, bool)>, StoreError> {
-        self.read_async(move |conn| queries::assignment_status(conn, &site))
+        self.read_async(|conn| queries::assignment_status(conn, &site).scope_boxed())
             .await
     }

-    pub fn find_active_site(&self, subgraph: &DeploymentHash) -> Result<Option<Site>, StoreError> {
-        self.read(|conn| queries::find_active_site(conn, subgraph))
+    pub async fn find_active_site(
+        &self,
+        subgraph: &DeploymentHash,
+    ) -> Result<Option<Site>, StoreError> {
+        self.read_async(|conn| queries::find_active_site(conn, subgraph).scope_boxed())
+            .await
     }

-    pub fn find_site_by_ref(&self, id: DeploymentId) -> Result<Option<Site>, StoreError> {
-        self.read(|conn| queries::find_site_by_ref(conn, id))
+    pub async fn find_site_by_ref(&self, id: DeploymentId) -> Result<Option<Site>, StoreError> {
+        self.read_async(|conn| queries::find_site_by_ref(conn, id).scope_boxed())
+            .await
     }

-    pub fn current_deployment_for_subgraph(
+    pub async fn current_deployment_for_subgraph(
         &self,
         name: &SubgraphName,
     ) -> Result<DeploymentHash, StoreError> {
-        self.read(|conn| queries::current_deployment_for_subgraph(conn, name))
+        self.read_async(|conn| queries::current_deployment_for_subgraph(conn, name).scope_boxed())
+            .await
     }

-    pub fn deployments_for_subgraph(&self, name: &str) -> Result<Vec<Site>, StoreError> {
-        self.read(|conn| queries::deployments_for_subgraph(conn, name))
+    pub async fn deployments_for_subgraph(&self, name: &str) -> Result<Vec<Site>, StoreError> {
+        self.read_async(|conn| queries::deployments_for_subgraph(conn, name).scope_boxed())
+            .await
     }

-    pub fn subgraph_exists(&self, name: &SubgraphName) -> Result<bool, StoreError> {
-        self.read(|conn| queries::subgraph_exists(conn, name))
+    pub async fn subgraph_exists(&self, name: &SubgraphName) -> Result<bool, StoreError> {
+        self.read_async(|conn| queries::subgraph_exists(conn, name).scope_boxed())
+            .await
     }

-    pub fn subgraph_version(
+    pub async fn subgraph_version(
         &self,
         name: &str,
         use_current: bool,
     ) -> Result<Option<DeploymentHash>, StoreError> {
-        self.read(|conn| queries::subgraph_version(conn, name, use_current))
+        self.read_async(|conn| queries::subgraph_version(conn, name, use_current).scope_boxed())
+            .await
     }

     /// Find sites by their subgraph deployment hashes. If `ids` is empty,
     /// return all sites
-    pub fn find_sites(&self, ids: &[String], only_active: bool) -> Result<Vec<Site>, StoreError> {
-        self.read(|conn| queries::find_sites(conn, ids, only_active))
+    pub async fn find_sites(
+        &self,
+        ids: &[String],
+        only_active: bool,
+    ) -> Result<Vec<Site>, StoreError> {
+        self.read_async(|conn| queries::find_sites(conn, ids, only_active).scope_boxed())
+            .await
     }

     /// Find sites by their subgraph deployment ids. If `ids` is empty,
     /// return no sites
-    pub fn find_sites_by_id(&self, ids: &[DeploymentId]) -> Result<Vec<Site>, StoreError> {
-        self.read(|conn| queries::find_sites_by_id(conn, ids))
+    pub async fn find_sites_by_id(&self, ids: &[DeploymentId]) -> Result<Vec<Site>, StoreError> {
+        self.read_async(|conn| queries::find_sites_by_id(conn, ids).scope_boxed())
+            .await
     }

-    pub fn fill_assignments(&self, infos: &mut [status::Info]) -> Result<(), StoreError> {
-        self.read(|conn| queries::fill_assignments(conn, infos))
+    pub async fn fill_assignments(
+        &self,
+        infos: &[status::Info],
    ) -> Result<Vec<status::Info>, StoreError> {
+        self.read_async(|conn| queries::fill_assignments(conn, infos).scope_boxed())
+            .await
     }

-    pub fn version_info(&self, version: &str) -> Result<Option<(String, String)>, StoreError> {
-        self.read(|conn| queries::version_info(conn, version))
+    pub async fn version_info(
+        &self,
+        version: &str,
+    ) -> Result<Option<(String, String)>, StoreError> {
+        self.read_async(|conn| queries::version_info(conn, version).scope_boxed())
+            .await
     }

-    pub fn versions_for_subgraph_id(
+    pub async fn versions_for_subgraph_id(
         &self,
         subgraph_id: &str,
     ) -> Result<(Option<String>, Option<String>), StoreError> {
-        self.read(|conn| queries::versions_for_subgraph_id(conn, subgraph_id))
+        self.read_async(|conn| queries::versions_for_subgraph_id(conn, subgraph_id).scope_boxed())
+            .await
     }

     /// Returns all (subgraph_name, version) pairs for a given deployment hash.
-    pub fn subgraphs_by_deployment_hash(
+    pub async fn subgraphs_by_deployment_hash(
         &self,
         deployment_hash: &str,
     ) -> Result<Vec<(String, String)>, StoreError> {
-        self.read(|conn| queries::subgraphs_by_deployment_hash(conn, deployment_hash))
+        self.read_async(|conn| {
+            queries::subgraphs_by_deployment_hash(conn, deployment_hash).scope_boxed()
+        })
+        .await
     }

-    pub fn find_site_in_shard(
+    pub async fn find_site_in_shard(
         &self,
         subgraph: &DeploymentHash,
         shard: &Shard,
     ) -> Result<Option<Site>, StoreError> {
-        self.read(|conn| queries::find_site_in_shard(conn, subgraph, shard))
+        self.read_async(|conn| queries::find_site_in_shard(conn, subgraph, shard).scope_boxed())
+            .await
     }
 }
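Review note: the rewritten `Mirror::read_async` above replaces the old `spawn_blocking` shim with a genuinely async fallback loop over the pools. A minimal, self-contained sketch of that pattern follows, using plain tokio and illustrative `Pool`/`Error` stand-ins rather than graph-node's types:

use std::future::Future;

#[derive(Debug)]
#[allow(dead_code)]
enum Error {
    Unavailable,
    Other(String),
}

struct Pool {
    healthy: bool,
    name: &'static str,
}

impl Pool {
    async fn get(&self) -> Result<&'static str, Error> {
        if self.healthy {
            Ok(self.name)
        } else {
            Err(Error::Unavailable)
        }
    }
}

/// Try `callback` against each pool in order; skip pools that are down,
/// bail out on any real error, and only give up if every pool is down.
async fn read_async<R, F, Fut>(pools: &[Pool], callback: F) -> Result<R, Error>
where
    F: Fn(&'static str) -> Fut,
    Fut: Future<Output = Result<R, Error>>,
{
    for pool in pools {
        let conn = match pool.get().await {
            Ok(conn) => conn,
            Err(Error::Unavailable) => continue,
            Err(e) => return Err(e),
        };
        match callback(conn).await {
            Ok(v) => return Ok(v),
            Err(Error::Unavailable) => continue,
            Err(e) => return Err(e),
        }
    }
    Err(Error::Unavailable)
}

#[tokio::main]
async fn main() -> Result<(), Error> {
    let pools = [
        Pool { healthy: false, name: "primary" },
        Pool { healthy: true, name: "replica" },
    ];
    // The primary is down, so the replica answers.
    let answered_by = read_async(&pools, |conn| async move { Ok(conn) }).await?;
    assert_eq!(answered_by, "replica");
    Ok(())
}

The key design point carried over from the diff: only `DatabaseUnavailable` triggers fallback to the next pool; any other error is surfaced immediately so real failures are not masked by retries.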
diff --git a/store/postgres/src/query_store.rs b/store/postgres/src/query_store.rs
index ab6c43e55fd..884f292f1ef 100644
--- a/store/postgres/src/query_store.rs
+++ b/store/postgres/src/query_store.rs
@@ -2,6 +2,7 @@ use std::collections::HashMap;
 use std::time::Instant;

 use crate::deployment_store::{DeploymentStore, ReplicaId};
+use async_trait::async_trait;
 use graph::components::store::{DeploymentId, QueryPermit, QueryStore as QueryStoreTrait};
 use graph::data::query::Trace;
 use graph::data::store::QueryObject;
@@ -38,7 +39,7 @@ impl QueryStore {

 #[async_trait]
 impl QueryStoreTrait for QueryStore {
-    fn find_query_values(
+    async fn find_query_values(
         &self,
         query: EntityQuery,
     ) -> Result<(Vec<QueryObject>, Trace), graph::prelude::QueryExecutionError> {
@@ -47,10 +48,12 @@ impl QueryStoreTrait for QueryStore {
         let mut conn = self
             .store
             .get_replica_conn(self.replica_id)
+            .await
             .map_err(|e| QueryExecutionError::StoreError(e.into()))?;
         let wait = start.elapsed();
         self.store
             .execute_query(&mut conn, self.site.clone(), query)
+            .await
             .map(|(entities, mut trace)| {
                 trace.conn_wait(wait);
                 (entities, trace)
@@ -120,13 +123,13 @@ impl QueryStoreTrait for QueryStore {
         Ok(self.store.deployment_state(self.site.cheap_clone()).await?)
     }

-    fn api_schema(&self) -> Result<Arc<ApiSchema>, QueryExecutionError> {
-        let info = self.store.subgraph_info(self.site.cheap_clone())?;
+    async fn api_schema(&self) -> Result<Arc<ApiSchema>, QueryExecutionError> {
+        let info = self.store.subgraph_info(self.site.cheap_clone()).await?;
         Ok(info.api.get(&self.api_version).unwrap().clone())
     }

-    fn input_schema(&self) -> Result<InputSchema, QueryExecutionError> {
-        let layout = self.store.find_layout(self.site.cheap_clone())?;
+    async fn input_schema(&self) -> Result<InputSchema, QueryExecutionError> {
+        let layout = self.store.find_layout(self.site.cheap_clone()).await?;
         Ok(layout.input_schema.cheap_clone())
     }
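Review note: the `query_store.rs` hunks show the mechanical part of the migration — trait methods gain `async` and the trait stays object-safe via `#[async_trait]`. A minimal sketch of that pattern, with made-up `SchemaStore`/`MemoryStore` names standing in for the real trait and impl:

use async_trait::async_trait;

// `async fn` in traits is desugared by the macro into a method returning
// a boxed future, which keeps the trait usable as `dyn SchemaStore`.
#[async_trait]
trait SchemaStore {
    async fn api_schema(&self) -> Result<String, String>;
}

struct MemoryStore {
    schema: String,
}

#[async_trait]
impl SchemaStore for MemoryStore {
    async fn api_schema(&self) -> Result<String, String> {
        // A real store would await a database lookup here.
        Ok(self.schema.clone())
    }
}

#[tokio::main]
async fn main() {
    let store = MemoryStore {
        schema: "type Query { hello: String }".to_string(),
    };
    println!("{}", store.api_schema().await.unwrap());
}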
diff --git a/store/postgres/src/relational.rs b/store/postgres/src/relational.rs
index 35e35a35746..e035764d34e 100644
--- a/store/postgres/src/relational.rs
+++ b/store/postgres/src/relational.rs
@@ -24,10 +24,11 @@ use diesel::deserialize::FromSql;
 use diesel::pg::Pg;
 use diesel::serialize::{Output, ToSql};
 use diesel::sql_types::Text;
-use diesel::{connection::SimpleConnection, Connection};
-use diesel::{
-    debug_query, sql_query, OptionalExtension, PgConnection, QueryDsl, QueryResult, RunQueryDsl,
-};
+use diesel::{debug_query, sql_query, OptionalExtension, QueryDsl, QueryResult};
+use diesel_async::scoped_futures::ScopedFutureExt;
+use diesel_async::{AsyncConnection, RunQueryDsl, SimpleAsyncConnection};
+use tokio;
+
 use graph::blockchain::block_stream::{EntityOperationKind, EntitySourceOperation};
 use graph::blockchain::BlockTime;
 use graph::cheap_clone::CheapClone;
@@ -78,7 +79,7 @@ use graph::prelude::{
 use crate::block_range::{BoundSide, BLOCK_COLUMN, BLOCK_RANGE_COLUMN};
 pub use crate::catalog::Catalog;
 use crate::ForeignServer;
-use crate::{catalog, deployment};
+use crate::{catalog, deployment, AsyncPgConnection};

 use self::rollup::Rollup;

@@ -374,20 +375,20 @@ impl Layout {
         }
     }

-    pub fn create_relational_schema(
-        conn: &mut PgConnection,
+    pub async fn create_relational_schema(
+        conn: &mut AsyncPgConnection,
         site: Arc<Site>,
         schema: &InputSchema,
         entities_with_causality_region: BTreeSet<EntityType>,
         index_def: Option<IndexList>,
     ) -> Result<Layout, StoreError> {
         let catalog =
-            Catalog::for_creation(conn, site.cheap_clone(), entities_with_causality_region)?;
+            Catalog::for_creation(conn, site.cheap_clone(), entities_with_causality_region).await?;
         let layout = Self::new(site, schema, catalog)?;
         let sql = layout
             .as_ddl(index_def)
             .map_err(|_| StoreError::Unknown(anyhow!("failed to generate DDL for layout")))?;
-        conn.batch_execute(&sql)?;
+        conn.batch_execute(&sql).await?;
         Ok(layout)
     }
@@ -410,7 +411,7 @@ impl Layout {
     /// Import the database schema for this layout from its own database
     /// shard (in `self.site.shard`) into the database represented by `conn`
     /// if the schema for this layout does not exist yet
-    pub fn import_schema(&self, conn: &mut PgConnection) -> Result<(), StoreError> {
+    pub async fn import_schema(&self, conn: &mut AsyncPgConnection) -> Result<(), StoreError> {
         let make_query = || -> Result<String, fmt::Error> {
             let nsp = self.site.namespace.as_str();
             let srvname = ForeignServer::name(&self.site.shard);
@@ -429,7 +430,7 @@ impl Layout {
             Ok(query)
         };

-        if !catalog::has_namespace(conn, &self.site.namespace)? {
+        if !catalog::has_namespace(conn, &self.site.namespace).await? {
             let query = make_query().map_err(|_| {
                 StoreError::Unknown(anyhow!(
                     "failed to generate SQL to import foreign schema {}",
                     self.site.namespace
                 ))
             })?;

-            conn.batch_execute(&query)?;
+            conn.batch_execute(&query).await?;
         }
         Ok(())
     }
@@ -457,9 +458,9 @@ impl Layout {
             .ok_or_else(|| StoreError::UnknownTable(entity.to_string()))
     }

-    pub fn find(
+    pub async fn find(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         key: &EntityKey,
         block: BlockNumber,
     ) -> Result<Option<Entity>, StoreError> {
@@ -474,15 +475,16 @@ impl Layout {

         query
             .get_result::<EntityData>(conn)
+            .await
             .optional()?
             .map(|row| Entity::from_oid_row(row, &self.input_schema, &columns))
             .transpose()
     }

     // An optimization when looking up multiple entities, it will generate a single sql query using `UNION ALL`.
-    pub fn find_many(
+    pub async fn find_many(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         ids_for_type: &BTreeMap<(EntityType, CausalityRegion), IdList>,
         block: BlockNumber,
     ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> {
@@ -496,7 +498,7 @@ impl Layout {
         }
         let query = FindManyQuery::new(tables, ids_for_type, block);
         let mut entities: BTreeMap<EntityKey, Entity> = BTreeMap::new();
-        for data in query.load::<EntityData>(conn)? {
+        for data in query.load::<EntityData>(conn).await? {
             let entity_type = data.entity_type(&self.input_schema);
             let entity_data: Entity = data.deserialize_with_layout(self, None)?;
@@ -516,9 +518,9 @@ impl Layout {
         Ok(entities)
     }

-    pub fn find_range(
+    pub async fn find_range(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         entity_types: Vec<EntityType>,
         causality_region: CausalityRegion,
         block_range: Range<BlockNumber>,
     ) -> Result<BTreeMap<BlockNumber, Vec<EntitySourceOperation>>, StoreError> {
@@ -540,6 +542,7 @@ impl Layout {
             block_range.clone(),
         )
         .get_results::<EntityDataExt>(conn)
+        .await
         .optional()?
         .unwrap_or_default();
         // Collect all entities that have their 'upper(block_range)' attribute in the
@@ -552,6 +555,7 @@ impl Layout {
         let upper_vec =
             FindRangeQuery::new(&tables, causality_region, BoundSide::Upper, block_range)
                 .get_results::<EntityDataExt>(conn)
+                .await
                 .optional()?
                 .unwrap_or_default();
         let mut lower_iter = lower_vec.iter().fuse().peekable();
@@ -653,9 +657,9 @@ impl Layout {
         Ok(entities)
     }

-    pub fn find_derived(
+    pub async fn find_derived(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         derived_query: &DerivedEntityQuery,
         block: BlockNumber,
         excluded_keys: &Vec<EntityKey>,
     ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> {
@@ -667,7 +671,7 @@ impl Layout {

         let mut entities = BTreeMap::new();

-        for data in query.load::<EntityData>(conn)? {
+        for data in query.load::<EntityData>(conn).await? {
             let entity_type = data.entity_type(&self.input_schema);
             let entity_data: Entity = data.deserialize_with_layout(self, None)?;
             let key =
@@ -678,9 +682,9 @@ impl Layout {
         Ok(entities)
     }

-    pub fn find_changes(
+    pub async fn find_changes(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         block: BlockNumber,
     ) -> Result<Vec<EntityOperation>, StoreError> {
         let mut tables = Vec::new();
@@ -690,10 +694,12 @@ impl Layout {
             }
         }

-        let inserts_or_updates =
-            FindChangesQuery::new(&tables[..], block).load::<EntityData>(conn)?;
-        let deletions =
-            FindPossibleDeletionsQuery::new(&tables[..], block).load::<EntityDeletionData>(conn)?;
+        let inserts_or_updates = FindChangesQuery::new(&tables[..], block)
+            .load::<EntityData>(conn)
+            .await?;
+        let deletions = FindPossibleDeletionsQuery::new(&tables[..], block)
+            .load::<EntityDeletionData>(conn)
+            .await?;

         let mut processed_entities = HashSet::new();
         let mut changes = Vec::new();
@@ -726,9 +732,9 @@ impl Layout {
         Ok(changes)
     }

-    pub fn insert<'a>(
+    pub async fn insert<'a>(
         &'a self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         group: &'a RowGroup,
         stopwatch: &StopwatchMetrics,
     ) -> Result<(), StoreError> {
@@ -763,6 +769,7 @@ impl Layout {
             if !chunk.is_empty() {
                 InsertQuery::new(table, &chunk)?
                     .execute(conn)
+                    .await
                     .map_err(|e| {
                         let (block, msg) = chunk_details(&chunk);
                         StoreError::write_failure(e, table.object.as_str(), block, msg)
@@ -772,23 +779,24 @@ impl Layout {
         Ok(())
     }

-    pub fn conflicting_entities(
+    pub async fn conflicting_entities(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         entities: &[EntityType],
         group: &RowGroup,
     ) -> Result<Option<(String, String)>, StoreError> {
         Ok(ConflictingEntitiesQuery::new(self, entities, group)?
-            .load(conn)?
+            .load(conn)
+            .await?
             .pop()
             .map(|data: ConflictingEntitiesData| (data.entity, data.id)))
     }

     /// order is a tuple (attribute, value_type, direction)
-    pub fn query(
+    pub async fn query(
         &self,
         logger: &Logger,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         query: EntityQuery,
     ) -> Result<(Vec<EntityData>, Trace), QueryExecutionError> {
         fn log_query_timing(
@@ -852,11 +860,15 @@ impl Layout {
         let start = Instant::now();
         let values = conn
             .transaction(|conn| {
-                if let Some(ref timeout_sql) = *STATEMENT_TIMEOUT {
-                    conn.batch_execute(timeout_sql)?;
+                async {
+                    if let Some(ref timeout_sql) = *STATEMENT_TIMEOUT {
+                        conn.batch_execute(timeout_sql).await?;
+                    }
+                    query.load::<EntityData>(conn).await
                 }
-                query.load::<EntityData>(conn)
+                .scope_boxed()
             })
+            .await
             .map_err(|e| {
                 use diesel::result::DatabaseErrorKind;
                 use diesel::result::Error::*;
@@ -897,9 +909,9 @@ impl Layout {
             .map(|values| (values, trace))
     }

-    pub fn update<'a>(
+    pub async fn update<'a>(
         &'a self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         group: &'a RowGroup,
         stopwatch: &StopwatchMetrics,
     ) -> Result<usize, StoreError> {
@@ -925,7 +937,9 @@ impl Layout {
                 group.entity_type.id_type()?,
                 entity_keys.into_iter().map(|id| id.to_owned()),
             )?;
-            ClampRangeQuery::new(table, &entity_keys, block)?.execute(conn)?;
+            ClampRangeQuery::new(table, &entity_keys, block)?
+                .execute(conn)
+                .await?;
         }
         section.end();
@@ -936,15 +950,15 @@ impl Layout {
         // not exceed the maximum number of bindings allowed in queries
         let chunk_size = InsertQuery::chunk_size(table);
         for chunk in group.write_chunks(chunk_size) {
-            count += InsertQuery::new(table, &chunk)?.execute(conn)?;
+            count += InsertQuery::new(table, &chunk)?.execute(conn).await?;
         }

         Ok(count)
     }

-    pub fn delete(
+    pub async fn delete(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         group: &RowGroup,
         stopwatch: &StopwatchMetrics,
     ) -> Result<usize, StoreError> {
@@ -986,6 +1000,7 @@ impl Layout {
                 )?;
                 count += ClampRangeQuery::new(table, &chunk, block)?
                     .execute(conn)
+                    .await
                     .map_err(|e| {
                         StoreError::write_failure(
                             e,
@@ -999,9 +1014,11 @@ impl Layout {
         Ok(count)
     }

-    pub fn truncate_tables(&self, conn: &mut PgConnection) -> Result<(), StoreError> {
+    pub async fn truncate_tables(&self, conn: &mut AsyncPgConnection) -> Result<(), StoreError> {
         for table in self.tables.values() {
-            sql_query(&format!("TRUNCATE TABLE {}", table.qualified_name)).execute(conn)?;
+            sql_query(&format!("TRUNCATE TABLE {}", table.qualified_name))
+                .execute(conn)
+                .await?;
         }
         Ok(())
     }
@@ -1013,9 +1030,9 @@ impl Layout {
     ///
     /// The `i32` that is returned is the amount by which the entity count
     /// for the subgraph needs to be adjusted
-    pub fn revert_block(
+    pub async fn revert_block(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         block: BlockNumber,
     ) -> Result<i32, StoreError> {
         let mut count: i32 = 0;
@@ -1024,7 +1041,8 @@ impl Layout {
             // Remove all versions whose entire block range lies beyond
             // `block`
             let removed: HashSet<_> = RevertRemoveQuery::new(table, block)
-                .get_results::<ReturnedEntityData>(conn)?
+                .get_results::<ReturnedEntityData>(conn)
+                .await?
                 .into_iter()
                 .collect();
             // Make the versions current that existed at `block - 1` but that
@@ -1034,7 +1052,8 @@ impl Layout {
                 HashSet::new()
             } else {
                 RevertClampQuery::new(table, block - 1)?
-                    .get_results(conn)?
+                    .get_results(conn)
+                    .await?
                     .into_iter()
                     .collect::<HashSet<_>>()
             };
@@ -1055,14 +1074,14 @@ impl Layout {
     ///
     /// For metadata, reversion always means deletion since the metadata that
     /// is subject to reversion is only ever created but never updated
-    pub fn revert_metadata(
+    pub async fn revert_metadata(
         logger: &Logger,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         site: &Site,
         block: BlockNumber,
     ) -> Result<(), StoreError> {
-        crate::dynds::revert(conn, site, block)?;
-        crate::deployment::revert_subgraph_errors(logger, conn, &site.deployment, block)?;
+        crate::dynds::revert(conn, site, block).await?;
+        crate::deployment::revert_subgraph_errors(logger, conn, &site.deployment, block).await?;

         Ok(())
     }
@@ -1082,13 +1101,13 @@ impl Layout {
     /// This is tied closely to how the `LayoutCache` works and called from
     /// it right after creating a `Layout`, and periodically to update the
     /// `Layout` in case changes were made
-    fn refresh(
+    async fn refresh(
         self: Arc<Self>,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         site: Arc<Site>,
     ) -> Result<Arc<Layout>, StoreError> {
-        let account_like = crate::catalog::account_like(conn, &self.site)?;
-        let history_blocks = deployment::history_blocks(conn, &self.site)?;
+        let account_like = crate::catalog::account_like(conn, &self.site).await?;
+        let history_blocks = deployment::history_blocks(conn, &self.site).await?;

         let is_account_like = { |table: &Table| account_like.contains(table.name.as_str()) };
@@ -1119,11 +1138,11 @@ impl Layout {
     /// for all aggregations, meaning that if some aggregations do not have
     /// an entry with the maximum timestamp that there was just no data for
     /// that interval, but we did try to aggregate at that time.
-    pub(crate) fn last_rollup(
+    pub(crate) async fn last_rollup(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
     ) -> Result<Option<BlockTime>, StoreError> {
-        Rollup::last_rollup(&self.rollups, conn)
+        Rollup::last_rollup(&self.rollups, conn).await
     }

     /// Construct `Rolllup` for each of the aggregation mappings
@@ -1181,9 +1200,9 @@ impl Layout {
     ///
     /// Changing this would require that we have a complete list of block
     /// numbers and block times which we do not have anywhere in graph-node.
-    pub(crate) fn rollup(
+    pub(crate) async fn rollup(
        &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         last_rollup: Option<BlockTime>,
         block_times: &[(BlockNumber, BlockTime)],
     ) -> Result<(), StoreError> {
@@ -1224,7 +1243,7 @@ impl Layout {
             // that, which will roll up the bucket `t5 <= b2 < t6`. So
             // there's no need to worry about the buckets starting at
             // `t2`, `t3`, and `t4`.
-            match buckets.first() {
+            match buckets.as_slice().first() {
                 None => {
                     // The rollups are in increasing order of interval size, so
                     // if a smaller interval doesn't have a bucket between
@@ -1233,7 +1252,7 @@ impl Layout {
                     break;
                 }
                 Some(bucket) => {
-                    rollup.insert(conn, &bucket, *block)?;
+                    rollup.insert(conn, &bucket, *block).await?;
                 }
             }
         }
@@ -1710,10 +1729,10 @@ impl Table {
         .expect("every table has a primary key")
     }

-    pub(crate) fn analyze(&self, conn: &mut PgConnection) -> Result<(), StoreError> {
+    pub(crate) async fn analyze(&self, conn: &mut AsyncPgConnection) -> Result<(), StoreError> {
         let table_name = &self.qualified_name;
         let sql = format!("analyze (skip_locked) {table_name}");
-        sql_query(&sql).execute(conn)?;
+        sql_query(&sql).execute(conn).await?;
         Ok(())
     }
@@ -1745,7 +1764,7 @@ pub struct LayoutCache {
     ttl: Duration,
     /// Use this so that we only refresh one layout at any given time to
     /// avoid refreshing the same layout multiple times
-    refresh: Mutex<()>,
+    refresh: tokio::sync::Mutex<()>,
     last_sweep: Mutex<Instant>,
 }

@@ -1754,18 +1773,22 @@ impl LayoutCache {
         Self {
             entries: Mutex::new(HashMap::new()),
             ttl,
-            refresh: Mutex::new(()),
+            refresh: tokio::sync::Mutex::new(()),
             last_sweep: Mutex::new(Instant::now()),
         }
     }

-    fn load(conn: &mut PgConnection, site: Arc<Site>) -> Result<Arc<Layout>, StoreError> {
-        let (subgraph_schema, use_bytea_prefix) = deployment::schema(conn, site.as_ref())?;
+    async fn load(
+        conn: &mut AsyncPgConnection,
+        site: Arc<Site>,
+    ) -> Result<Arc<Layout>, StoreError> {
+        let (subgraph_schema, use_bytea_prefix) = deployment::schema(conn, site.as_ref()).await?;
         let has_causality_region =
-            deployment::entities_with_causality_region(conn, site.id, &subgraph_schema)?;
-        let catalog = Catalog::load(conn, site.clone(), use_bytea_prefix, has_causality_region)?;
+            deployment::entities_with_causality_region(conn, site.id, &subgraph_schema).await?;
+        let catalog =
+            Catalog::load(conn, site.clone(), use_bytea_prefix, has_causality_region).await?;
         let layout = Arc::new(Layout::new(site.clone(), &subgraph_schema, catalog)?);
-        layout.refresh(conn, site)
+        layout.refresh(conn, site).await
     }

     fn cache(&self, layout: Arc<Layout>) {
@@ -1792,10 +1815,10 @@ impl LayoutCache {
     /// Get the layout for `site`. If it's not in cache, load it. If it is
     /// expired, try to refresh it if there isn't another refresh happening
     /// already
-    pub fn get(
+    pub async fn get(
         &self,
         logger: &Logger,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         site: Arc<Site>,
     ) -> Result<Arc<Layout>, StoreError> {
         let now = Instant::now();
@@ -1817,12 +1840,12 @@ impl LayoutCache {
                         if refresh.is_err() {
                             value
                         } else {
-                            self.refresh(logger, conn, site, value)
+                            self.refresh(logger, conn, site, value).await
                         }
                     }
                 }
                 None => {
-                    let layout = Self::load(conn, site)?;
+                    let layout = Self::load(conn, site).await?;
                     self.cache(layout.cheap_clone());
                     layout
                 }
@@ -1831,14 +1854,14 @@ impl LayoutCache {
         Ok(layout)
     }

-    fn refresh(
+    async fn refresh(
         &self,
         logger: &Logger,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         site: Arc<Site>,
         value: Arc<Layout>,
     ) -> Arc<Layout> {
-        match value.cheap_clone().refresh(conn, site) {
+        match value.cheap_clone().refresh(conn, site).await {
             Err(e) => {
                 warn!(
                     logger,
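Review note: the `transaction` + `scope_boxed` shape in `Layout::query` above recurs throughout this patch, so here is a compile-ready sketch of it in isolation. It assumes the `diesel`, `diesel-async`, and `tokio` crates and a Postgres reachable via `DATABASE_URL`; it is not graph-node code.

use diesel::sql_query;
use diesel_async::scoped_futures::ScopedFutureExt;
use diesel_async::{AsyncConnection, AsyncPgConnection, RunQueryDsl};

async fn with_timeout(conn: &mut AsyncPgConnection) -> Result<usize, diesel::result::Error> {
    // diesel-async transactions take a closure returning a ScopedBoxFuture,
    // which is exactly what `.scope_boxed()` produces from an async block.
    conn.transaction(|conn| {
        async {
            // Set a statement timeout that applies only inside this transaction...
            sql_query("set local statement_timeout = '5s'")
                .execute(conn)
                .await?;
            // ...then run the actual query under that timeout.
            sql_query("select pg_sleep(0)").execute(conn).await
        }
        .scope_boxed()
    })
    .await
}

#[tokio::main]
async fn main() {
    let url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");
    let mut conn = AsyncPgConnection::establish(&url).await.expect("connect");
    with_timeout(&mut conn).await.expect("query");
}

Using `SET LOCAL` rather than `SET` mirrors the diff's intent: the timeout is scoped to the transaction, so a pooled connection is not left with a stray session setting.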
diff --git a/store/postgres/src/relational/index.rs b/store/postgres/src/relational/index.rs
index efa82e901f0..c72d832ba7a 100644
--- a/store/postgres/src/relational/index.rs
+++ b/store/postgres/src/relational/index.rs
@@ -4,8 +4,9 @@ use std::collections::HashMap;
 use std::fmt::{Display, Write};
 use std::sync::Arc;

+use diesel::sql_query;
 use diesel::sql_types::{Bool, Text};
-use diesel::{sql_query, Connection, PgConnection, RunQueryDsl};
+use diesel_async::RunQueryDsl;
 use graph::components::store::StoreError;
 use graph::itertools::Itertools;
 use graph::prelude::{
@@ -15,11 +16,11 @@ use graph::prelude::{
 };

 use crate::block_range::{BLOCK_COLUMN, BLOCK_RANGE_COLUMN};
-use crate::catalog;
 use crate::command_support::catalog::Site;
 use crate::deployment_store::DeploymentStore;
 use crate::primary::Namespace;
 use crate::relational::{BYTE_ARRAY_PREFIX_SIZE, STRING_PREFIX_SIZE};
+use crate::{catalog, AsyncPgConnection};

 use super::{Layout, Table, VID_COLUMN};

@@ -752,19 +753,19 @@ pub struct IndexList {
     pub(crate) indexes: HashMap<String, Vec<CreateIndex>>,
 }

-pub fn load_indexes_from_table(
-    conn: &mut PgConnection,
+pub async fn load_indexes_from_table(
+    conn: &mut AsyncPgConnection,
     table: &Arc<Table>,
     schema_name: &str,
 ) -> Result<Vec<CreateIndex>, StoreError> {
     let table_name = table.name.as_str();
-    let indexes = catalog::indexes_for_table(conn, schema_name, table_name)?;
+    let indexes = catalog::indexes_for_table(conn, schema_name, table_name).await?;
     Ok(indexes.into_iter().map(CreateIndex::parse).collect())
 }

 impl IndexList {
-    pub fn load(
-        conn: &mut PgConnection,
+    pub async fn load(
+        conn: &mut AsyncPgConnection,
         site: Arc<Site>,
         store: DeploymentStore,
     ) -> Result<IndexList, StoreError> {
@@ -772,9 +773,9 @@ impl IndexList {
             indexes: HashMap::new(),
         };
         let schema_name = site.namespace.clone();
-        let layout = store.layout(conn, site)?;
+        let layout = store.layout(conn, site).await?;
         for (_, table) in &layout.tables {
-            let indexes = load_indexes_from_table(conn, table, schema_name.as_str())?;
+            let indexes = load_indexes_from_table(conn, table, schema_name.as_str()).await?;
             list.indexes.insert(table.name.to_string(), indexes);
         }
         Ok(list)
@@ -818,9 +819,9 @@ impl IndexList {
         Ok(arr)
     }

-    pub fn recreate_invalid_indexes(
+    pub async fn recreate_invalid_indexes(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         layout: &Layout,
     ) -> Result<(), StoreError> {
         #[derive(QueryableByName, Debug)]
@@ -851,7 +852,8 @@ impl IndexList {
                     .bind::<Text, _>(namespace.to_string())
                     .bind::<Text, _>(table_name)
                     .bind::<Text, _>(index_name.clone())
-                    .get_results::<IndexInfo>(conn)?
+                    .get_results::<IndexInfo>(conn)
+                    .await?
                     .into_iter()
                     .map(|ii| ii.into())
                     .collect::<Vec<CreateIndex>>();
@@ -864,9 +866,9 @@ impl IndexList {
                         namespace.to_string(),
                         index_name
                     ));
-                    conn.transaction(|conn| drop_query.execute(conn))?;
+                    drop_query.execute(conn).await?;
                 }
-                sql_query(create_query).execute(conn)?;
+                sql_query(create_query).execute(conn).await?;
             }
         }
     }
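Review note: `recreate_invalid_indexes` above drives raw SQL with typed binds through diesel-async. A compile-ready sketch of that idiom, querying `pg_indexes` with an illustrative `IndexRow` type (the names are mine, not the patch's):

use diesel::sql_query;
use diesel::sql_types::Text;
use diesel_async::{AsyncPgConnection, RunQueryDsl};

// Rows from `sql_query` are deserialized by column name, so the struct
// derives `QueryableByName` and annotates each field's SQL type.
#[derive(diesel::QueryableByName, Debug)]
struct IndexRow {
    #[diesel(sql_type = Text)]
    indexname: String,
}

async fn indexes_for(
    conn: &mut AsyncPgConnection,
    namespace: &str,
    table: &str,
) -> Result<Vec<String>, diesel::result::Error> {
    // Positional binds ($1, $2) are filled in the order `.bind` is called.
    let rows = sql_query(
        "select indexname from pg_indexes where schemaname = $1 and tablename = $2",
    )
    .bind::<Text, _>(namespace)
    .bind::<Text, _>(table)
    .get_results::<IndexRow>(conn)
    .await?;
    Ok(rows.into_iter().map(|r| r.indexname).collect())
}

Note that only `diesel_async::RunQueryDsl` is imported: bringing the sync `diesel::RunQueryDsl` into the same scope would make method calls like `get_results` ambiguous, which is presumably why the hunks above swap the import rather than adding to it.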
diff --git a/store/postgres/src/relational/prune.rs b/store/postgres/src/relational/prune.rs
index 1c33eca4aeb..bf23245013d 100644
--- a/store/postgres/src/relational/prune.rs
+++ b/store/postgres/src/relational/prune.rs
@@ -1,17 +1,16 @@
 use std::{collections::HashMap, fmt::Write, sync::Arc};

 use diesel::{
-    connection::SimpleConnection,
     sql_query,
     sql_types::{BigInt, Integer},
-    Connection, PgConnection, RunQueryDsl,
+};
+use diesel_async::{
+    scoped_futures::{ScopedBoxFuture, ScopedFutureExt},
+    AsyncConnection, RunQueryDsl, SimpleAsyncConnection,
 };
 use graph::{
     components::store::{PrunePhase, PruneReporter, PruneRequest, PruningStrategy, VersionStats},
-    prelude::{
-        BlockNumber, CancelHandle, CancelToken, CancelableError, CheapClone, StoreError,
-        BLOCK_NUMBER_MAX,
-    },
+    prelude::{BlockNumber, CancelableError, CheapClone, StoreError, BLOCK_NUMBER_MAX},
     schema::InputSchema,
     slog::{warn, Logger},
 };
@@ -23,6 +22,7 @@ use crate::{
     deployment,
     relational::{Table, VID_COLUMN},
     vid_batcher::{VidBatcher, VidRange},
+    AsyncPgConnection,
 };

 use super::{
@@ -49,8 +49,8 @@ impl TablePair {
     /// Create a `TablePair` for `src`. This creates a new table `dst` with
     /// the same structure as the `src` table in the database, but in a
     /// different namespace so that the names of indexes etc. don't clash
-    fn create(
-        conn: &mut PgConnection,
+    async fn create(
+        conn: &mut AsyncPgConnection,
         src: Arc<Table>,
         src_nsp: Namespace,
         dst_nsp: Namespace,
@@ -60,13 +60,14 @@ impl TablePair {
         let dst = src.new_like(&dst_nsp, &src.name);

         let mut query = String::new();
-        if catalog::table_exists(conn, dst_nsp.as_str(), &dst.name)? {
+        if catalog::table_exists(conn, dst_nsp.as_str(), &dst.name).await? {
             writeln!(query, "truncate table {};", dst.qualified_name)?;
         } else {
             let mut list = IndexList {
                 indexes: HashMap::new(),
             };
-            let indexes = load_indexes_from_table(conn, &src, src_nsp.as_str())?
+            let indexes = load_indexes_from_table(conn, &src, src_nsp.as_str())
+                .await?
                 .into_iter()
                 .map(|index| index.with_nsp(dst_nsp.to_string()))
                 .collect::<Result<Vec<_>, _>>()?;
@@ -76,7 +77,7 @@ impl TablePair {
             // as the asumption is that there is not that much data inserted.
             dst.as_ddl(schema, catalog, Some(&list), &mut query)?;
         }
-        conn.batch_execute(&query)?;
+        conn.batch_execute(&query).await?;

         Ok(TablePair {
             src,
@@ -90,32 +91,33 @@ impl TablePair {
     /// `final_block` in batches, where each batch is a separate
     /// transaction. Write activity for nonfinal blocks can happen
     /// concurrently to this copy
-    fn copy_final_entities(
+    async fn copy_final_entities(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         reporter: &mut dyn PruneReporter,
         tracker: &status::Tracker,
         earliest_block: BlockNumber,
         final_block: BlockNumber,
-        cancel: &CancelHandle,
     ) -> Result<(), CancelableError<StoreError>> {
-        let column_list = self.column_list();
+        let column_list = Arc::new(self.column_list());

         // Determine the last vid that we need to copy
-        let range = VidRange::for_prune(conn, &self.src, earliest_block, final_block)?;
-        let mut batcher = VidBatcher::load(conn, &self.src_nsp, &self.src, range)?;
-        tracker.start_copy_final(conn, &self.src, range)?;
+        let range = VidRange::for_prune(conn, &self.src, earliest_block, final_block).await?;
+        let mut batcher = VidBatcher::load(conn, &self.src_nsp, &self.src, range).await?;
+        tracker.start_copy_final(conn, &self.src, range).await?;

         while !batcher.finished() {
             let rows = batch_with_timeout(conn, &mut batcher, |conn, start, end| {
-                // Page through all rows in `src` in batches of `batch_size`
-                // and copy the ones that are visible to queries at block
-                // heights between `earliest_block` and `final_block`, but
-                // whose block_range does not extend past `final_block`
-                // since they could still be reverted while we copy.
-                // The conditions on `block_range` are expressed redundantly
-                // to make more indexes useable
-                sql_query(format!(
+                let column_list = column_list.cheap_clone();
+                async move {
+                    // Page through all rows in `src` in batches of `batch_size`
+                    // and copy the ones that are visible to queries at block
+                    // heights between `earliest_block` and `final_block`, but
+                    // whose block_range does not extend past `final_block`
+                    // since they could still be reverted while we copy.
+                    // The conditions on `block_range` are expressed redundantly
+                    // to make more indexes useable
+                    sql_query(format!(
                 "/* controller=prune,phase=final,start_vid={start},batch_size={batch_size} */ \
                  insert into {dst}({column_list}) \
                  select {column_list} from {src} \
@@ -129,16 +131,21 @@ impl TablePair {
                     dst = self.dst.qualified_name,
                     batch_size = end - start + 1,
                 ))
-                .bind::<Integer, _>(earliest_block)
-                .bind::<Integer, _>(final_block)
-                .bind::<BigInt, _>(start)
-                .bind::<BigInt, _>(end)
-                .execute(conn)
-                .map_err(StoreError::from)
-            })?;
+                    .bind::<Integer, _>(earliest_block)
+                    .bind::<Integer, _>(final_block)
+                    .bind::<BigInt, _>(start)
+                    .bind::<BigInt, _>(end)
+                    .execute(conn)
+                    .await
+                    .map_err(StoreError::from)
+                }
+                .scope_boxed()
+            })
+            .await?;
             let rows = rows.unwrap_or(0);
-            tracker.finish_batch(conn, &self.src, rows as i64, &batcher)?;
-            cancel.check_cancel()?;
+            tracker
+                .finish_batch(conn, &self.src, rows as i64, &batcher)
+                .await?;

             reporter.prune_batch(
                 self.src.name.as_str(),
@@ -153,22 +160,24 @@ impl TablePair {
     /// Copy all entity versions visible after `final_block` in batches,
     /// where each batch is a separate transaction. This assumes that all
     /// other write activity to the source table is blocked while we copy
-    fn copy_nonfinal_entities(
+    async fn copy_nonfinal_entities(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         reporter: &mut dyn PruneReporter,
         tracker: &status::Tracker,
         final_block: BlockNumber,
     ) -> Result<(), StoreError> {
-        let column_list = self.column_list();
+        let column_list = Arc::new(self.column_list());

         // Determine the last vid that we need to copy
-        let range = VidRange::for_prune(conn, &self.src, final_block + 1, BLOCK_NUMBER_MAX)?;
-        let mut batcher = VidBatcher::load(conn, &self.src.nsp, &self.src, range)?;
-        tracker.start_copy_nonfinal(conn, &self.src, range)?;
+        let range = VidRange::for_prune(conn, &self.src, final_block + 1, BLOCK_NUMBER_MAX).await?;
+        let mut batcher = VidBatcher::load(conn, &self.src.nsp, &self.src, range).await?;
+        tracker.start_copy_nonfinal(conn, &self.src, range).await?;

         while !batcher.finished() {
             let rows = batch_with_timeout(conn, &mut batcher, |conn, start, end| {
+                let column_list = column_list.cheap_clone();
+                async move {
                 // Page through all the rows in `src` in batches of
                 // `batch_size` that are visible to queries at block heights
                 // starting right after `final_block`. The conditions on
@@ -190,11 +199,14 @@ impl TablePair {
                 .bind::<BigInt, _>(start)
                 .bind::<BigInt, _>(end)
                 .execute(conn)
+                .await
                 .map_err(StoreError::from)
-            })?;
+            }.scope_boxed()}).await?;

             let rows = rows.unwrap_or(0);
-            tracker.finish_batch(conn, &self.src, rows as i64, &batcher)?;
+            tracker
+                .finish_batch(conn, &self.src, rows as i64, &batcher)
+                .await?;

             reporter.prune_batch(
                 self.src.name.as_str(),
@@ -207,7 +219,7 @@ impl TablePair {
     }

     /// Replace the `src` table with the `dst` table
-    fn switch(self, logger: &Logger, conn: &mut PgConnection) -> Result<(), StoreError> {
+    async fn switch(self, logger: &Logger, conn: &mut AsyncPgConnection) -> Result<(), StoreError> {
         let src_qname = &self.src.qualified_name;
         let dst_qname = &self.dst.qualified_name;
         let src_nsp = &self.src_nsp;
@@ -217,7 +229,7 @@ impl TablePair {

         // What we are about to do would get blocked by autovacuum on our
         // tables, so just kill the autovacuum
-        if let Err(e) = catalog::cancel_vacuum(conn, src_nsp) {
+        if let Err(e) = catalog::cancel_vacuum(conn, src_nsp).await {
             warn!(logger, "Failed to cancel vacuum during pruning; trying to carry on regardless";
                   "src" => src_nsp.as_str(), "error" => e.to_string());
         }
@@ -236,7 +248,8 @@ impl TablePair {
         writeln!(query, "drop table {src_qname};")?;
         writeln!(query, "alter table {dst_qname} set schema {src_nsp}")?;

-        conn.transaction(|conn| conn.batch_execute(&query))?;
+        conn.transaction(|conn| conn.batch_execute(&query).scope_boxed())
+            .await?;

         Ok(())
     }
@@ -252,22 +265,20 @@ impl TablePair {
 impl Layout {
     /// Analyze the `tables` and return `VersionStats` for all tables in
     /// this `Layout`
-    fn analyze_tables(
+    async fn analyze_tables(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         reporter: &mut dyn PruneReporter,
         mut tables: Vec<&Arc<Table>>,
-        cancel: &CancelHandle,
-    ) -> Result<Vec<VersionStats>, CancelableError<StoreError>> {
+    ) -> Result<Vec<VersionStats>, StoreError> {
         reporter.start_analyze();
         tables.sort_by_key(|table| table.name.as_str());
         for table in &tables {
             reporter.start_analyze_table(table.name.as_str());
-            table.analyze(conn)?;
+            table.analyze(conn).await?;
             reporter.finish_analyze_table(table.name.as_str());
-            cancel.check_cancel()?;
         }
-        let stats = self.catalog.stats(conn)?;
+        let stats = self.catalog.stats(conn).await?;

         let analyzed: Vec<_> = tables.iter().map(|table| table.name.as_str()).collect();
         reporter.finish_analyze(&stats, &analyzed);
@@ -279,17 +290,16 @@ impl Layout {
     /// is `true`, analyze all tables before getting statistics. If it is
     /// `false`, only analyze tables that Postgres' autovacuum daemon would
     /// consider needing analysis.
-    fn version_stats(
+    async fn version_stats(
         &self,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         reporter: &mut dyn PruneReporter,
         analyze_all: bool,
-        cancel: &CancelHandle,
-    ) -> Result<Vec<VersionStats>, CancelableError<StoreError>> {
+    ) -> Result<Vec<VersionStats>, StoreError> {
         let needs_analyze = if analyze_all {
             vec![]
         } else {
-            catalog::needs_autoanalyze(conn, &self.site.namespace)?
+            catalog::needs_autoanalyze(conn, &self.site.namespace).await?
         };
         let tables: Vec<_> = self
             .tables
@@ -297,7 +307,7 @@ impl Layout {
             .filter(|table| analyze_all || needs_analyze.contains(&table.name))
             .collect();

-        self.analyze_tables(conn, reporter, tables, cancel)
+        self.analyze_tables(conn, reporter, tables).await
     }

     /// Return all tables and the strategy to prune them withir stats whose ratio of distinct entities
@@ -366,55 +376,55 @@ impl Layout {
     /// fails, e.g. because the database is not available. All errors that
     /// happen during pruning itself will be stored in the `prune_state`
     /// table and this method will return `Ok`
-    pub fn prune(
+    pub async fn prune(
         self: Arc<Self>,
         logger: &Logger,
         reporter: &mut dyn PruneReporter,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         req: &PruneRequest,
-        cancel: &CancelHandle,
     ) -> Result<(), CancelableError<StoreError>> {
-        let tracker = status::Tracker::new(conn, self.clone())?;
+        let tracker = status::Tracker::new(conn, self.clone()).await?;

-        let res = self.prune_inner(logger, reporter, conn, req, cancel, &tracker);
+        let res = self
+            .prune_inner(logger, reporter, conn, req, &tracker)
+            .await;

         match res {
             Ok(_) => {
-                tracker.finish(conn)?;
+                tracker.finish(conn).await?;
             }
             Err(e) => {
                 // If we get an error, we need to set the error in the
                 // database and finish the tracker
                 let err = e.to_string();
-                tracker.error(conn, &err)?;
+                tracker.error(conn, &err).await?;
             }
         }

         Ok(())
     }

-    fn prune_inner(
+    async fn prune_inner(
         self: Arc<Self>,
         logger: &Logger,
         reporter: &mut dyn PruneReporter,
-        conn: &mut PgConnection,
+        conn: &mut AsyncPgConnection,
         req: &PruneRequest,
-        cancel: &CancelHandle,
         tracker: &status::Tracker,
     ) -> Result<(), CancelableError<StoreError>> {
         reporter.start(req);

-        let stats = self.version_stats(conn, reporter, true, cancel)?;
+        let stats = self.version_stats(conn, reporter, true).await?;

         let prunable_tables: Vec<_> = self.prunable_tables(&stats, req).into_iter().collect();
-        tracker.start(conn, req, &prunable_tables)?;
+        tracker.start(conn, req, &prunable_tables).await?;

         let dst_nsp = Namespace::prune(self.site.id);
         let mut recreate_dst_nsp = true;
         for (table, strat) in &prunable_tables {
             reporter.start_table(table.name.as_str());
-            tracker.start_table(conn, table)?;
+            tracker.start_table(conn, table).await?;
             match strat {
                 PruningStrategy::Rebuild => {
                     if recreate_dst_nsp {
-                        catalog::recreate_schema(conn, dst_nsp.as_str())?;
+                        catalog::recreate_schema(conn, dst_nsp.as_str()).await?;
                         recreate_dst_nsp = false;
                     }
                     let pair = TablePair::create(
@@ -424,7 +434,8 @@ impl Layout {
                         dst_nsp.clone(),
                         &self.input_schema,
                         &self.catalog,
-                    )?;
+                    )
+                    .await?;
                     // Copy final entities. This can happen in parallel to indexing as
                     // that part of the table will not change
                     pair.copy_final_entities(
@@ -433,32 +444,38 @@ impl Layout {
                         tracker,
                         req.earliest_block,
                         req.final_block,
-                        cancel,
-                    )?;
+                    )
+                    .await?;
                     // Copy nonfinal entities, and replace the original `src` table with
                     // the smaller `dst` table
                     // see also: deployment-lock-for-update
                     reporter.start_switch();
-                    deployment::with_lock(conn, &self.site, |conn| -> Result<_, StoreError> {
-                        pair.copy_nonfinal_entities(conn, reporter, tracker, req.final_block)?;
-                        cancel.check_cancel().map_err(CancelableError::from)?;
+                    deployment::with_lock(
+                        conn,
+                        &self.site,
+                        async |conn| -> Result<_, StoreError> {
+                            pair.copy_nonfinal_entities(conn, reporter, tracker, req.final_block)
+                                .await?;

-                        conn.transaction(|conn| pair.switch(logger, conn))?;
-                        cancel.check_cancel().map_err(CancelableError::from)?;
+                            pair.switch(logger, conn).await?;

-                        Ok(())
-                    })?;
+                            Ok(())
+                        },
+                    )
+                    .await?;
                     reporter.finish_switch();
                 }
                 PruningStrategy::Delete => {
                     // Delete all entity versions whose range was closed
                     // before `req.earliest_block`
-                    let range = VidRange::for_prune(conn, &table, 0, req.earliest_block)?;
-                    let mut batcher = VidBatcher::load(conn, &self.site.namespace, &table, range)?;
+                    let range = VidRange::for_prune(conn, &table, 0, req.earliest_block).await?;
+                    let mut batcher =
+                        VidBatcher::load(conn, &self.site.namespace, &table, range).await?;

-                    tracker.start_delete(conn, table, range, &batcher)?;
+                    tracker.start_delete(conn, table, range, &batcher).await?;
                     while !batcher.finished() {
                         let rows = batch_with_timeout(conn, &mut batcher, |conn, start, end| {
+                            async move {
                             sql_query(format!(
                                 "/* controller=prune,phase=delete,start_vid={start},batch_size={batch_size} */ \
                                  delete from {qname} \
@@ -470,11 +487,15 @@ impl Layout {
                             .bind::<Integer, _>(req.earliest_block)
                             .bind::<BigInt, _>(start)
                             .bind::<BigInt, _>(end)
-                            .execute(conn).map_err(StoreError::from)
-                        })?;
+                            .execute(conn)
+                            .await
+                            .map_err(StoreError::from)
+                        }.scope_boxed()}).await?;

                         let rows = rows.unwrap_or(0);
-                        tracker.finish_batch(conn, table, -(rows as i64), &batcher)?;
+                        tracker
+                            .finish_batch(conn, table, -(rows as i64), &batcher)
+                            .await?;

                         reporter.prune_batch(
                             table.name.as_str(),
@@ -486,16 +507,17 @@ impl Layout {
                     }
                 }
             }
             reporter.finish_table(table.name.as_str());
-            tracker.finish_table(conn, table)?;
+            tracker.finish_table(conn, table).await?;
         }
         if !recreate_dst_nsp {
-            catalog::drop_schema(conn, dst_nsp.as_str())?;
+            catalog::drop_schema(conn, dst_nsp.as_str()).await?;
         }
         for (table, _) in &prunable_tables {
-            catalog::set_last_pruned_block(conn, &self.site, &table.name, req.earliest_block)?;
+            catalog::set_last_pruned_block(conn, &self.site, &table.name, req.earliest_block)
+                .await?;
         }
         let tables = prunable_tables.iter().map(|(table, _)| *table).collect();
-        self.analyze_tables(conn, reporter, tables, cancel)?;
+        self.analyze_tables(conn, reporter, tables).await?;
         reporter.finish();
         Ok(())
     }
@@ -508,23 +530,35 @@ impl Layout {
 ///
 /// Doing this serves as a safeguard against very bad batch size estimations
 /// so that batches never take longer than `BATCH_SIZE_TIMEOUT`
-fn batch_with_timeout<R, F>(
-    conn: &mut PgConnection,
+async fn batch_with_timeout<'a, 'conn, R, F>(
+    conn: &'conn mut AsyncPgConnection,
     batcher: &mut VidBatcher,
     query: F,
-) -> Result<Option<R>, StoreError>
+) -> Result<Option<R>, StoreError>
 where
-    F: Fn(&mut PgConnection, i64, i64) -> Result<R, StoreError>,
+    R: Send,
+    F: for<'r> Fn(
+            &'r mut AsyncPgConnection,
+            i64,
+            i64,
+        ) -> ScopedBoxFuture<'a, 'r, Result<R, StoreError>>
+        + Sync,
+    'a: 'conn,
 {
     let res = batcher
-        .step(|start, end| {
+        .step(async |start, end| {
             conn.transaction(|conn| {
-                if let Some(timeout) = BATCH_STATEMENT_TIMEOUT.as_ref() {
-                    conn.batch_execute(timeout)?;
+                async {
+                    if let Some(timeout) = BATCH_STATEMENT_TIMEOUT.as_ref() {
+                        conn.batch_execute(timeout).await?;
+                    }
+                    query(conn, start, end).await
                 }
-                query(conn, start, end)
+                .scope_boxed()
             })
+            .await
         })
+        .await
         .map(|(_, res)| res);

     if !matches!(res, Err(StoreError::StatementTimeout)) {
@@ -533,7 +567,11 @@ where
     batcher.set_batch_size(1);

     batcher
-        .step(|start, end| conn.transaction(|conn| query(conn, start, end)))
+        .step(async |start, end| {
+            conn.transaction(|conn| query(conn, start, end).scope_boxed())
+                .await
+        })
+        .await
         .map(|(_, res)| res)
 }

@@ -548,9 +586,10 @@ mod status {
         query_builder::QueryFragment,
         serialize::{Output, ToSql},
         sql_types::Text,
-        table, update, AsChangeset, Connection, ExpressionMethods as _, OptionalExtension,
-        PgConnection, QueryDsl as _, RunQueryDsl as _,
+        table, update, AsChangeset, ExpressionMethods as _, OptionalExtension, QueryDsl as _,
     };
+    use diesel_async::RunQueryDsl as _;
+    use diesel_async::{scoped_futures::ScopedFutureExt, AsyncConnection};
     use graph::{
         components::store::{PruneRequest, PruningStrategy, StoreResult},
         env::ENV_VARS,
@@ -560,7 +599,7 @@ mod status {
     use crate::{
         relational::{Layout, Table},
         vid_batcher::{VidBatcher, VidRange},
-        ConnectionPool,
+        AsyncPgConnection, ConnectionPool,
     };

     table! {
@@ -715,13 +754,17 @@ mod status {
     }

     impl Tracker {
-        pub(super) fn new(conn: &mut PgConnection, layout: Arc<Layout>) -> StoreResult<Tracker> {
+        pub(super) async fn new(
+            conn: &mut AsyncPgConnection,
+            layout: Arc<Layout>,
+        ) -> StoreResult<Tracker> {
             use prune_state as ps;
             let run = ps::table
                 .filter(ps::id.eq(layout.site.id))
                 .order(ps::run.desc())
                 .select(ps::run)
                 .get_result::<i32>(conn)
+                .await
                 .optional()
                 .map_err(StoreError::from)?
                 .unwrap_or(0)
@@ -734,14 +777,15 @@ mod status {
                 .filter(ps::run.gt(1))
                 .filter(ps::run.lt(run - (ENV_VARS.store.prune_keep_history as i32 - 1)))
                 .execute(conn)
+                .await
                 .map_err(StoreError::from)?;

             Ok(Tracker { layout, run })
         }

-        pub(super) fn start(
+        pub(super) async fn start(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             req: &PruneRequest,
             prunable_tables: &[(&Arc<Table>, PruningStrategy)],
         ) -> StoreResult<()> {
             use prune_state as ps;
             use prune_table_state as pts;

             conn.transaction(|conn| {
-                insert_into(ps::table)
-                    .values((
-                        ps::id.eq(self.layout.site.id),
-                        ps::run.eq(self.run),
-                        ps::first_block.eq(req.first_block),
-                        ps::final_block.eq(req.final_block),
-                        ps::latest_block.eq(req.latest_block),
-                        ps::history_blocks.eq(req.history_blocks),
-                        ps::started_at.eq(diesel::dsl::now),
-                    ))
-                    .execute(conn)?;
-
-                for (table, strat) in prunable_tables {
-                    let strat = match strat {
-                        PruningStrategy::Rebuild => "r",
-                        PruningStrategy::Delete => "d",
-                    };
-                    insert_into(pts::table)
+                async move {
+                    insert_into(ps::table)
                         .values((
-                            pts::id.eq(self.layout.site.id),
-                            pts::run.eq(self.run),
-                            pts::table_name.eq(table.name.as_str()),
-                            pts::strategy.eq(strat),
-                            pts::phase.eq(Phase::Queued),
+                            ps::id.eq(self.layout.site.id),
+                            ps::run.eq(self.run),
+                            ps::first_block.eq(req.first_block),
+                            ps::final_block.eq(req.final_block),
+                            ps::latest_block.eq(req.latest_block),
+                            ps::history_blocks.eq(req.history_blocks),
+                            ps::started_at.eq(diesel::dsl::now),
                         ))
-                        .execute(conn)?;
+                        .execute(conn)
+                        .await?;
+
+                    for (table, strat) in prunable_tables {
+                        let strat = match strat {
+                            PruningStrategy::Rebuild => "r",
+                            PruningStrategy::Delete => "d",
+                        };
+                        insert_into(pts::table)
+                            .values((
+                                pts::id.eq(self.layout.site.id),
+                                pts::run.eq(self.run),
+                                pts::table_name.eq(table.name.as_str()),
+                                pts::strategy.eq(strat),
+                                pts::phase.eq(Phase::Queued),
+                            ))
+                            .execute(conn)
+                            .await?;
+                    }
+                    Ok(())
                 }
-                Ok(())
+                .scope_boxed()
             })
+            .await
         }

-        pub(crate) fn start_table(
+        pub(crate) async fn start_table(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             table: &Table,
         ) -> StoreResult<()> {
             use prune_table_state as pts;

             self.update_table_state(
                 conn,
                 table,
                 (
                     pts::started_at.eq(diesel::dsl::now),
                     pts::phase.eq(Phase::Started),
                 ),
-            )?;
+            )
+            .await?;

             Ok(())
         }

-        pub(crate) fn start_copy_final(
+        pub(crate) async fn start_copy_final(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             table: &Table,
             range: VidRange,
         ) -> StoreResult<()> {
@@ -815,12 +866,12 @@ mod status {
                 pts::rows.eq(0),
             );

-            self.update_table_state(conn, table, values)
+            self.update_table_state(conn, table, values).await
         }

-        pub(crate) fn start_copy_nonfinal(
+        pub(crate) async fn start_copy_nonfinal(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             table: &Table,
             range: VidRange,
         ) -> StoreResult<()> {
@@ -832,12 +883,12 @@ mod status {
                 pts::next_vid.eq(range.min),
                 pts::nonfinal_vid.eq(range.max),
             );
-            self.update_table_state(conn, table, values)
+            self.update_table_state(conn, table, values).await
         }

-        pub(crate) fn finish_batch(
+        pub(crate) async fn finish_batch(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             src: &Table,
             rows: i64,
             batcher: &VidBatcher,
@@ -850,12 +901,12 @@ mod status {
                 pts::rows.eq(pts::rows + rows),
             );

-            self.update_table_state(conn, src, values)
+            self.update_table_state(conn, src, values).await
         }

-        pub(crate) fn finish_table(
+        pub(crate) async fn finish_table(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             table: &Table,
         ) -> StoreResult<()> {
             use prune_table_state as pts;
@@ -865,12 +916,12 @@ mod status {
                 pts::phase.eq(Phase::Done),
             );

-            self.update_table_state(conn, table, values)
+            self.update_table_state(conn, table, values).await
         }

-        pub(crate) fn start_delete(
+        pub(crate) async fn start_delete(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             table: &Table,
             range: VidRange,
             batcher: &VidBatcher,
@@ -887,18 +938,18 @@ mod status {
                 pts::batch_size.eq(batcher.batch_size() as i64),
             );

-            self.update_table_state(conn, table, values)
+            self.update_table_state(conn, table, values).await
         }

-        fn update_table_state<V, C>(
+        async fn update_table_state<V, C>(
             &self,
-            conn: &mut PgConnection,
+            conn: &mut AsyncPgConnection,
             table: &Table,
             values: V,
         ) -> StoreResult<()>
         where
             V: AsChangeset<Target = prune_table_state::table, Changeset = C>,
-            C: QueryFragment<Pg>,
+            C: QueryFragment<Pg> + Send,
         {
             use prune_table_state as pts;

             update(pts::table)
                 .filter(pts::id.eq(self.layout.site.id))
                 .filter(pts::run.eq(self.run))
                 .filter(pts::table_name.eq(table.name.as_str()))
                 .set(values)
-                .execute(conn)?;
+                .execute(conn)
+                .await?;
             Ok(())
         }

-        pub(crate) fn finish(&self, conn: &mut PgConnection) -> StoreResult<()> {
+        pub(crate) async fn finish(&self, conn: &mut AsyncPgConnection) -> StoreResult<()> {
             use prune_state as ps;

             update(ps::table)
                 .filter(ps::id.eq(self.layout.site.id))
                 .filter(ps::run.eq(self.run))
                 .set((ps::finished_at.eq(diesel::dsl::now),))
-                .execute(conn)?;
+                .execute(conn)
+                .await?;

             Ok(())
         }

-        pub(crate) fn error(&self, conn: &mut PgConnection, err: &str) -> StoreResult<()> {
+        pub(crate) async fn error(
+            &self,
+            conn: &mut AsyncPgConnection,
+            err: &str,
+        ) -> StoreResult<()> {
             use prune_state as ps;

             update(ps::table)
@@ -933,7 +990,8 @@ mod status {
                     ps::errored_at.eq(diesel::dsl::now),
                     ps::error.eq(err),
                 ))
-                .execute(conn)?;
+                .execute(conn)
+                .await?;

             Ok(())
         }
     }
@@ -949,37 +1007,43 @@ mod status {
             Self { pool, layout }
         }

-        pub fn runs(&self) -> StoreResult<Vec<usize>> {
+        pub async fn runs(&self) -> StoreResult<Vec<usize>> {
             use prune_state as ps;

-            let mut conn = self.pool.get()?;
+            let mut conn = self.pool.get().await?;
             let runs = ps::table
                 .filter(ps::id.eq(self.layout.site.id))
                 .select(ps::run)
                 .order(ps::run.asc())
                 .load::<i32>(&mut conn)
+                .await
                 .map_err(StoreError::from)?;
             let runs = runs.into_iter().map(|run| run as usize).collect::<Vec<_>>();
             Ok(runs)
         }

-        pub fn state(&self, run: usize) -> StoreResult<Option<(PruneState, Vec<PruneTableState>)>> {
+        pub async fn state(
+            &self,
+            run: usize,
+        ) -> StoreResult<Option<(PruneState, Vec<PruneTableState>)>> {
             use prune_state as ps;
             use prune_table_state as pts;

-            let mut conn = self.pool.get()?;
+            let mut conn = self.pool.get().await?;

             let ptss = pts::table
                 .filter(pts::id.eq(self.layout.site.id))
                 .filter(pts::run.eq(run as i32))
                 .order(pts::table_name.asc())
                 .load::<PruneTableState>(&mut conn)
+                .await
                 .map_err(StoreError::from)?;

             ps::table
                 .filter(ps::id.eq(self.layout.site.id))
                 .filter(ps::run.eq(run as i32))
                 .first::<PruneState>(&mut conn)
+                .await
                 .optional()
                 .map_err(StoreError::from)
                 .map(|state| state.map(|state| (state, ptss)))
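Review note: `batch_with_timeout` above encodes a simple recovery policy — run a batch under a statement timeout, and if exactly that error occurs, retry once with the smallest possible batch. A self-contained sketch of the control flow, with `run_batch` and `BatchError` as stand-ins for the real query and `StoreError`:

#[derive(Debug, PartialEq)]
#[allow(dead_code)]
enum BatchError {
    StatementTimeout,
    Other(String),
}

async fn run_batch(size: u32) -> Result<u32, BatchError> {
    // Pretend anything above 100 rows exceeds the statement timeout.
    if size > 100 {
        Err(BatchError::StatementTimeout)
    } else {
        Ok(size)
    }
}

async fn batch_with_timeout(size: u32) -> Result<u32, BatchError> {
    match run_batch(size).await {
        // Only a timeout triggers the fallback; other errors bubble up
        // unchanged, just like the `matches!` check in the diff.
        Err(BatchError::StatementTimeout) => run_batch(1).await,
        res => res,
    }
}

#[tokio::main]
async fn main() {
    assert_eq!(batch_with_timeout(500).await, Ok(1));
    assert_eq!(batch_with_timeout(50).await, Ok(50));
}

The point of the size-1 retry is to guarantee forward progress: a single-row batch is the smallest unit of work, so a persistent timeout there indicates a real problem rather than a bad batch-size estimate.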
batcher: &VidBatcher, @@ -887,18 +938,18 @@ mod status { pts::batch_size.eq(batcher.batch_size() as i64), ); - self.update_table_state(conn, table, values) + self.update_table_state(conn, table, values).await } - fn update_table_state( + async fn update_table_state( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, table: &Table, values: V, ) -> StoreResult<()> where V: AsChangeset, - C: QueryFragment, + C: QueryFragment + Send, { use prune_table_state as pts; @@ -907,22 +958,28 @@ mod status { .filter(pts::run.eq(self.run)) .filter(pts::table_name.eq(table.name.as_str())) .set(values) - .execute(conn)?; + .execute(conn) + .await?; Ok(()) } - pub(crate) fn finish(&self, conn: &mut PgConnection) -> StoreResult<()> { + pub(crate) async fn finish(&self, conn: &mut AsyncPgConnection) -> StoreResult<()> { use prune_state as ps; update(ps::table) .filter(ps::id.eq(self.layout.site.id)) .filter(ps::run.eq(self.run)) .set((ps::finished_at.eq(diesel::dsl::now),)) - .execute(conn)?; + .execute(conn) + .await?; Ok(()) } - pub(crate) fn error(&self, conn: &mut PgConnection, err: &str) -> StoreResult<()> { + pub(crate) async fn error( + &self, + conn: &mut AsyncPgConnection, + err: &str, + ) -> StoreResult<()> { use prune_state as ps; update(ps::table) @@ -933,7 +990,8 @@ mod status { ps::errored_at.eq(diesel::dsl::now), ps::error.eq(err), )) - .execute(conn)?; + .execute(conn) + .await?; Ok(()) } } @@ -949,37 +1007,43 @@ mod status { Self { pool, layout } } - pub fn runs(&self) -> StoreResult> { + pub async fn runs(&self) -> StoreResult> { use prune_state as ps; - let mut conn = self.pool.get()?; + let mut conn = self.pool.get().await?; let runs = ps::table .filter(ps::id.eq(self.layout.site.id)) .select(ps::run) .order(ps::run.asc()) .load::(&mut conn) + .await .map_err(StoreError::from)?; let runs = runs.into_iter().map(|run| run as usize).collect::>(); Ok(runs) } - pub fn state(&self, run: usize) -> StoreResult)>> { + pub async fn state( + &self, + run: usize, + ) -> StoreResult)>> { use prune_state as ps; use prune_table_state as pts; - let mut conn = self.pool.get()?; + let mut conn = self.pool.get().await?; let ptss = pts::table .filter(pts::id.eq(self.layout.site.id)) .filter(pts::run.eq(run as i32)) .order(pts::table_name.asc()) .load::(&mut conn) + .await .map_err(StoreError::from)?; ps::table .filter(ps::id.eq(self.layout.site.id)) .filter(ps::run.eq(run as i32)) .first::(&mut conn) + .await .optional() .map_err(StoreError::from) .map(|state| state.map(|state| (state, ptss))) diff --git a/store/postgres/src/relational/rollup.rs b/store/postgres/src/relational/rollup.rs index 9a9830f6b5a..8ae8bac4795 100644 --- a/store/postgres/src/relational/rollup.rs +++ b/store/postgres/src/relational/rollup.rs @@ -58,9 +58,10 @@ use std::fmt; use std::ops::Range; use std::sync::Arc; -use diesel::{sql_query, PgConnection, RunQueryDsl as _}; - +use diesel::sql_query; use diesel::sql_types::{Integer, Nullable, Timestamptz}; +use diesel_async::RunQueryDsl; + use graph::blockchain::BlockTime; use graph::components::store::{BlockNumber, StoreError}; use graph::data::store::IdType; @@ -73,6 +74,7 @@ use graph::sqlparser::parser::ParserError; use itertools::Itertools; use crate::relational::Table; +use crate::AsyncPgConnection; use super::{Column, SqlName}; @@ -270,9 +272,9 @@ impl Rollup { }) } - pub(crate) fn insert( + pub(crate) async fn insert( &self, - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, bucket: &Range, block: BlockNumber, ) -> Result { @@ -280,12 +282,12 @@ impl Rollup { 
.bind::(bucket.start) .bind::(bucket.end) .bind::(block); - query.execute(conn) + query.execute(conn).await } - pub(crate) fn last_rollup( + pub(crate) async fn last_rollup( rollups: &[Rollup], - conn: &mut PgConnection, + conn: &mut AsyncPgConnection, ) -> Result, StoreError> { #[derive(QueryableByName)] #[diesel(check_for_backend(diesel::pg::Pg))] @@ -305,6 +307,7 @@ impl Rollup { let query = format!("select max(last_rollup) as last_rollup from ({union_all}) as a"); let last_rollup = sql_query(&query) .get_result::(conn) + .await .map(|res| res.last_rollup)?; Ok(last_rollup) } diff --git a/store/postgres/src/retry.rs b/store/postgres/src/retry.rs index d19df52a69b..2b6f3f73d75 100644 --- a/store/postgres/src/retry.rs +++ b/store/postgres/src/retry.rs @@ -22,34 +22,7 @@ fn log_backoff_warning(logger: &Logger, op: &str, backoff: &ExponentialBackoff) /// Run `f` with exponential backoff until it succeeds or it produces an /// error other than `DatabaseUnavailable`. In other words, keep retrying /// `f` until the database is available. -/// -/// Do not use this from an async context since it will block the current -/// thread. Use `forever_async` instead -pub(crate) fn forever(logger: &Logger, op: &str, f: F) -> Result -where - F: Fn() -> Result, -{ - let mut backoff = ExponentialBackoff::new(BACKOFF_BASE, BACKOFF_CEIL); - loop { - match f() { - Ok(v) => return Ok(v), - Err(StoreError::DatabaseUnavailable) => { - log_backoff_warning(logger, op, &backoff); - } - Err(e) => return Err(e), - } - backoff.sleep(); - } -} - -/// Run `f` with exponential backoff until it succeeds or it produces an -/// error other than `DatabaseUnavailable`. In other words, keep retrying -/// `f` until the database is available. -pub(crate) async fn forever_async( - logger: &Logger, - op: &str, - f: F, -) -> Result +pub(crate) async fn forever(logger: &Logger, op: &str, f: F) -> Result where F: Fn() -> Fut, Fut: std::future::Future>, diff --git a/store/postgres/src/store.rs b/store/postgres/src/store.rs index bda5b2da136..3b373fea035 100644 --- a/store/postgres/src/store.rs +++ b/store/postgres/src/store.rs @@ -32,11 +32,11 @@ use crate::{block_store::BlockStore, query_store::QueryStore, SubgraphStore}; #[derive(Clone)] pub struct Store { subgraph_store: Arc, - block_store: Arc, + block_store: BlockStore, } impl Store { - pub fn new(subgraph_store: Arc, block_store: Arc) -> Self { + pub fn new(subgraph_store: Arc, block_store: BlockStore) -> Self { Self { subgraph_store, block_store, @@ -47,7 +47,7 @@ impl Store { self.subgraph_store.cheap_clone() } - pub fn block_store(&self) -> Arc { + pub fn block_store(&self) -> BlockStore { self.block_store.cheap_clone() } } @@ -60,7 +60,7 @@ impl StoreTrait for Store { self.subgraph_store.cheap_clone() } - fn block_store(&self) -> Arc { + fn block_store(&self) -> Self::BlockStore { self.block_store.cheap_clone() } } @@ -78,15 +78,13 @@ impl QueryStoreManager for Store { let api_version = target.get_version(); let target = target.clone(); let (store, site, replica) = graph::spawn_blocking_allow_panic(move || { - store - .replica_for_query(target.clone()) - .map_err(|e| e.into()) + graph::block_on(store.replica_for_query(target.clone())).map_err(|e| e.into()) }) .await .map_err(|e| QueryExecutionError::Panic(e.to_string())) .and_then(|x| x)?; - let chain_store = self.block_store.chain_store(&site.network).ok_or_else(|| { + let chain_store = self.block_store.chain_store(&site.network).await.ok_or_else(|| { internal_error!( "Subgraphs index a known network, but {} indexes `{}` 
which we do not know about. This is most likely a configuration error.", site.deployment, @@ -106,9 +104,9 @@ impl QueryStoreManager for Store { #[async_trait] impl StatusStore for Store { - fn status(&self, filter: status::Filter) -> Result, StoreError> { - let mut infos = self.subgraph_store.status(filter)?; - let ptrs = self.block_store.chain_head_pointers()?; + async fn status(&self, filter: status::Filter) -> Result, StoreError> { + let mut infos = self.subgraph_store.status(filter).await?; + let ptrs = self.block_store.chain_head_pointers().await?; for info in &mut infos { for chain in &mut info.chains { chain.chain_head_block = ptrs.get(&chain.network).map(|ptr| ptr.clone().into()); @@ -117,27 +115,30 @@ impl StatusStore for Store { Ok(infos) } - fn version_info(&self, version_id: &str) -> Result { - let mut info = self.subgraph_store.version_info(version_id)?; + async fn version_info(&self, version_id: &str) -> Result { + let mut info = self.subgraph_store.version_info(version_id).await?; - info.total_ethereum_blocks_count = self.block_store.chain_head_block(&info.network)?; + info.total_ethereum_blocks_count = self.block_store.chain_head_block(&info.network).await?; Ok(info) } - fn versions_for_subgraph_id( + async fn versions_for_subgraph_id( &self, subgraph_id: &str, ) -> Result<(Option, Option), StoreError> { - self.subgraph_store.versions_for_subgraph_id(subgraph_id) + self.subgraph_store + .versions_for_subgraph_id(subgraph_id) + .await } - fn subgraphs_for_deployment_hash( + async fn subgraphs_for_deployment_hash( &self, deployment_hash: &str, ) -> Result, StoreError> { self.subgraph_store .subgraphs_for_deployment_hash(deployment_hash) + .await } async fn get_proof_of_indexing( diff --git a/store/postgres/src/store_events.rs b/store/postgres/src/store_events.rs index 300022d200e..5c7b8dfd845 100644 --- a/store/postgres/src/store_events.rs +++ b/store/postgres/src/store_events.rs @@ -2,10 +2,10 @@ use graph::futures01::Stream; use graph::futures03::compat::Stream01CompatExt; use graph::futures03::stream::StreamExt; use graph::futures03::TryStreamExt; -use graph::tokio_stream::wrappers::ReceiverStream; use std::sync::{atomic::Ordering, Arc, RwLock}; use std::{collections::HashMap, sync::atomic::AtomicUsize}; use tokio::sync::mpsc::{channel, Sender}; +use tokio_stream::wrappers::ReceiverStream; use crate::notification_listener::{NotificationListener, SafeChannelName}; use graph::components::store::SubscriptionManager as SubscriptionManagerTrait; diff --git a/store/postgres/src/subgraph_store.rs b/store/postgres/src/subgraph_store.rs index 7f5993735c2..396aee2e595 100644 --- a/store/postgres/src/subgraph_store.rs +++ b/store/postgres/src/subgraph_store.rs @@ -1,9 +1,11 @@ +use async_trait::async_trait; use diesel::{ deserialize::FromSql, pg::Pg, serialize::{Output, ToSql}, sql_types::{self, Text}, }; +use diesel_async::scoped_futures::ScopedFutureExt; use std::fmt; use std::{ collections::{BTreeMap, HashMap}, @@ -11,7 +13,6 @@ use std::{ }; use std::{iter::FromIterator, time::Duration}; -use graph::futures03::future::join_all; use graph::{ cheap_clone::CheapClone, components::{ @@ -24,17 +25,18 @@ use graph::{ data::query::QueryTarget, data::subgraph::{schema::DeploymentCreate, status, DeploymentFeatures}, internal_error, + prelude::StoreEvent, prelude::{ anyhow, lazy_static, o, web3::types::Address, ApiVersion, BlockNumber, BlockPtr, ChainStore, DeploymentHash, EntityOperation, Logger, MetricsRegistry, NodeId, PartialBlockPtr, StoreError, SubgraphDeploymentEntity, 
SubgraphName, SubgraphStore as SubgraphStoreTrait, SubgraphVersionSwitchingMode, }, - prelude::{CancelableError, StoreEvent}, schema::{ApiSchema, InputSchema}, url::Url, util::timed_cache::TimedCache, }; +use graph::{derive::CheapClone, futures03::future::join_all}; use crate::{ deployment::{OnSync, SubgraphHealth}, @@ -201,13 +203,9 @@ pub mod unused { /// shards. The actual work is done by code in the `primary` module for /// queries against the primary store, and by the `DeploymentStore` for /// access to deployment data and metadata. -#[derive(Clone)] +#[derive(Clone, CheapClone)] pub struct SubgraphStore { - inner: Arc, - /// Base URL for the GraphQL endpoint from which - /// subgraph forks will fetch entities. - /// Example: https://api.thegraph.com/subgraphs/ - fork_base: Option, + inner: Arc, } impl SubgraphStore { @@ -234,10 +232,9 @@ impl SubgraphStore { registry: Arc, ) -> Self { Self { - inner: Arc::new(SubgraphStoreInner::new( - logger, stores, placer, sender, registry, + inner: Arc::new(Inner::new( + logger, stores, placer, sender, registry, fork_base, )), - fork_base, } } @@ -254,7 +251,7 @@ impl SubgraphStore { &self, id: &DeploymentHash, block_number: BlockNumber, - block_store: Arc, + block_store: impl BlockStore, fetch_block_ptr: &dyn BlockPtrForNumber, ) -> Result, StoreError> { self.inner @@ -292,7 +289,7 @@ impl SubgraphStore { // Ideally the lower level functions would be asyncified. let this = self.clone(); let site = graph::spawn_blocking_allow_panic(move || -> Result<_, StoreError> { - this.find_site(deployment) + graph::block_on(this.find_site(deployment)) }) .await .unwrap()?; // Propagate panics, there shouldn't be any. @@ -313,17 +310,163 @@ impl SubgraphStore { .insert(deployment, writable.cheap_clone()); Ok(writable) } + + /// Create a new deployment. This requires creating an entry in + /// `deployment_schemas` in the primary, the subgraph schema in another + /// shard, assigning the deployment to a node, and handling any changes + /// to current/pending versions of the subgraph `name` + /// + /// This process needs to modify two databases: the primary and the + /// shard for the subgraph and is therefore not transactional. The code + /// is careful to make sure this process is at least idempotent, so that + /// a failed deployment creation operation can be fixed by deploying + /// again. + async fn create_deployment_internal( + &self, + name: SubgraphName, + schema: &InputSchema, + deployment: DeploymentCreate, + node_id: NodeId, + network_name: String, + mode: SubgraphVersionSwitchingMode, + // replace == true is only used in tests; for non-test code, it must + // be 'false' + replace: bool, + ) -> Result { + #[cfg(not(debug_assertions))] + assert!(!replace); + + self.evict(schema.id())?; + let graft_base = deployment.graft_base.as_ref(); + + let (site, exists, node_id) = { + // We need to deal with two situations: + // (1) We are really creating a new subgraph; it therefore needs + // to go in the shard and onto the node that the placement + // rules dictate + // (2) The deployment has previously been created, and either + // failed partway through, or the deployment rules have + // changed since the last time we created the deployment. 
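Note on the `writable()` hunk above: it keeps the `spawn_blocking_allow_panic` wrapper and bridges back into the now-async `find_site` with `graph::block_on`. A minimal tokio-only sketch of that bridge, with a placeholder `find_site` instead of the real store call (`graph::spawn_blocking_allow_panic` and `graph::block_on` are graph-node's own wrappers around the same primitives):

```rust
use tokio::{runtime::Handle, task};

// Placeholder for an async store lookup such as `find_site`.
async fn find_site(id: u64) -> Result<String, String> {
    Ok(format!("site-{id}"))
}

// Run an async call from a blocking-pool thread: grab a runtime handle
// first, then re-enter the runtime with `Handle::block_on`. This mirrors
// the `graph::spawn_blocking_allow_panic(move || graph::block_on(...))`
// shape in the hunk above.
async fn lookup(id: u64) -> Result<String, String> {
    let handle = Handle::current();
    task::spawn_blocking(move || handle.block_on(find_site(id)))
        .await
        .expect("spawn_blocking task panicked")
}

#[tokio::main(flavor = "multi_thread")]
async fn main() {
    println!("{:?}", lookup(42).await);
}
```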
+ // In that case, we need to use the shard and node + // assignment that we used last time to avoid creating + // the same deployment in another shard + let (shard, node_id) = self.place(&name, &network_name, node_id).await?; + let mut conn = self.primary_conn().await?; + let (site, site_was_created) = conn + .allocate_site(shard, schema.id(), network_name, graft_base) + .await?; + let node_id = conn.assigned_node(&site).await?.unwrap_or(node_id); + (site, !site_was_created, node_id) + }; + let site = Arc::new(site); + + // if the deployment already exists, we don't need to perform any copying + // so we can set graft_base to None + // if it doesn't exist, we need to copy the graft base to the new deployment + let graft_base_layout = if !exists { + let graft_base = match deployment.graft_base.as_ref() { + Some(base) => Some(self.layout(&base).await?), + None => None, + }; + + if let Some(graft_base) = &graft_base { + self.primary_conn() + .await? + .record_active_copy(graft_base.site.as_ref(), site.as_ref()) + .await?; + } + graft_base + } else { + None + }; + + // Create the actual databases schema and metadata entries + let deployment_store = self + .stores + .get(&site.shard) + .ok_or_else(|| StoreError::UnknownShard(site.shard.to_string()))?; + + let index_def = if let Some(graft) = &graft_base.clone() { + if let Some(site) = self.sites.get(graft) { + let store = self + .stores + .get(&site.shard) + .ok_or_else(|| StoreError::UnknownShard(site.shard.to_string()))?; + + Some(store.load_indexes(site).await?) + } else { + None + } + } else { + None + }; + + deployment_store + .create_deployment( + schema, + deployment, + site.clone(), + graft_base_layout, + replace, + OnSync::None, + index_def, + ) + .await?; + + // FIXME: This simultaneously holds a `primary_conn` and a shard connection, which can + // potentially deadlock. + let mut pconn = self.primary_conn().await?; + pconn + .transaction(|pconn| { + let subgraph_store = self.cheap_clone(); + let site = site.cheap_clone(); + async move { + let exists_and_synced = async move |id: &DeploymentHash| { + let (store, _) = subgraph_store.store(id).await?; + store.deployment_exists_and_synced(id).await + }; + + // Create subgraph, subgraph version, and assignment + let changes = pconn + .create_subgraph_version(name, &site, node_id, mode, exists_and_synced) + .await?; + + let event = StoreEvent::new(changes); + pconn.send_store_event(&self.sender, &event).await?; + Ok(()) + } + .scope_boxed() + }) + .await?; + Ok(site.as_ref().into()) + } + + // Only for tests to simplify their handling of test fixtures, so that + // tests can reset the block pointer of a subgraph by recreating it + #[cfg(debug_assertions)] + pub async fn create_deployment_replace( + &self, + name: SubgraphName, + schema: &InputSchema, + deployment: DeploymentCreate, + node_id: NodeId, + network_name: String, + mode: SubgraphVersionSwitchingMode, + ) -> Result { + self.create_deployment_internal(name, schema, deployment, node_id, network_name, mode, true) + .await + } } impl std::ops::Deref for SubgraphStore { - type Target = SubgraphStoreInner; + type Target = Inner; fn deref(&self) -> &Self::Target { &self.inner } } -pub struct SubgraphStoreInner { +pub struct Inner { mirror: PrimaryMirror, stores: HashMap>, /// Cache for the mapping from deployment id to shard/namespace/id. Only @@ -337,9 +480,13 @@ pub struct SubgraphStoreInner { sender: Arc, writables: Mutex>>, registry: Arc, + /// Base URL for the GraphQL endpoint from which + /// subgraph forks will fetch entities. 
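The `pconn.transaction(|pconn| async move { ... }.scope_boxed())` calls above follow `diesel-async`'s transaction API: the callback must return a `ScopedBoxFuture`, which is what lets the async block keep borrowing the connection it was handed. A stripped-down sketch of that shape; the `prune_state` table and its columns here are invented for the example:

```rust
use diesel::prelude::*;
use diesel_async::{
    scoped_futures::ScopedFutureExt, AsyncConnection, AsyncPgConnection, RunQueryDsl,
};

diesel::table! {
    prune_state (id, run) {
        id -> Integer,
        run -> Integer,
    }
}

// Everything in the block commits or rolls back together; `scope_boxed()`
// boxes the async block while tying its lifetime to the `conn` reborrow,
// which is what `AsyncConnection::transaction` expects from its callback.
async fn record_run(conn: &mut AsyncPgConnection, site: i32, run: i32) -> QueryResult<()> {
    conn.transaction(|conn| {
        async move {
            diesel::insert_into(prune_state::table)
                .values((prune_state::id.eq(site), prune_state::run.eq(run)))
                .execute(conn)
                .await?;
            // ... more statements against the same transaction ...
            Ok(())
        }
        .scope_boxed()
    })
    .await
}
```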
+ /// Example: https://api.thegraph.com/subgraphs/ + fork_base: Option, } -impl SubgraphStoreInner { +impl Inner { /// Create a new store for subgraphs that distributes deployments across /// multiple databases /// @@ -360,6 +507,7 @@ impl SubgraphStoreInner { placer: Arc, sender: Arc, registry: Arc, + fork_base: Option, ) -> Self { let primary = stores .iter() @@ -392,7 +540,7 @@ impl SubgraphStoreInner { }, )); let sites = TimedCache::new(SITES_CACHE_TTL); - SubgraphStoreInner { + Inner { mirror, stores, sites, @@ -400,12 +548,13 @@ impl SubgraphStoreInner { sender, writables: Mutex::new(HashMap::new()), registry, + fork_base, } } // Only needed for tests #[cfg(debug_assertions)] - pub(crate) fn clear_caches(&self) { + pub(crate) async fn clear_caches(&self) { for store in self.stores.values() { store.layout_cache.clear(); } @@ -414,8 +563,9 @@ impl SubgraphStoreInner { // Only needed for tests #[cfg(debug_assertions)] - pub fn shard(&self, deployment: &DeploymentLocator) -> Result { + pub async fn shard(&self, deployment: &DeploymentLocator) -> Result { self.find_site(deployment.id.into()) + .await .map(|site| site.shard.clone()) } @@ -426,14 +576,15 @@ impl SubgraphStoreInner { } /// Return the active `Site` for this deployment hash - fn site(&self, id: &DeploymentHash) -> Result, StoreError> { + async fn site(&self, id: &DeploymentHash) -> Result, StoreError> { if let Some(site) = self.sites.get(id) { return Ok(site); } let site = self .mirror - .find_active_site(id)? + .find_active_site(id) + .await? .ok_or_else(|| StoreError::DeploymentNotFound(id.to_string()))?; let site = Arc::new(site); @@ -455,14 +606,15 @@ impl SubgraphStoreInner { Ok(()) } - pub(crate) fn find_site(&self, id: DeploymentId) -> Result, StoreError> { + pub(crate) async fn find_site(&self, id: DeploymentId) -> Result, StoreError> { if let Some(site) = self.sites.find(|site| site.id == id) { return Ok(site); } let site = self .mirror - .find_site_by_ref(id)? + .find_site_by_ref(id) + .await? 
.ok_or_else(|| StoreError::DeploymentNotFound(id.to_string()))?; let site = Arc::new(site); @@ -472,8 +624,11 @@ impl SubgraphStoreInner { /// Return the store and site for the active deployment of this /// deployment hash - fn store(&self, id: &DeploymentHash) -> Result<(&Arc, Arc), StoreError> { - let site = self.site(id)?; + async fn store( + &self, + id: &DeploymentHash, + ) -> Result<(&Arc, Arc), StoreError> { + let site = self.site(id).await?; let store = self .stores .get(&site.shard) @@ -487,12 +642,12 @@ impl SubgraphStoreInner { .ok_or_else(|| StoreError::UnknownShard(site.shard.to_string())) } - pub(crate) fn layout(&self, id: &DeploymentHash) -> Result, StoreError> { - let (store, site) = self.store(id)?; - store.find_layout(site) + pub(crate) async fn layout(&self, id: &DeploymentHash) -> Result, StoreError> { + let (store, site) = self.store(id).await?; + store.find_layout(site).await } - fn place_on_node( + async fn place_on_node( &self, mut nodes: Vec, default_node: NodeId, @@ -504,30 +659,30 @@ impl SubgraphStoreInner { } 1 => Ok(nodes.pop().unwrap()), _ => { - let mut conn = self.primary_conn()?; + let mut conn = self.primary_conn().await?; // unwrap is fine since nodes is not empty - let node = conn.least_assigned_node(&nodes)?.unwrap(); + let node = conn.least_assigned_node(&nodes).await?.unwrap(); Ok(node) } } } - fn place_in_shard(&self, mut shards: Vec) -> Result { + async fn place_in_shard(&self, mut shards: Vec) -> Result { match shards.len() { 0 => Ok(PRIMARY_SHARD.clone()), 1 => Ok(shards.pop().unwrap()), _ => { - let mut conn = self.primary_conn()?; + let mut conn = self.primary_conn().await?; // unwrap is fine since shards is not empty - let shard = conn.least_used_shard(&shards)?.unwrap(); + let shard = conn.least_used_shard(&shards).await?.unwrap(); Ok(shard) } } } - fn place( + async fn place( &self, name: &SubgraphName, network_name: &str, @@ -546,134 +701,15 @@ impl SubgraphStoreInner { match placement { None => Ok((PRIMARY_SHARD.clone(), default_node)), Some((shards, nodes)) => { - let node = self.place_on_node(nodes, default_node)?; - let shard = self.place_in_shard(shards)?; + let node = self.place_on_node(nodes, default_node).await?; + let shard = self.place_in_shard(shards).await?; Ok((shard, node)) } } } - /// Create a new deployment. This requires creating an entry in - /// `deployment_schemas` in the primary, the subgraph schema in another - /// shard, assigning the deployment to a node, and handling any changes - /// to current/pending versions of the subgraph `name` - /// - /// This process needs to modify two databases: the primary and the - /// shard for the subgraph and is therefore not transactional. The code - /// is careful to make sure this process is at least idempotent, so that - /// a failed deployment creation operation can be fixed by deploying - /// again. 
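The async `place_on_node` and `place_in_shard` above share one shape: an empty candidate list falls back to a default, a single candidate wins outright, and ties go to the primary for a least-loaded lookup. A hypothetical condensation of that fallback (the generic `pick` helper and `least_loaded` callback are my framing, not functions in this diff); it leans on the `AsyncFnOnce` bounds stabilized in Rust 1.85, which this PR already uses elsewhere:

```rust
// `least_loaded` stands in for the primary-shard queries
// `least_assigned_node` / `least_used_shard`.
async fn pick<T, F>(mut candidates: Vec<T>, default: T, least_loaded: F) -> T
where
    F: AsyncFnOnce(&[T]) -> Option<T>,
{
    match candidates.len() {
        0 => default,
        1 => candidates.pop().unwrap(),
        // Several placement rules matched: break the tie by current load.
        _ => least_loaded(&candidates).await.unwrap_or(default),
    }
}

#[tokio::main]
async fn main() {
    let shard = pick(vec!["shard_a", "shard_b"], "primary", async |c| {
        c.first().copied() // pretend the database picked the least used
    })
    .await;
    println!("placed in {shard}");
}
```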
- fn create_deployment_internal( - &self, - name: SubgraphName, - schema: &InputSchema, - deployment: DeploymentCreate, - node_id: NodeId, - network_name: String, - mode: SubgraphVersionSwitchingMode, - // replace == true is only used in tests; for non-test code, it must - // be 'false' - replace: bool, - ) -> Result { - #[cfg(not(debug_assertions))] - assert!(!replace); - - self.evict(schema.id())?; - let graft_base = deployment.graft_base.as_ref(); - - let (site, exists, node_id) = { - // We need to deal with two situations: - // (1) We are really creating a new subgraph; it therefore needs - // to go in the shard and onto the node that the placement - // rules dictate - // (2) The deployment has previously been created, and either - // failed partway through, or the deployment rules have - // changed since the last time we created the deployment. - // In that case, we need to use the shard and node - // assignment that we used last time to avoid creating - // the same deployment in another shard - let (shard, node_id) = self.place(&name, &network_name, node_id)?; - let mut conn = self.primary_conn()?; - let (site, site_was_created) = - conn.allocate_site(shard, schema.id(), network_name, graft_base)?; - let node_id = conn.assigned_node(&site)?.unwrap_or(node_id); - (site, !site_was_created, node_id) - }; - let site = Arc::new(site); - - // if the deployment already exists, we don't need to perform any copying - // so we can set graft_base to None - // if it doesn't exist, we need to copy the graft base to the new deployment - let graft_base_layout = if !exists { - let graft_base = deployment - .graft_base - .as_ref() - .map(|base| self.layout(base)) - .transpose()?; - - if let Some(graft_base) = &graft_base { - self.primary_conn()? - .record_active_copy(graft_base.site.as_ref(), site.as_ref())?; - } - graft_base - } else { - None - }; - - // Create the actual databases schema and metadata entries - let deployment_store = self - .stores - .get(&site.shard) - .ok_or_else(|| StoreError::UnknownShard(site.shard.to_string()))?; - - let index_def = if let Some(graft) = &graft_base.clone() { - if let Some(site) = self.sites.get(graft) { - let store = self - .stores - .get(&site.shard) - .ok_or_else(|| StoreError::UnknownShard(site.shard.to_string()))?; - - Some(store.load_indexes(site)?) - } else { - None - } - } else { - None - }; - - deployment_store.create_deployment( - schema, - deployment, - site.clone(), - graft_base_layout, - replace, - OnSync::None, - index_def, - )?; - - let exists_and_synced = |id: &DeploymentHash| { - let (store, _) = self.store(id)?; - store.deployment_exists_and_synced(id) - }; - - // FIXME: This simultaneously holds a `primary_conn` and a shard connection, which can - // potentially deadlock. 
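The synchronous `exists_and_synced` closure deleted here reappears earlier in this diff as `let exists_and_synced = async move |id: &DeploymentHash| { ... }`, i.e. an async closure (stable since Rust 1.85 / edition 2024). A toy before/after of that conversion, with placeholder types and a hypothetical deployment id:

```rust
// Before: a synchronous callback.
//     let exists_and_synced = |id: &str| -> Result<bool, String> { ... };
//
// After: an async closure, usable wherever an `AsyncFn` bound is asked for.
async fn lookup(id: &str) -> Result<bool, String> {
    Ok(!id.is_empty())
}

async fn run<F>(check: F) -> Result<bool, String>
where
    F: AsyncFn(&str) -> Result<bool, String>,
{
    check("QmExampleDeployment").await
}

#[tokio::main]
async fn main() {
    let exists_and_synced = async move |id: &str| lookup(id).await;
    println!("{:?}", run(exists_and_synced).await);
}
```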
- let mut pconn = self.primary_conn()?; - pconn.transaction(|conn| -> Result<_, StoreError> { - let mut pconn = primary::Connection::new(conn); - // Create subgraph, subgraph version, and assignment - let changes = - pconn.create_subgraph_version(name, &site, node_id, mode, exists_and_synced)?; - - let event = StoreEvent::new(changes); - pconn.send_store_event(&self.sender, &event)?; - Ok(()) - })?; - Ok(site.as_ref().into()) - } - - pub fn copy_deployment( + pub async fn copy_deployment( &self, src: &DeploymentLocator, shard: Shard, @@ -681,11 +717,16 @@ impl SubgraphStoreInner { block: BlockPtr, on_sync: OnSync, ) -> Result { - let src = self.find_site(src.id.into())?; + let src = self.find_site(src.id.into()).await?; let src_store = self.for_site(src.as_ref())?; let src_loc = DeploymentLocator::from(src.as_ref()); - let src_layout = src_store.find_layout(src.cheap_clone())?; - let dst = Arc::new(self.primary_conn()?.copy_site(&src, shard.clone())?); + let src_layout = src_store.find_layout(src.cheap_clone()).await?; + let dst = Arc::new( + self.primary_conn() + .await? + .copy_site(&src, shard.clone()) + .await?, + ); let dst_loc = DeploymentLocator::from(dst.as_ref()); if src.id == dst.id { @@ -697,15 +738,15 @@ impl SubgraphStoreInner { // The very last thing we do when we set up a copy here is assign it // to a node. Therefore, if `dst` is already assigned, this function // should not have been called. - if let Some(node) = self.mirror.assigned_node(dst.as_ref())? { + if let Some(node) = self.mirror.assigned_node(dst.as_ref()).await? { return Err(StoreError::Unknown(anyhow!( "can not copy into deployment {} since it is already assigned to node `{}`", dst_loc, node ))); } - let deployment = src_store.load_deployment(src.clone())?; - let index_def = src_store.load_indexes(src.clone())?; + let deployment = src_store.load_deployment(src.clone()).await?; + let index_def = src_store.load_indexes(src.clone()).await?; // Transmogrify the deployment into a new one let deployment = DeploymentCreate { @@ -717,10 +758,12 @@ impl SubgraphStoreInner { history_blocks_override: None, }; - let graft_base = self.layout(&src.deployment)?; + let graft_base = self.layout(&src.deployment).await?; - self.primary_conn()? - .record_active_copy(src.as_ref(), dst.as_ref())?; + self.primary_conn() + .await? + .record_active_copy(src.as_ref(), dst.as_ref()) + .await?; // Create the actual databases schema and metadata entries let deployment_store = self @@ -728,96 +771,70 @@ impl SubgraphStoreInner { .get(&shard) .ok_or_else(|| StoreError::UnknownShard(shard.to_string()))?; - deployment_store.create_deployment( - &src_layout.input_schema, - deployment, - dst.clone(), - Some(graft_base), - false, - on_sync, - Some(index_def), - )?; - - let mut pconn = self.primary_conn()?; - pconn.transaction(|conn| -> Result<_, StoreError> { - let mut pconn = primary::Connection::new(conn); - // Create subgraph, subgraph version, and assignment. We use the - // existence of an assignment as a signal that we already set up - // the copy - let changes = pconn.assign_subgraph(dst.as_ref(), &node)?; - let event = StoreEvent::new(changes); - pconn.send_store_event(&self.sender, &event)?; - Ok(()) - })?; + deployment_store + .create_deployment( + &src_layout.input_schema, + deployment, + dst.clone(), + Some(graft_base), + false, + on_sync, + Some(index_def), + ) + .await?; + + let mut pconn = self.primary_conn().await?; + pconn + .transaction(|pconn| { + async { + // Create subgraph, subgraph version, and assignment. 
We use the + // existence of an assignment as a signal that we already set up + // the copy + let changes = pconn.assign_subgraph(dst.as_ref(), &node).await?; + let event = StoreEvent::new(changes); + pconn.send_store_event(&self.sender, &event).await?; + Ok(()) + } + .scope_boxed() + }) + .await?; Ok(dst.as_ref().into()) } /// Mark `deployment` as the only active deployment amongst all sites /// with the same deployment hash. Activating this specific deployment /// will make queries use that instead of whatever was active before - pub fn activate(&self, deployment: &DeploymentLocator) -> Result<(), StoreError> { - self.primary_conn()?.activate(deployment)?; + pub async fn activate(&self, deployment: &DeploymentLocator) -> Result<(), StoreError> { + self.primary_conn().await?.activate(deployment).await?; // As a side-effect, this will update the `self.sites` cache with // the new active site - self.find_site(deployment.id.into())?; + self.find_site(deployment.id.into()).await?; Ok(()) } - // Only for tests to simplify their handling of test fixtures, so that - // tests can reset the block pointer of a subgraph by recreating it - #[cfg(debug_assertions)] - pub fn create_deployment_replace( - &self, - name: SubgraphName, - schema: &InputSchema, - deployment: DeploymentCreate, - node_id: NodeId, - network_name: String, - mode: SubgraphVersionSwitchingMode, - ) -> Result { - self.create_deployment_internal(name, schema, deployment, node_id, network_name, mode, true) - } - - pub(crate) fn send_store_event(&self, event: &StoreEvent) -> Result<(), StoreError> { - let mut conn = self.primary_conn()?; - conn.send_store_event(&self.sender, event) - } - /// Get a connection to the primary shard. Code must never hold one of these /// connections while also accessing a `DeploymentStore`, since both /// might draw connections from the same pool, and trying to get two /// connections can deadlock the entire process if the pool runs out /// of connections in between getting the first one and trying to get the /// second one. - pub(crate) fn primary_conn(&self) -> Result, StoreError> { - let conn = self.mirror.primary().get()?; + pub(crate) async fn primary_conn(&self) -> Result { + let conn = self.mirror.primary().get().await?; Ok(primary::Connection::new(conn)) } - pub(crate) async fn with_primary_conn( - &self, - f: impl 'static - + Send - + FnOnce(&mut primary::Connection) -> Result>, - ) -> Result { - let pool = self.mirror.primary(); - pool.with_conn(move |pg_conn, _| { - let mut conn = primary::Connection::new(pg_conn); - f(&mut conn) - }) - .await - } - - pub(crate) fn replica_for_query( + pub(crate) async fn replica_for_query( &self, target: QueryTarget, ) -> Result<(Arc, Arc, ReplicaId), StoreError> { let id = match target { - QueryTarget::Name(name, _) => self.mirror.current_deployment_for_subgraph(&name)?, + QueryTarget::Name(name, _) => { + self.mirror.current_deployment_for_subgraph(&name).await? + } QueryTarget::Deployment(id, _) => id, }; - let (store, site) = self.store(&id)?; + let (store, site) = self.store(&id).await?; let replica = store.replica_for_query()?; Ok((store.clone(), site, replica)) @@ -827,20 +844,20 @@ impl SubgraphStoreInner { /// and should never be called from any other code. 
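The doc comment on `primary_conn` above (and the FIXME earlier in this file) describe the classic self-deadlock of drawing a second pooled connection while still holding the first. A toy model, assuming nothing from graph-node: the "pool" is a one-permit semaphore, so holding a connection across the second `get()` would hang exactly the way the comment warns about:

```rust
use std::sync::Arc;
use tokio::sync::{Semaphore, SemaphorePermit};

// Toy pool: holding a "connection" is holding the only permit.
struct Pool(Arc<Semaphore>);

impl Pool {
    fn new(conns: usize) -> Self {
        Pool(Arc::new(Semaphore::new(conns)))
    }
    async fn get(&self) -> SemaphorePermit<'_> {
        self.0.acquire().await.unwrap()
    }
}

#[tokio::main]
async fn main() {
    let pool = Pool::new(1);

    // OK: the primary "connection" is scoped and returned to the pool
    // before the second one is requested.
    let node = {
        let _pconn = pool.get().await;
        "node_1" // e.g. the result of `least_assigned_node`
    };
    // Had `_pconn` still been alive, this `get()` would wait forever.
    let _sconn = pool.get().await;
    println!("assigned to {node}");
}
```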
Unfortunately, Rust makes /// it very hard to export items just for testing #[cfg(debug_assertions)] - pub fn delete_all_entities_for_test_use_only(&self) -> Result<(), StoreError> { - let mut pconn = self.primary_conn()?; - let schemas = pconn.sites()?; + pub async fn remove_all_subgraphs_for_test_use_only(&self) -> Result<(), StoreError> { + let mut pconn = self.primary_conn().await?; + let schemas = pconn.sites().await?; // Delete all subgraph schemas for schema in schemas { - let (store, _) = self.store(&schema.deployment)?; - store.drop_deployment_schema(&schema.namespace)?; + let (store, _) = self.store(&schema.deployment).await?; + store.drop_deployment_schema(&schema.namespace).await?; } for store in self.stores.values() { - store.drop_all_metadata()?; + store.drop_all_metadata().await?; } - self.clear_caches(); + self.clear_caches().await; Ok(()) } @@ -866,8 +883,12 @@ impl SubgraphStoreInner { /// Look for new unused deployments and add them to the `unused_deployments` /// table - pub fn record_unused_deployments(&self) -> Result, StoreError> { - let deployments = self.primary_conn()?.detect_unused_deployments()?; + pub async fn record_unused_deployments(&self) -> Result, StoreError> { + let deployments = self + .primary_conn() + .await? + .detect_unused_deployments() + .await?; // deployments_by_shard takes an empty vec to mean 'give me everything', // so we short-circuit that here @@ -887,69 +908,87 @@ impl SubgraphStoreInner { .into_iter() .map(|site| site.deployment.to_string()) .collect(); - details.extend(store.deployment_details(ids)?); + details.extend(store.deployment_details(ids).await?); } - self.primary_conn()?.update_unused_deployments(&details)?; + self.primary_conn() + .await? + .update_unused_deployments(&details) + .await?; Ok(details) } - pub fn list_unused_deployments( + pub async fn list_unused_deployments( &self, filter: unused::Filter, ) -> Result, StoreError> { - self.primary_conn()?.list_unused_deployments(filter) + self.primary_conn() + .await? + .list_unused_deployments(filter) + .await } /// Remove a deployment, i.e., all its data and metadata. This is only permissible /// if the deployment is unused in the sense that it is neither the current nor /// pending version of any subgraph, and is not currently assigned to any node - pub fn remove_deployment(&self, id: DeploymentId) -> Result<(), StoreError> { - let site = self.find_site(id)?; + pub async fn remove_deployment(&self, id: DeploymentId) -> Result<(), StoreError> { + let site = self.find_site(id).await?; let store = self.for_site(site.as_ref())?; // Check that deployment is not assigned - let mut removable = self.mirror.assigned_node(site.as_ref())?.is_none(); + let mut removable = self.mirror.assigned_node(site.as_ref()).await?.is_none(); // Check that it is not current/pending for any subgraph if it is // the active deployment of that subgraph if site.active && !self - .primary_conn()? - .subgraphs_using_deployment(site.as_ref())? + .primary_conn() + .await? + .subgraphs_using_deployment(site.as_ref()) + .await? .is_empty() { removable = false; } if removable { - store.drop_deployment(&site)?; + store.drop_deployment(&site).await?; - self.primary_conn()?.drop_site(site.as_ref())?; + self.primary_conn().await?.drop_site(site.as_ref()).await?; } else { - self.primary_conn()? - .unused_deployment_is_used(site.as_ref())?; + self.primary_conn() + .await? 
+ .unused_deployment_is_used(site.as_ref()) + .await?; } Ok(()) } - pub fn status_for_id(&self, id: graph::components::store::DeploymentId) -> status::Info { + pub async fn status_for_id(&self, id: graph::components::store::DeploymentId) -> status::Info { let filter = status::Filter::DeploymentIds(vec![id]); - self.status(filter).unwrap().into_iter().next().unwrap() + self.status(filter) + .await + .unwrap() + .into_iter() + .next() + .unwrap() } - pub(crate) fn status(&self, filter: status::Filter) -> Result, StoreError> { + pub(crate) async fn status( + &self, + filter: status::Filter, + ) -> Result, StoreError> { let sites = match filter { status::Filter::SubgraphName(name) => { - let deployments = self.mirror.deployments_for_subgraph(&name)?; + let deployments = self.mirror.deployments_for_subgraph(&name).await?; if deployments.is_empty() { return Ok(Vec::new()); } deployments } status::Filter::SubgraphVersion(name, use_current) => { - let deployment = self.mirror.subgraph_version(&name, use_current)?; + let deployment = self.mirror.subgraph_version(&name, use_current).await?; match deployment { Some(deployment) => vec![deployment], None => { @@ -958,11 +997,11 @@ impl SubgraphStoreInner { } } status::Filter::Deployments(deployments) => { - self.mirror.find_sites(&deployments, true)? + self.mirror.find_sites(&deployments, true).await? } status::Filter::DeploymentIds(ids) => { let ids: Vec<_> = ids.into_iter().map(|id| id.into()).collect(); - self.mirror.find_sites_by_id(&ids)? + self.mirror.find_sites_by_id(&ids).await? } }; @@ -975,18 +1014,23 @@ impl SubgraphStoreInner { .stores .get(&shard) .ok_or_else(|| StoreError::UnknownShard(shard.to_string()))?; - infos.extend(store.deployment_statuses(&sites)?); + infos.extend(store.deployment_statuses(&sites).await?); + } + let nodes = self.mirror.fill_assignments(&mut infos).await?; + for info in infos.iter_mut() { + info.node = nodes.get(&info.id).map(|(node, _)| node.clone()); + info.paused = nodes.get(&info.id).map(|(_, paused)| *paused); } - self.mirror.fill_assignments(&mut infos)?; + Ok(infos) } - pub(crate) fn version_info(&self, version: &str) -> Result { - if let Some((deployment_id, created_at)) = self.mirror.version_info(version)? { + pub(crate) async fn version_info(&self, version: &str) -> Result { + if let Some((deployment_id, created_at)) = self.mirror.version_info(version).await? 
{ let id = DeploymentHash::new(deployment_id.clone()) .map_err(|id| internal_error!("illegal deployment id {}", id))?; - let (store, site) = self.store(&id)?; - let statuses = store.deployment_statuses(&[site.clone()])?; + let (store, site) = self.store(&id).await?; + let statuses = store.deployment_statuses(&[site.clone()]).await?; let status = statuses .first() .ok_or_else(|| StoreError::DeploymentNotFound(deployment_id.clone()))?; @@ -996,8 +1040,8 @@ impl SubgraphStoreInner { .ok_or_else(|| internal_error!("no chain info for {}", deployment_id))?; let latest_ethereum_block_number = chain.latest_block.as_ref().map(|block| block.number()); - let subgraph_info = store.subgraph_info(site.cheap_clone())?; - let layout = store.find_layout(site.cheap_clone())?; + let subgraph_info = store.subgraph_info(site.cheap_clone()).await?; + let layout = store.find_layout(site.cheap_clone()).await?; let network = site.network.clone(); let info = VersionInfo { @@ -1018,24 +1062,26 @@ impl SubgraphStoreInner { } } - pub(crate) fn versions_for_subgraph_id( + pub(crate) async fn versions_for_subgraph_id( &self, subgraph_id: &str, ) -> Result<(Option, Option), StoreError> { - self.mirror.versions_for_subgraph_id(subgraph_id) + self.mirror.versions_for_subgraph_id(subgraph_id).await } - pub(crate) fn subgraphs_for_deployment_hash( + pub(crate) async fn subgraphs_for_deployment_hash( &self, deployment_hash: &str, ) -> Result, StoreError> { - self.mirror.subgraphs_by_deployment_hash(deployment_hash) + self.mirror + .subgraphs_by_deployment_hash(deployment_hash) + .await } #[cfg(debug_assertions)] - pub fn error_count(&self, id: &DeploymentHash) -> Result { - let (store, _) = self.store(id)?; - store.error_count(id) + pub async fn error_count(&self, id: &DeploymentHash) -> Result { + let (store, _) = self.store(id).await?; + store.error_count(id).await } /// Vacuum the `head` and `deployment` table in each shard @@ -1043,14 +1089,22 @@ impl SubgraphStoreInner { join_all(self.stores.values().map(|store| store.vacuum())).await } - pub fn rewind(&self, id: DeploymentHash, block_ptr_to: BlockPtr) -> Result<(), StoreError> { - let (store, site) = self.store(&id)?; - store.rewind(site, block_ptr_to) + pub async fn rewind( + &self, + id: DeploymentHash, + block_ptr_to: BlockPtr, + ) -> Result<(), StoreError> { + let (store, site) = self.store(&id).await?; + store.rewind(site, block_ptr_to).await } - pub fn truncate(&self, id: DeploymentHash, block_ptr_to: BlockPtr) -> Result<(), StoreError> { - let (store, site) = self.store(&id)?; - store.truncate(site, block_ptr_to) + pub async fn truncate( + &self, + id: DeploymentHash, + block_ptr_to: BlockPtr, + ) -> Result<(), StoreError> { + let (store, site) = self.store(&id).await?; + store.truncate(site, block_ptr_to).await } pub(crate) async fn get_proof_of_indexing( @@ -1059,7 +1113,7 @@ impl SubgraphStoreInner { indexer: &Option
, block: BlockPtr, ) -> Result, StoreError> { - let (store, site) = self.store(id)?; + let (store, site) = self.store(id).await?; store.get_proof_of_indexing(site, indexer, block).await } @@ -1067,17 +1121,19 @@ impl SubgraphStoreInner { &self, id: &DeploymentHash, block_number: BlockNumber, - block_store: Arc, + block_store: impl BlockStore, fetch_block_ptr: &dyn BlockPtrForNumber, ) -> Result, StoreError> { - let (store, site) = self.store(id)?; + let (store, site) = self.store(id).await?; let block_hash = { - let chain_store = match block_store.chain_store(&site.network) { + let chain_store = match block_store.chain_store(&site.network).await { Some(chain_store) => chain_store, None => return Ok(None), }; - let mut hashes = chain_store.block_hashes_by_block_number(block_number)?; + let mut hashes = chain_store + .block_hashes_by_block_number(block_number) + .await?; // If we have multiple versions of this block using any of them could introduce // non-determinism because we don't know which one is the right one @@ -1115,22 +1171,23 @@ impl SubgraphStoreInner { // Only used by tests #[cfg(debug_assertions)] - pub fn find( + pub async fn find( &self, query: graph::prelude::EntityQuery, ) -> Result, graph::prelude::QueryExecutionError> { - let (store, site) = self.store(&query.subgraph_id)?; - store.find(site, query) + let (store, site) = self.store(&query.subgraph_id).await?; + store.find(site, query).await } - pub fn locate_in_shard( + pub async fn locate_in_shard( &self, hash: &DeploymentHash, shard: Shard, ) -> Result, StoreError> { Ok(self .mirror - .find_site_in_shard(hash, &shard)? + .find_site_in_shard(hash, &shard) + .await? .as_ref() .map(|site| site.into())) } @@ -1153,39 +1210,39 @@ impl SubgraphStoreInner { .await; } - pub fn analyze( + pub async fn analyze( &self, deployment: &DeploymentLocator, entity_name: Option<&str>, ) -> Result<(), StoreError> { - let (store, site) = self.store(&deployment.hash)?; - store.analyze(site, entity_name) + let (store, site) = self.store(&deployment.hash).await?; + store.analyze(site, entity_name).await } /// Return the statistics targets for all tables of `deployment`. The /// first return value is the default target, and the second value maps /// the name of each table to a map of column name to its statistics /// target. A value of `-1` means that the global default will be used. - pub fn stats_targets( + pub async fn stats_targets( &self, deployment: &DeploymentLocator, ) -> Result<(i32, BTreeMap>), StoreError> { - let (store, site) = self.store(&deployment.hash)?; - store.stats_targets(site) + let (store, site) = self.store(&deployment.hash).await?; + store.stats_targets(site).await } /// Set the statistics target for columns `columns` in `deployment`. If /// `entity` is `Some`, only set it for the table for that entity, if it /// is `None`, set it for all tables in the deployment. 
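On the statistics targets described above: in Postgres a per-column target is set with `ALTER TABLE ... ALTER COLUMN ... SET STATISTICS`, and `-1` restores the `default_statistics_target` fallback, matching the `-1` convention in this doc comment. A hedged sketch of such a setter over `diesel-async`; this is my illustration of the underlying SQL, not the body of `set_stats_target`:

```rust
use diesel::{sql_query, QueryResult};
use diesel_async::{AsyncPgConnection, RunQueryDsl};

// `-1` resets a column to the planner default (`default_statistics_target`).
async fn set_column_stats_target(
    conn: &mut AsyncPgConnection,
    table: &str,
    column: &str,
    target: i32,
) -> QueryResult<()> {
    // Identifiers cannot be bound as parameters, so they are formatted in;
    // real code must quote/validate them to avoid SQL injection.
    sql_query(format!(
        "alter table {table} alter column {column} set statistics {target}"
    ))
    .execute(conn)
    .await?;
    // The new target only influences the planner after the next ANALYZE.
    sql_query(format!("analyze {table} ({column})"))
        .execute(conn)
        .await?;
    Ok(())
}
```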
- pub fn set_stats_target( + pub async fn set_stats_target( &self, deployment: &DeploymentLocator, entity: Option<&str>, columns: Vec, target: i32, ) -> Result<(), StoreError> { - let (store, site) = self.store(&deployment.hash)?; - store.set_stats_target(site, entity, columns, target) + let (store, site) = self.store(&deployment.hash).await?; + store.set_stats_target(site, entity, columns, target).await } pub async fn create_manual_index( @@ -1196,7 +1253,7 @@ impl SubgraphStoreInner { index_method: Method, after: Option, ) -> Result<(), StoreError> { - let (store, site) = self.store(&deployment.hash)?; + let (store, site) = self.store(&deployment.hash).await?; store .create_manual_index(site, entity_name, field_names, index_method, after) .await @@ -1207,7 +1264,7 @@ impl SubgraphStoreInner { deployment: &DeploymentLocator, entity_name: &str, ) -> Result, StoreError> { - let (store, site) = self.store(&deployment.hash)?; + let (store, site) = self.store(&deployment.hash).await?; store.indexes_for_entity(site, entity_name).await } @@ -1216,7 +1273,7 @@ impl SubgraphStoreInner { deployment: &DeploymentLocator, index_name: &str, ) -> Result<(), StoreError> { - let (store, site) = self.store(&deployment.hash)?; + let (store, site) = self.store(&deployment.hash).await?; store.drop_index(site, index_name).await } @@ -1226,7 +1283,7 @@ impl SubgraphStoreInner { table: &str, is_account_like: bool, ) -> Result<(), StoreError> { - let (store, site) = self.store(&deployment.hash)?; + let (store, site) = self.store(&deployment.hash).await?; store.set_account_like(site, table, is_account_like).await } @@ -1245,7 +1302,7 @@ impl SubgraphStoreInner { ) -> Result, StoreError> { // Find the store by the deployment id; otherwise, we could only // prune the active copy of the deployment with `deployment.hash` - let site = self.find_site(deployment.id.into())?; + let site = self.find_site(deployment.id.into()).await?; let store = self.for_site(&site)?; store.prune(reporter, site, req).await @@ -1255,41 +1312,46 @@ impl SubgraphStoreInner { &self, deployment: &DeploymentLocator, ) -> Result { - let site = self.find_site(deployment.id.into())?; + let site = self.find_site(deployment.id.into()).await?; let store = self.for_site(&site)?; store.prune_viewer(site).await } - pub fn set_history_blocks( + pub async fn set_history_blocks( &self, deployment: &DeploymentLocator, history_blocks: BlockNumber, reorg_threshold: BlockNumber, ) -> Result<(), StoreError> { - let site = self.find_site(deployment.id.into())?; + let site = self.find_site(deployment.id.into()).await?; let store = self.for_site(&site)?; - store.set_history_blocks(&site, history_blocks, reorg_threshold) + store + .set_history_blocks(&site, history_blocks, reorg_threshold) + .await } - pub fn load_deployment(&self, site: Arc) -> Result { + pub async fn load_deployment( + &self, + site: Arc, + ) -> Result { let src_store = self.for_site(&site)?; - src_store.load_deployment(site) + src_store.load_deployment(site).await } - pub fn load_deployment_by_id( + pub async fn load_deployment_by_id( &self, id: DeploymentId, ) -> Result { - let site = self.find_site(id)?; + let site = self.find_site(id).await?; let src_store = self.for_site(&site)?; - src_store.load_deployment(site) + src_store.load_deployment(site).await } - pub fn load_indexes(&self, site: Arc) -> Result { + pub async fn load_indexes(&self, site: Arc) -> Result { let src_store = self.for_site(&site)?; - src_store.load_indexes(site) + src_store.load_indexes(site).await } } @@ -1318,19 +1380,20 
@@ impl EnsLookup { } } - fn is_table_empty(pool: &ConnectionPool) -> Result { - let conn = pool.get()?; - primary::Connection::new(conn).is_ens_table_empty() + async fn is_table_empty(pool: &ConnectionPool) -> Result { + let conn = pool.get().await?; + primary::Connection::new(conn).is_ens_table_empty().await } } +#[async_trait] impl EnsLookupTrait for EnsLookup { - fn find_name(&self, hash: &str) -> Result, StoreError> { - let conn = self.primary.get()?; - primary::Connection::new(conn).find_ens_name(hash) + async fn find_name(&self, hash: &str) -> Result, StoreError> { + let conn = self.primary.get().await?; + primary::Connection::new(conn).find_ens_name(hash).await } - fn is_table_empty(&self) -> Result { + async fn is_table_empty(&self) -> Result { match self.state.load(std::sync::atomic::Ordering::SeqCst) { STATE_ENS_NOT_CHECKED => {} STATE_ENS_EMPTY => return Ok(true), @@ -1338,7 +1401,7 @@ impl EnsLookupTrait for EnsLookup { _ => unreachable!("unsupported state"), } - let is_empty = Self::is_table_empty(&self.primary)?; + let is_empty = Self::is_table_empty(&self.primary).await?; let new_state = match is_empty { true => STATE_ENS_EMPTY, false => STATE_ENS_NOT_EMPTY, @@ -1357,7 +1420,7 @@ impl SubgraphStoreTrait for SubgraphStore { } // FIXME: This method should not get a node_id - fn create_subgraph_deployment( + async fn create_subgraph_deployment( &self, name: SubgraphName, schema: &InputSchema, @@ -1375,80 +1438,115 @@ impl SubgraphStoreTrait for SubgraphStore { mode, false, ) + .await } - fn create_subgraph(&self, name: SubgraphName) -> Result { - let mut pconn = self.primary_conn()?; - pconn.transaction(|conn| { - let mut pconn = primary::Connection::new(conn); - pconn.create_subgraph(&name) - }) + async fn create_subgraph(&self, name: SubgraphName) -> Result { + let mut pconn = self.primary_conn().await?; + pconn + .transaction(|pconn| pconn.create_subgraph(&name).scope_boxed()) + .await } - fn create_subgraph_features(&self, features: DeploymentFeatures) -> Result<(), StoreError> { - let mut pconn = self.primary_conn()?; - pconn.transaction(|conn| { - let mut pconn = primary::Connection::new(conn); - pconn.create_subgraph_features(features) - }) + async fn create_subgraph_features( + &self, + features: DeploymentFeatures, + ) -> Result<(), StoreError> { + let mut pconn = self.primary_conn().await?; + pconn + .transaction(|pconn| pconn.create_subgraph_features(features).scope_boxed()) + .await } - fn remove_subgraph(&self, name: SubgraphName) -> Result<(), StoreError> { - let mut pconn = self.primary_conn()?; - pconn.transaction(|conn| -> Result<_, StoreError> { - let mut pconn = primary::Connection::new(conn); - let changes = pconn.remove_subgraph(name)?; - pconn.send_store_event(&self.sender, &StoreEvent::new(changes)) - }) + async fn remove_subgraph(&self, name: SubgraphName) -> Result<(), StoreError> { + let mut pconn = self.primary_conn().await?; + pconn + .transaction(|pconn| { + async { + let changes = pconn.remove_subgraph(name).await?; + pconn + .send_store_event(&self.sender, &StoreEvent::new(changes)) + .await + } + .scope_boxed() + }) + .await } - fn reassign_subgraph( + async fn reassign_subgraph( &self, deployment: &DeploymentLocator, node_id: &NodeId, ) -> Result<(), StoreError> { - let site = self.find_site(deployment.id.into())?; - let mut pconn = self.primary_conn()?; - pconn.transaction(|conn| -> Result<_, StoreError> { - let mut pconn = primary::Connection::new(conn); - let changes = pconn.reassign_subgraph(site.as_ref(), node_id)?; - 
pconn.send_store_event(&self.sender, &StoreEvent::new(changes)) - }) + let site = self.find_site(deployment.id.into()).await?; + let mut pconn = self.primary_conn().await?; + pconn + .transaction(|pconn| { + async { + let changes = pconn.reassign_subgraph(site.as_ref(), node_id).await?; + pconn + .send_store_event(&self.sender, &StoreEvent::new(changes)) + .await + } + .scope_boxed() + }) + .await } - fn unassign_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError> { - let site = self.find_site(deployment.id.into())?; - let mut pconn = self.primary_conn()?; - pconn.transaction(|conn| -> Result<_, StoreError> { - let mut pconn = primary::Connection::new(conn); - let changes = pconn.unassign_subgraph(site.as_ref())?; - pconn.send_store_event(&self.sender, &StoreEvent::new(changes)) - }) + async fn unassign_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError> { + let site = self.find_site(deployment.id.into()).await?; + let mut pconn = self.primary_conn().await?; + pconn + .transaction(|pconn| { + async { + let changes = pconn.unassign_subgraph(site.as_ref()).await?; + pconn + .send_store_event(&self.sender, &StoreEvent::new(changes)) + .await + } + .scope_boxed() + }) + .await } - fn pause_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError> { - let site = self.find_site(deployment.id.into())?; - let mut pconn = self.primary_conn()?; - pconn.transaction(|conn| -> Result<_, StoreError> { - let mut pconn = primary::Connection::new(conn); - let changes = pconn.pause_subgraph(site.as_ref())?; - pconn.send_store_event(&self.sender, &StoreEvent::new(changes)) - }) + async fn pause_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError> { + let site = self.find_site(deployment.id.into()).await?; + let mut pconn = self.primary_conn().await?; + pconn + .transaction(|pconn| { + async { + let changes = pconn.pause_subgraph(site.as_ref()).await?; + pconn + .send_store_event(&self.sender, &StoreEvent::new(changes)) + .await + } + .scope_boxed() + }) + .await } - fn resume_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError> { - let site = self.find_site(deployment.id.into())?; - let mut pconn = self.primary_conn()?; - pconn.transaction(|conn| -> Result<_, StoreError> { - let mut pconn = primary::Connection::new(conn); - let changes = pconn.resume_subgraph(site.as_ref())?; - pconn.send_store_event(&self.sender, &StoreEvent::new(changes)) - }) + async fn resume_subgraph(&self, deployment: &DeploymentLocator) -> Result<(), StoreError> { + let site = self.find_site(deployment.id.into()).await?; + let mut pconn = self.primary_conn().await?; + pconn + .transaction(|pconn| { + async { + let changes = pconn.resume_subgraph(site.as_ref()).await?; + pconn + .send_store_event(&self.sender, &StoreEvent::new(changes)) + .await + } + .scope_boxed() + }) + .await } - fn assigned_node(&self, deployment: &DeploymentLocator) -> Result, StoreError> { - let site = self.find_site(deployment.id.into())?; - self.mirror.assigned_node(site.as_ref()) + async fn assigned_node( + &self, + deployment: &DeploymentLocator, + ) -> Result, StoreError> { + let site = self.find_site(deployment.id.into()).await?; + self.mirror.assigned_node(site.as_ref()).await } /// Returns Option<(node_id,is_paused)> where `node_id` is the node that @@ -1459,16 +1557,10 @@ impl SubgraphStoreTrait for SubgraphStore { &self, deployment: &DeploymentLocator, ) -> Result, StoreError> { - let site = self.find_site(deployment.id.into())?; + let site = 
self.find_site(deployment.id.into()).await?; self.mirror.assignment_status(site).await } - fn assignments(&self, node: &NodeId) -> Result, StoreError> { - self.mirror - .assignments(node) - .map(|sites| sites.iter().map(|site| site.into()).collect()) - } - async fn active_assignments( &self, node: &NodeId, @@ -1479,8 +1571,8 @@ impl SubgraphStoreTrait for SubgraphStore { .map(|sites| sites.iter().map(|site| site.into()).collect()) } - fn subgraph_exists(&self, name: &SubgraphName) -> Result { - self.mirror.subgraph_exists(name) + async fn subgraph_exists(&self, name: &SubgraphName) -> Result { + self.mirror.subgraph_exists(name).await } async fn subgraph_features( @@ -1488,51 +1580,47 @@ impl SubgraphStoreTrait for SubgraphStore { deployment: &DeploymentHash, ) -> Result, StoreError> { let deployment = deployment.to_string(); - self.with_primary_conn(|conn| { - conn.transaction(|conn| { - let mut pconn = primary::Connection::new(conn); - pconn - .get_subgraph_features(deployment) - .map_err(|e| e.into()) - }) - }) - .await + let mut pconn = self.primary_conn().await?; + pconn + .get_subgraph_features(deployment) + .await + .map_err(|e| e.into()) } - fn entity_changes_in_block( + async fn entity_changes_in_block( &self, subgraph_id: &DeploymentHash, block: BlockNumber, ) -> Result, StoreError> { - let (store, site) = self.store(subgraph_id)?; - let changes = store.get_changes(site, block)?; + let (store, site) = self.store(subgraph_id).await?; + let changes = store.get_changes(site, block).await?; Ok(changes) } - fn input_schema(&self, id: &DeploymentHash) -> Result { - let (store, site) = self.store(id)?; - let layout = store.find_layout(site)?; + async fn input_schema(&self, id: &DeploymentHash) -> Result { + let (store, site) = self.store(id).await?; + let layout = store.find_layout(site).await?; Ok(layout.input_schema.cheap_clone()) } - fn api_schema( + async fn api_schema( &self, id: &DeploymentHash, version: &ApiVersion, ) -> Result, StoreError> { - let (store, site) = self.store(id)?; - let info = store.subgraph_info(site)?; + let (store, site) = self.store(id).await?; + let info = store.subgraph_info(site).await?; Ok(info.api.get(version).unwrap().clone()) } - fn debug_fork( + async fn debug_fork( &self, id: &DeploymentHash, logger: Logger, ) -> Result>, StoreError> { - let (store, site) = self.store(id)?; - let info = store.subgraph_info(site.cheap_clone())?; - let layout = store.find_layout(site)?; + let (store, site) = self.store(id).await?; + let info = store.subgraph_info(site.cheap_clone()).await?; + let layout = store.find_layout(site).await?; let fork_id = info.debug_fork; let schema = layout.input_schema.cheap_clone(); @@ -1563,9 +1651,9 @@ impl SubgraphStoreTrait for SubgraphStore { deployment: graph::components::store::DeploymentId, ) -> Result, StoreError> { let deployment = deployment.into(); - let site = self.find_site(deployment)?; + let site = self.find_site(deployment).await?; let store = self.for_site(&site)?; - let input_schema = self.input_schema(&site.deployment)?; + let input_schema = self.input_schema(&site.deployment).await?; Ok(Arc::new(SourceableStore::new( site, @@ -1586,43 +1674,44 @@ impl SubgraphStoreTrait for SubgraphStore { } } - fn is_deployed(&self, id: &DeploymentHash) -> Result { - match self.site(id) { + async fn is_deployed(&self, id: &DeploymentHash) -> Result { + match self.site(id).await { Ok(_) => Ok(true), Err(StoreError::DeploymentNotFound(_)) => Ok(false), Err(e) => Err(e), } } - fn graft_pending(&self, id: &DeploymentHash) -> Result { - 
let (store, _) = self.store(id)?; - let graft_detail = store.graft_pending(id)?; + async fn graft_pending(&self, id: &DeploymentHash) -> Result<bool, StoreError> { + let (store, _) = self.store(id).await?; + let graft_detail = store.graft_pending(id).await?; Ok(graft_detail.is_some()) } async fn least_block_ptr(&self, id: &DeploymentHash) -> Result<Option<BlockPtr>, StoreError> { - let (store, site) = self.store(id)?; + let (store, site) = self.store(id).await?; store.block_ptr(site.cheap_clone()).await } async fn is_healthy(&self, id: &DeploymentHash) -> Result<bool, StoreError> { - let (store, site) = self.store(id)?; + let (store, site) = self.store(id).await?; let health = store.health(&site).await?; Ok(matches!(health, SubgraphHealth::Healthy)) } /// Find the deployment locators for the subgraph with the given hash - fn locators(&self, hash: &str) -> Result<Vec<DeploymentLocator>, StoreError> { + async fn locators(&self, hash: &str) -> Result<Vec<DeploymentLocator>, StoreError> { Ok(self .mirror - .find_sites(&[hash.to_string()], false)? + .find_sites(&[hash.to_string()], false) + .await? .iter() .map(|site| site.into()) .collect()) } - fn active_locator(&self, hash: &str) -> Result<Option<DeploymentLocator>, StoreError> { - let sites = self.mirror.find_sites(&[hash.to_string()], true)?; + async fn active_locator(&self, hash: &str) -> Result<Option<DeploymentLocator>, StoreError> { + let sites = self.mirror.find_sites(&[hash.to_string()], true).await?; if sites.len() > 1 { return Err(internal_error!( "There are {} active deployments for {hash}, there should only be one", @@ -1637,15 +1726,15 @@ impl SubgraphStoreTrait for SubgraphStore { hash: &DeploymentHash, raw_yaml: String, ) -> Result<(), StoreError> { - let (store, site) = self.store(hash)?; + let (store, site) = self.store(hash).await?; store.set_manifest_raw_yaml(site, raw_yaml).await } - fn instrument(&self, deployment: &DeploymentLocator) -> Result<bool, StoreError> { - let site = self.find_site(deployment.id.into())?; + async fn instrument(&self, deployment: &DeploymentLocator) -> Result<bool, StoreError> { + let site = self.find_site(deployment.id.into()).await?; let store = self.for_site(&site)?; - let info = store.subgraph_info(site)?; + let info = store.subgraph_info(site).await?; Ok(info.instrument) } } diff --git a/store/postgres/src/vid_batcher.rs b/store/postgres/src/vid_batcher.rs index feb58787c43..0dea582bbac 100644 --- a/store/postgres/src/vid_batcher.rs +++ b/store/postgres/src/vid_batcher.rs @@ -3,8 +3,8 @@ use std::time::{Duration, Instant}; use diesel::{ sql_query, sql_types::{BigInt, Integer}, - PgConnection, RunQueryDsl as _, }; +use diesel_async::RunQueryDsl as _; use graph::{ env::ENV_VARS, prelude::{BlockNumber, BlockPtr, StoreError}, @@ -15,6 +15,7 @@ use crate::{ catalog, primary::Namespace, relational::{Table, VID_COLUMN}, + AsyncPgConnection, }; /// The initial batch size for tables that do not have an array column @@ -118,13 +119,13 @@ impl VidBatcher { /// The `vid_range` is inclusive, i.e., the batcher will iterate over /// all vids `vid_range.0 <= vid <= vid_range.1`; for an empty table, /// the `vid_range` must be set to `(-1, 0)` - pub fn load( - conn: &mut PgConnection, + pub async fn load( + conn: &mut AsyncPgConnection, nsp: &Namespace, table: &Table, vid_range: VidRange, ) -> Result<Self, StoreError> { - let bounds = catalog::histogram_bounds(conn, nsp, &table.name, VID_COLUMN)?; + let bounds = catalog::histogram_bounds(conn, nsp, &table.name, VID_COLUMN).await?; let batch_size = AdaptiveBatchSize::new(table); Self::new(bounds, vid_range, batch_size) } @@ -209,9 +210,9 @@ impl VidBatcher { /// The function returns the time it took to process the batch and the /// result of `f`.
If the batcher is finished, `f` will not be called, /// and `None` will be returned as its result. - pub fn step<T, F>(&mut self, f: F) -> Result<(Duration, Option<T>), StoreError> + pub async fn step<T, F>(&mut self, f: F) -> Result<(Duration, Option<T>), StoreError> where - F: FnOnce(i64, i64) -> Result<T, StoreError>, + F: AsyncFnOnce(i64, i64) -> Result<T, StoreError>, { if self.finished() { return Ok((Duration::from_secs(0), None)); @@ -222,7 +223,7 @@ impl VidBatcher { Some(ogive) => { self.step_timer.start(); - let res = f(self.start, self.end)?; + let res = f(self.start, self.end).await?; let duration = self.step_timer.elapsed(); let batch_size = self.batch_size.adapt(duration); @@ -274,8 +275,8 @@ impl VidRange { } /// Return the full range of `vid` values in the table `src` - pub fn for_copy( - conn: &mut PgConnection, + pub async fn for_copy( + conn: &mut AsyncPgConnection, src: &Table, target_block: &BlockPtr, ) -> Result<Self, StoreError> { @@ -284,17 +285,20 @@ impl VidRange { } else { "lower(block_range) <= $1" }; - let vid_range = sql_query(format!( - "/* controller=copy,target={target_number} */ \ - select coalesce(min(vid), 0) as min_vid, \ - coalesce(max(vid), -1) as max_vid \ - from {src_name} where {max_block_clause}", - target_number = target_block.number, - src_name = src.qualified_name.as_str(), - max_block_clause = max_block_clause - )) - .bind::<Integer, _>(&target_block.number) - .load::<VidRange>(conn)? + let vid_range = diesel_async::RunQueryDsl::load::<VidRange>( + sql_query(format!( + "/* controller=copy,target={target_number} */ \ + select coalesce(min(vid), 0) as min_vid, \ + coalesce(max(vid), -1) as max_vid \ + from {src_name} where {max_block_clause}", + target_number = target_block.number, + src_name = src.qualified_name.as_str(), + max_block_clause = max_block_clause + )) + .bind::<Integer, _>(&target_block.number), + conn, + ) + .await?
.pop() .unwrap_or(EMPTY_VID_RANGE); Ok(vid_range) } @@ -303,8 +307,8 @@ impl VidRange { /// Return the first and last vid of any entity that is visible in the /// block range from `first_block` (inclusive) to `last_block` /// (exclusive) - pub fn for_prune( - conn: &mut PgConnection, + pub async fn for_prune( + conn: &mut AsyncPgConnection, src: &Table, first_block: BlockNumber, last_block: BlockNumber, @@ -322,6 +326,7 @@ impl VidRange { .bind::<Integer, _>(first_block) .bind::<Integer, _>(last_block) .get_result::<VidRange>(conn) + .await .map_err(StoreError::from) } } @@ -356,11 +361,10 @@ mod tests { assert_eq!(self.vid.batch_size.size, size, "at size"); } - #[track_caller] - fn step(&mut self, start: i64, end: i64, duration: Duration) { + async fn step(&mut self, start: i64, end: i64, duration: Duration) { self.vid.step_timer.set(duration); - match self.vid.step(|s, e| Ok((s, e))).unwrap() { + match self.vid.step(async |s, e| Ok((s, e))).await.unwrap() { (d, Some((s, e))) => { // Failing here indicates that our clever Timer is misbehaving assert_eq!(d, duration, "step duration"); @@ -378,10 +382,9 @@ mod tests { } } - #[track_caller] - fn run(&mut self, start: i64, end: i64, size: i64, duration: Duration) { + async fn run(&mut self, start: i64, end: i64, size: i64, duration: Duration) { self.at(start, end, size); - self.step(start, end, duration); + self.step(start, end, duration).await; } fn finished(&self) -> bool { @@ -400,31 +403,31 @@ mod tests { } } - #[test] - fn simple() { + #[graph::test] + async fn simple() { let bounds = vec![10, 20, 30, 40, 49]; let mut batcher = Batcher::new(bounds, 5); batcher.at(10, 15, 5); - batcher.step(10, 15, S001); + batcher.step(10, 15, S001).await; batcher.at(16, 26, 10); - batcher.step(16, 26, S001); + batcher.step(16, 26, S001).await; batcher.at(27, 46, 20); assert!(!batcher.finished()); - batcher.step(27, 46, S001); + batcher.step(27, 46, S001).await; batcher.at(47, 49, 40); assert!(!batcher.finished()); - batcher.step(47, 49, S001); + batcher.step(47, 49, S001).await; assert!(batcher.finished()); batcher.at(50, 49, 80); } - #[test] - fn non_uniform() { + #[graph::test] + async fn non_uniform() { // A distribution that is flat in the beginning and then steeper and // linear towards the end.
The easiest way to see this is to graph // `(bounds[i], i*40)` @@ -433,23 +436,23 @@ mod tests { // The schedule of how we move through the bounds above in batches, // with varying timings for each batch - batcher.run(040, 075, 10, S010); - batcher.run(076, 145, 20, S010); - batcher.run(146, 240, 40, S200); - batcher.run(241, 270, 20, S200); - batcher.run(271, 281, 10, S200); - batcher.run(282, 287, 05, S050); - batcher.run(288, 298, 10, S050); - batcher.run(299, 309, 20, S050); - batcher.run(310, 325, 40, S100); - batcher.run(326, 336, 40, S100); - batcher.run(337, 347, 40, S100); - batcher.run(348, 357, 40, S100); - batcher.run(358, 359, 40, S010); + batcher.run(040, 075, 10, S010).await; + batcher.run(076, 145, 20, S010).await; + batcher.run(146, 240, 40, S200).await; + batcher.run(241, 270, 20, S200).await; + batcher.run(271, 281, 10, S200).await; + batcher.run(282, 287, 05, S050).await; + batcher.run(288, 298, 10, S050).await; + batcher.run(299, 309, 20, S050).await; + batcher.run(310, 325, 40, S100).await; + batcher.run(326, 336, 40, S100).await; + batcher.run(337, 347, 40, S100).await; + batcher.run(348, 357, 40, S100).await; + batcher.run(358, 359, 40, S010).await; assert!(batcher.finished()); batcher.at(360, 359, 80); - batcher.step(360, 359, S010); + batcher.step(360, 359, S010).await; } #[test] @@ -471,8 +474,8 @@ mod tests { assert_eq!(100_000, ogive.end()); } - #[test] - fn vid_batcher_handles_large_vid() { + #[graph::test] + async fn vid_batcher_handles_large_vid() { // An example with very large `vid` values which come from the new // schema of setting the `vid` to `block_num << 32 + sequence_num`. // These values are taken from an actual example subgraph and cuased @@ -556,16 +559,15 @@ mod tests { // Run through the entire `vid_batcher`, collecting start and end in // `steps` - let steps = std::iter::from_fn(|| { - vid_batcher - .step(|start, end| Ok((start, end, end - start))) - .unwrap() - .1 - }) - .fold(Vec::new(), |mut steps, (start, end, step)| { + let mut steps = Vec::new(); + while let Some((start, end, step)) = vid_batcher + .step(async |start, end| Ok((start, end, end - start))) + .await + .unwrap() + .1 + { steps.push((start, end, step)); - steps - }); + } assert_eq!(STEPS, &steps); } diff --git a/store/postgres/src/writable.rs b/store/postgres/src/writable.rs index 9c512e27ae7..74b8c99b6c8 100644 --- a/store/postgres/src/writable.rs +++ b/store/postgres/src/writable.rs @@ -6,6 +6,7 @@ use std::time::Instant; use std::{collections::BTreeMap, sync::Arc}; use async_trait::async_trait; +use diesel_async::scoped_futures::ScopedFutureExt; use graph::blockchain::block_stream::{EntitySourceOperation, FirehoseCursor}; use graph::blockchain::BlockTime; use graph::components::store::{Batch, DeploymentCursorTracker, DerivedEntityQuery, ReadStore}; @@ -19,27 +20,27 @@ use graph::prelude::{ }; use graph::schema::{EntityKey, EntityType, InputSchema}; use graph::slog::{debug, info, warn}; -use graph::tokio::select; -use graph::tokio::sync::Notify; -use graph::tokio::task::JoinHandle; use graph::util::bounded_queue::BoundedQueue; use graph::{ cheap_clone::CheapClone, components::store::{self, write::EntityOp, WritableStore as WritableStoreTrait}, data::subgraph::schema::SubgraphError, prelude::{ - BlockPtr, DeploymentHash, EntityModification, Error, Logger, StopwatchMetrics, StoreError, + BlockPtr, DeploymentHash, EntityModification, Logger, StopwatchMetrics, StoreError, StoreEvent, UnfailOutcome, ENV_VARS, }, slog::error, }; use store::StoredDynamicDataSource; +use 
tokio::select; +use tokio::sync::Notify; +use tokio::task::JoinHandle; use crate::deployment_store::DeploymentStore; use crate::primary::DeploymentId; use crate::relational::index::IndexList; -use crate::retry; use crate::{primary, primary::Site, relational::Layout, SubgraphStore}; +use crate::{retry, NotificationSender}; /// A wrapper around `SubgraphStore` that only exposes functions that are /// safe to call from `WritableStore`, i.e., functions that either do not @@ -49,28 +50,31 @@ use crate::{primary, primary::Site, relational::Layout, SubgraphStore}; struct WritableSubgraphStore(SubgraphStore); impl WritableSubgraphStore { - fn primary_conn(&self) -> Result<primary::Connection<'_>, StoreError> { - self.0.primary_conn() + async fn primary_conn(&self) -> Result<primary::Connection, StoreError> { + self.0.primary_conn().await } - pub(crate) fn send_store_event(&self, event: &StoreEvent) -> Result<(), StoreError> { - self.0.send_store_event(event) + fn notification_sender(&self) -> Arc<NotificationSender> { + self.0.notification_sender() } - fn layout(&self, id: &DeploymentHash) -> Result<Arc<Layout>, StoreError> { - self.0.layout(id) + async fn layout(&self, id: &DeploymentHash) -> Result<Arc<Layout>, StoreError> { + self.0.layout(id).await } - fn load_deployment(&self, site: Arc<Site>) -> Result<SubgraphDeploymentEntity, StoreError> { - self.0.load_deployment(site) + async fn load_deployment( + &self, + site: Arc<Site>, + ) -> Result<SubgraphDeploymentEntity, StoreError> { + self.0.load_deployment(site).await } - fn find_site(&self, id: DeploymentId) -> Result<Arc<Site>, StoreError> { - self.0.find_site(id) + async fn find_site(&self, id: DeploymentId) -> Result<Arc<Site>, StoreError> { + self.0.find_site(id).await } - fn load_indexes(&self, site: Arc<Site>) -> Result<IndexList, StoreError> { - self.0.load_indexes(site) + async fn load_indexes(&self, site: Arc<Site>) -> Result<IndexList, StoreError> { + self.0.load_indexes(site).await } } @@ -86,7 +90,7 @@ pub enum LastRollup { } impl LastRollup { - fn new( + async fn new( store: Arc<DeploymentStore>, site: Arc<Site>, has_aggregations: bool, @@ -96,7 +100,7 @@ impl LastRollup { (false, _) => LastRollup::NotNeeded, (true, None) => LastRollup::Unknown, (true, Some(_)) => { - let block_time = store.block_time(site)?; + let block_time = store.block_time(site).await?; block_time .map(|b| LastRollup::Some(b)) .unwrap_or(LastRollup::Unknown) @@ -109,7 +113,7 @@ pub struct LastRollupTracker(Mutex<LastRollup>); impl LastRollupTracker { - fn new( + async fn new( store: Arc<DeploymentStore>, site: Arc<Site>, has_aggregations: bool, @@ -121,6 +125,7 @@ impl LastRollupTracker { has_aggregations, block, ) + .await .map(|kind| Mutex::new(kind))?; Ok(Self(rollup)) } @@ -172,13 +177,14 @@ impl SyncStore { ) -> Result<Self, StoreError> { let store = WritableSubgraphStore(subgraph_store.clone()); let writable = subgraph_store.for_site(site.as_ref())?.clone(); - let input_schema = subgraph_store.input_schema(&site.deployment)?; + let input_schema = subgraph_store.input_schema(&site.deployment).await?; let last_rollup = LastRollupTracker::new( writable.cheap_clone(), site.cheap_clone(), input_schema.has_aggregations(), block, - )?; + ) + .await?; Ok(Self { logger, @@ -195,7 +201,7 @@ impl SyncStore { // Methods that mirror `WritableStoreTrait` impl SyncStore { async fn block_ptr(&self) -> Result<Option<BlockPtr>, StoreError> { - retry::forever_async(&self.logger, "block_ptr", || { + retry::forever(&self.logger, "block_ptr", || { let site = self.site.clone(); async move { self.writable.block_ptr(site).await } }) @@ -209,65 +215,72 @@ impl SyncStore { .map(FirehoseCursor::from) } - fn start_subgraph_deployment(&self, logger: &Logger) -> Result<(), StoreError> { - retry::forever(&self.logger, "start_subgraph_deployment", || { - let graft_base = match
self.writable.graft_pending(&self.site.deployment)? { + async fn start_subgraph_deployment(&self, logger: &Logger) -> Result<(), StoreError> { + retry::forever(&self.logger, "start_subgraph_deployment", || async { + let graft_base = match self.writable.graft_pending(&self.site.deployment).await? { Some((base_id, base_ptr)) => { - let src = self.store.layout(&base_id)?; - let deployment_entity = self.store.load_deployment(src.site.clone())?; - let indexes = self.store.load_indexes(src.site.clone())?; + let src = self.store.layout(&base_id).await?; + let deployment_entity = self.store.load_deployment(src.site.clone()).await?; + let indexes = self.store.load_indexes(src.site.clone()).await?; Some((src, base_ptr, deployment_entity, indexes)) } None => None, }; - graph::block_on( - self.writable - .start_subgraph(logger, self.site.clone(), graft_base), - )?; - self.store.primary_conn()?.copy_finished(self.site.as_ref()) + self.writable + .start_subgraph(logger, self.site.clone(), graft_base) + .await?; + self.store + .primary_conn() + .await? + .copy_finished(self.site.as_ref()) + .await }) + .await } - fn revert_block_operations( + async fn revert_block_operations( &self, block_ptr_to: BlockPtr, firehose_cursor: &FirehoseCursor, ) -> Result<(), StoreError> { - retry::forever(&self.logger, "revert_block_operations", || { - self.writable.revert_block_operations( - self.site.clone(), - block_ptr_to.clone(), - firehose_cursor, - )?; + retry::forever(&self.logger, "revert_block_operations", || async { + self.writable + .revert_block_operations(self.site.clone(), block_ptr_to.clone(), firehose_cursor) + .await?; - let block_time = self.writable.block_time(self.site.cheap_clone())?; + let block_time = self.writable.block_time(self.site.cheap_clone()).await?; self.last_rollup.set(block_time) }) + .await } - fn unfail_deterministic_error( + async fn unfail_deterministic_error( &self, current_ptr: &BlockPtr, parent_ptr: &BlockPtr, ) -> Result<UnfailOutcome, StoreError> { - retry::forever(&self.logger, "unfail_deterministic_error", || { + retry::forever(&self.logger, "unfail_deterministic_error", || async { self.writable .unfail_deterministic_error(self.site.clone(), current_ptr, parent_ptr) + .await }) + .await } - fn unfail_non_deterministic_error( + async fn unfail_non_deterministic_error( &self, current_ptr: &BlockPtr, ) -> Result<UnfailOutcome, StoreError> { - retry::forever(&self.logger, "unfail_non_deterministic_error", || { + retry::forever(&self.logger, "unfail_non_deterministic_error", || async { self.writable .unfail_non_deterministic_error(self.site.clone(), current_ptr) + .await }) + .await } async fn fail_subgraph(&self, error: SubgraphError) -> Result<(), StoreError> { - retry::forever_async(&self.logger, "fail_subgraph", || { + retry::forever(&self.logger, "fail_subgraph", || { let error = error.clone(); async { self.writable @@ -279,34 +292,40 @@ impl SyncStore { .await } - fn get(&self, key: &EntityKey, block: BlockNumber) -> Result<Option<Entity>, StoreError> { - retry::forever(&self.logger, "get", || { - self.writable.get(self.site.cheap_clone(), key, block) + async fn get(&self, key: &EntityKey, block: BlockNumber) -> Result<Option<Entity>, StoreError> { + retry::forever(&self.logger, "get", || async { + self.writable.get(self.site.cheap_clone(), key, block).await }) + .await } - fn transact_block_operations( + async fn transact_block_operations( &self, batch: &Batch, stopwatch: &StopwatchMetrics, ) -> Result<(), StoreError> { retry::forever(&self.logger, "transact_block_operations", move || { - self.writable.transact_block_operations( - &self.logger,
self.site.clone(), - batch, - self.last_rollup.get(), - stopwatch, - &self.manifest_idx_and_name, - )?; - // unwrap: batch.block_times is never empty - let last_block_time = batch.block_times.last().unwrap().1; - self.last_rollup.set(Some(last_block_time))?; - Ok(()) + async move { + self.writable + .transact_block_operations( + &self.logger, + self.site.clone(), + batch, + self.last_rollup.get(), + stopwatch, + &self.manifest_idx_and_name, + ) + .await?; + // unwrap: batch.block_times is never empty + let last_block_time = batch.block_times.last().unwrap().1; + self.last_rollup.set(Some(last_block_time))?; + Ok(()) + } }) + .await } - fn get_many( + async fn get_many( &self, keys: BTreeSet<EntityKey>, block: BlockNumber, @@ -320,26 +339,30 @@ impl SyncStore { .push(key.entity_id)?; } - retry::forever(&self.logger, "get_many", || { + retry::forever(&self.logger, "get_many", || async { self.writable .get_many(self.site.cheap_clone(), &by_type, block) + .await }) + .await } - fn get_derived( + async fn get_derived( &self, key: &DerivedEntityQuery, block: BlockNumber, excluded_keys: Vec<EntityKey>, ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> { - retry::forever(&self.logger, "get_derived", || { + retry::forever(&self.logger, "get_derived", || async { self.writable .get_derived(self.site.cheap_clone(), key, block, &excluded_keys) + .await }) + .await } async fn is_deployment_synced(&self) -> Result<bool, StoreError> { - retry::forever_async(&self.logger, "is_deployment_synced", || async { + retry::forever(&self.logger, "is_deployment_synced", || async { self.writable .exists_and_synced(self.site.deployment.cheap_clone()) .await @@ -347,26 +370,42 @@ impl SyncStore { .await } - fn unassign_subgraph(&self, site: &Site) -> Result<(), StoreError> { - retry::forever(&self.logger, "unassign_subgraph", || { - let mut pconn = self.store.primary_conn()?; - pconn.transaction(|conn| -> Result<_, StoreError> { - let mut pconn = primary::Connection::new(conn); - let changes = pconn.unassign_subgraph(site)?; - self.store.send_store_event(&StoreEvent::new(changes)) - }) + async fn unassign_subgraph(&self, site: &Site) -> Result<(), StoreError> { + retry::forever(&self.logger, "unassign_subgraph", || async { + let mut pconn = self.store.primary_conn().await?; + let sender = self.store.notification_sender(); + pconn + .transaction(|pconn| { + async { + let changes = pconn.unassign_subgraph(site).await?; + pconn + .send_store_event(&sender, &StoreEvent::new(changes)) + .await + } + .scope_boxed() + }) + .await }) + .await } - fn pause_subgraph(&self, site: &Site) -> Result<(), StoreError> { - retry::forever(&self.logger, "unassign_subgraph", || { - let mut pconn = self.store.primary_conn()?; - pconn.transaction(|conn| -> Result<_, StoreError> { - let mut pconn = primary::Connection::new(conn); - let changes = pconn.pause_subgraph(site)?; - self.store.send_store_event(&StoreEvent::new(changes)) - }) + async fn pause_subgraph(&self, site: &Site) -> Result<(), StoreError> { + retry::forever(&self.logger, "pause_subgraph", || async { + let mut pconn = self.store.primary_conn().await?; + let sender = self.store.notification_sender(); + pconn + .transaction(|pconn| { + async { + let changes = pconn.pause_subgraph(site).await?; + pconn + .send_store_event(&sender, &StoreEvent::new(changes)) + .await + } + .scope_boxed() + }) + .await }) + .await } async fn load_dynamic_data_sources( &self, block: BlockNumber, manifest_idx_and_name: Vec<(u32, String)>, ) -> Result<Vec<StoredDynamicDataSource>, StoreError> { - retry::forever_async(&self.logger, "load_dynamic_data_sources", ||
async { + retry::forever(&self.logger, "load_dynamic_data_sources", || async { self.writable .load_dynamic_data_sources( self.site.cheap_clone(), @@ -389,7 +428,7 @@ impl SyncStore { pub(crate) async fn causality_region_curr_val( &self, ) -> Result<Option<CausalityRegion>, StoreError> { - retry::forever_async(&self.logger, "causality_region_curr_val", || async { + retry::forever(&self.logger, "causality_region_curr_val", || async { self.writable .causality_region_curr_val(self.site.cheap_clone()) .await @@ -397,51 +436,61 @@ impl SyncStore { .await } - fn maybe_find_site(&self, src: DeploymentId) -> Result<Option<Arc<Site>>, StoreError> { - match self.store.find_site(src) { + async fn maybe_find_site(&self, src: DeploymentId) -> Result<Option<Arc<Site>>, StoreError> { + match self.store.find_site(src).await { Ok(site) => Ok(Some(site)), Err(StoreError::DeploymentNotFound(_)) => Ok(None), Err(e) => Err(e), } } - fn deployment_synced(&self, block_ptr: BlockPtr) -> Result<(), StoreError> { - retry::forever(&self.logger, "deployment_synced", || { + async fn deployment_synced(&self, block_ptr: BlockPtr) -> Result<(), StoreError> { + retry::forever(&self.logger, "deployment_synced", || async { let event = { // Make sure we drop `pconn` before we call into the deployment // store so that we do not hold two database connections which // might come from the same pool and could therefore deadlock - let mut pconn = self.store.primary_conn()?; - pconn.transaction(|conn| -> Result<_, Error> { - let mut pconn = primary::Connection::new(conn); - let changes = pconn.promote_deployment(&self.site.deployment)?; - Ok(StoreEvent::new(changes)) - })? + let mut pconn = self.store.primary_conn().await?; + pconn + .transaction(|pconn| { + async { + let changes = pconn.promote_deployment(&self.site.deployment).await?; + Ok(StoreEvent::new(changes)) + } + .scope_boxed() + }) + .await? }; // Handle on_sync actions. They only apply to copies (not // grafts) so we make sure that the source, if it exists, has // the same hash as `self.site` - if let Some(src) = self.writable.source_of_copy(&self.site)? { - if let Some(src) = self.maybe_find_site(src)? { + if let Some(src) = self.writable.source_of_copy(&self.site).await? { + if let Some(src) = self.maybe_find_site(src).await?
{ if src.deployment == self.site.deployment { - let on_sync = self.writable.on_sync(&self.site)?; + let on_sync = self.writable.on_sync(&self.site).await?; if on_sync.activate() { - let mut pconn = self.store.primary_conn()?; - pconn.activate(&self.site.as_ref().into())?; + let mut pconn = self.store.primary_conn().await?; + pconn.activate(&self.site.as_ref().into()).await?; } if on_sync.replace() { - self.unassign_subgraph(&src)?; + self.unassign_subgraph(&src).await?; } } } } self.writable - .deployment_synced(&self.site.deployment, block_ptr.clone())?; + .deployment_synced(&self.site.deployment, block_ptr.clone()) + .await?; - self.store.send_store_event(&event) + let mut pconn = self.store.primary_conn().await?; + let sender = self.store.notification_sender(); + pconn + .transaction(|pconn| pconn.send_store_event(&sender, &event).scope_boxed()) + .await }) + .await } fn shard(&self) -> &str { @@ -449,7 +498,7 @@ impl SyncStore { } async fn health(&self) -> Result<SubgraphHealth, StoreError> { - retry::forever_async(&self.logger, "health", || async { + retry::forever(&self.logger, "health", || async { self.writable.health(&self.site).await.map(Into::into) }) .await @@ -682,7 +731,7 @@ impl Request { } } - fn execute(&self) -> Result<ExecResult, StoreError> { + async fn execute(&self) -> Result<ExecResult, StoreError> { match self { Request::Write { batch, @@ -701,6 +750,7 @@ impl Request { } let res = store .transact_block_operations(batch.deref(), stopwatch) + .await .map(|()| ExecResult::Continue); info!(store.logger, "Committed write batch"; "block_number" => batch.block_ptr.number, @@ -717,6 +767,7 @@ impl Request { processed: _, } => store .revert_block_operations(block_ptr.clone(), firehose_cursor) + .await .map(|()| ExecResult::Continue), Request::Stop => Ok(ExecResult::Stop), } @@ -882,7 +933,7 @@ impl Queue { // batch should be processed or after some time // passed. The latter is just for safety in case // there is a mistake with notifications. - let sleep = graph::tokio::time::sleep(ENV_VARS.store.write_batch_duration); + let sleep = tokio::time::sleep(ENV_VARS.store.write_batch_duration); let notify = batch_stop_notify.notified(); select!( () = sleep => (), @@ -906,7 +957,7 @@ impl Queue { }; let res = { let _section = queue.stopwatch.start_section("queue_execute"); - graph::spawn_blocking_allow_panic(move || req.execute()).await + graph::spawn_blocking_allow_panic(move || graph::block_on(req.execute())).await }; let _section = queue.stopwatch.start_section("queue_pop"); @@ -1132,7 +1183,7 @@ impl Queue { /// Get the entity for `key` if it exists by looking at both the queue /// and the store - fn get(&self, key: &EntityKey) -> Result<Option<Entity>, StoreError> { + async fn get(&self, key: &EntityKey) -> Result<Option<Entity>, StoreError> { enum Op { Write(Entity), Remove, @@ -1154,12 +1205,12 @@ impl Queue { match op { Some(Op::Write(entity)) => Ok(Some(entity)), Some(Op::Remove) => Ok(None), - None => self.store.get(key, query_block), + None => self.store.get(key, query_block).await, } } /// Get many entities at once by looking at both the queue and the store - fn get_many( + async fn get_many( &self, mut keys: BTreeSet<EntityKey>, ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> { @@ -1191,7 +1242,7 @@ impl Queue { // Look entities for the remaining keys up in the store keys.retain(|key| !entities_in_queue.contains_key(key)); - let mut map = self.store.get_many(keys, query_block)?; + let mut map = self.store.get_many(keys, query_block).await?; // Extend the store results with the entities from the queue.
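// Editor's sketch, not part of the patch: `Queue::get` above and the merge
// below implement a read-overlay, where operations still queued for writing
// shadow whatever the database returns. A standalone rendering of that
// decision, reusing the shape of the `Op` enum from `Queue::get`; the `Entity`
// alias is a simplified stand-in for the real type.
type Entity = String;

enum Op {
    Write(Entity),
    Remove,
}

fn overlay_get(queued: Option<Op>, from_store: impl FnOnce() -> Option<Entity>) -> Option<Entity> {
    match queued {
        // A queued write is newer than anything already in the database.
        Some(Op::Write(entity)) => Some(entity),
        // A queued delete hides a row that may still exist in the database.
        Some(Op::Remove) => None,
        // Nothing queued for this key: fall back to the store.
        None => from_store(),
    }
}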
for (key, entity) in entities_in_queue { @@ -1204,7 +1255,7 @@ impl Queue { Ok(map) } - fn get_derived( + async fn get_derived( &self, derived_query: &DerivedEntityQuery, ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> { @@ -1251,9 +1302,10 @@ impl Queue { let excluded_keys: Vec<EntityKey> = entities_in_queue.keys().cloned().collect(); // We filter to exclude the entities ids that we already have from the queue - let mut items_from_database = - self.store - .get_derived(derived_query, query_block, excluded_keys)?; + let mut items_from_database = self + .store + .get_derived(derived_query, query_block, excluded_keys) + .await?; // Extend the store results with the entities from the queue. // This overwrites any entitiy from the database with the same key from queue @@ -1366,7 +1418,7 @@ impl Writer { async fn write(&self, batch: Batch, stopwatch: &StopwatchMetrics) -> Result<(), StoreError> { match self { - Writer::Sync(store) => store.transact_block_operations(&batch, stopwatch), + Writer::Sync(store) => store.transact_block_operations(&batch, stopwatch).await, Writer::Async { queue, .. } => { self.check_queue_running()?; queue.push_write(batch).await @@ -1380,7 +1432,11 @@ impl Writer { firehose_cursor: FirehoseCursor, ) -> Result<(), StoreError> { match self { - Writer::Sync(store) => store.revert_block_operations(block_ptr_to, &firehose_cursor), + Writer::Sync(store) => { + store + .revert_block_operations(block_ptr_to, &firehose_cursor) + .await + } Writer::Async { queue, .. } => { self.check_queue_running()?; let req = Request::revert(queue.store.cheap_clone(), block_ptr_to, firehose_cursor); @@ -1399,30 +1455,30 @@ impl Writer { } } - fn get(&self, key: &EntityKey) -> Result<Option<Entity>, StoreError> { + async fn get(&self, key: &EntityKey) -> Result<Option<Entity>, StoreError> { match self { - Writer::Sync(store) => store.get(key, BLOCK_NUMBER_MAX), - Writer::Async { queue, .. } => queue.get(key), + Writer::Sync(store) => store.get(key, BLOCK_NUMBER_MAX).await, + Writer::Async { queue, .. } => queue.get(key).await, } } - fn get_many( + async fn get_many( &self, keys: BTreeSet<EntityKey>, ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> { match self { - Writer::Sync(store) => store.get_many(keys, BLOCK_NUMBER_MAX), - Writer::Async { queue, .. } => queue.get_many(keys), + Writer::Sync(store) => store.get_many(keys, BLOCK_NUMBER_MAX).await, + Writer::Async { queue, .. } => queue.get_many(keys).await, } } - fn get_derived( + async fn get_derived( &self, key: &DerivedEntityQuery, ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> { match self { - Writer::Sync(store) => store.get_derived(key, BLOCK_NUMBER_MAX, vec![]), - Writer::Async { queue, .. } => queue.get_derived(key), + Writer::Sync(store) => store.get_derived(key, BLOCK_NUMBER_MAX, vec![]).await, + Writer::Async { queue, ..
} => queue.get_derived(key).await, } } @@ -1532,23 +1588,24 @@ impl WritableStore { } } +#[async_trait] impl ReadStore for WritableStore { - fn get(&self, key: &EntityKey) -> Result<Option<Entity>, StoreError> { - self.writer.get(key) + async fn get(&self, key: &EntityKey) -> Result<Option<Entity>, StoreError> { + self.writer.get(key).await } - fn get_many( + async fn get_many( &self, keys: BTreeSet<EntityKey>, ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> { - self.writer.get_many(keys) + self.writer.get_many(keys).await } - fn get_derived( + async fn get_derived( &self, key: &DerivedEntityQuery, ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> { - self.writer.get_derived(key) + self.writer.get_derived(key).await } fn input_schema(&self) -> InputSchema { @@ -1574,18 +1631,20 @@ impl SourceableStore { #[async_trait] impl store::SourceableStore for SourceableStore { - fn get_range( + async fn get_range( &self, entity_types: Vec<EntityType>, causality_region: CausalityRegion, block_range: Range<BlockNumber>, ) -> Result<BTreeMap<BlockNumber, Vec<EntitySourceOperation>>, StoreError> { - self.store.get_range( - self.site.clone(), - entity_types, - causality_region, - block_range, - ) + self.store + .get_range( + self.site.clone(), + entity_types, + causality_region, + block_range, + ) + .await } fn input_schema(&self) -> InputSchema { @@ -1616,9 +1675,8 @@ impl WritableStoreTrait for WritableStore { async fn start_subgraph_deployment(&self, logger: &Logger) -> Result<(), StoreError> { let store = self.store.cheap_clone(); let logger = logger.cheap_clone(); - graph::spawn_blocking_allow_panic(move || store.start_subgraph_deployment(&logger)) - .await - .map_err(Error::from)??; + + store.start_subgraph_deployment(&logger).await?; // Refresh all in memory state in case this instance was used before *self.block_ptr.lock().unwrap() = self.store.block_ptr().await?; @@ -1645,7 +1703,8 @@ impl WritableStoreTrait for WritableStore { ) -> Result<UnfailOutcome, StoreError> { let outcome = self .store - .unfail_deterministic_error(current_ptr, parent_ptr)? + .unfail_deterministic_error(current_ptr, parent_ptr) + .await?; if let UnfailOutcome::Unfailed = outcome { *self.block_ptr.lock().unwrap() = self.store.block_ptr().await?; @@ -1655,14 +1714,14 @@ impl WritableStoreTrait for WritableStore { Ok(outcome) } - fn unfail_non_deterministic_error( + async fn unfail_non_deterministic_error( &self, current_ptr: &BlockPtr, ) -> Result<UnfailOutcome, StoreError> { // We don't have to update in memory self.block_ptr // because the method call below doesn't rewind/revert // any block. - self.store.unfail_non_deterministic_error(current_ptr) + self.store.unfail_non_deterministic_error(current_ptr).await } async fn fail_subgraph(&self, error: SubgraphError) -> Result<(), StoreError> { @@ -1683,7 +1742,7 @@ impl WritableStoreTrait for WritableStore { is_caught_up_with_chain_head: bool, ) -> Result<(), StoreError> { if is_caught_up_with_chain_head { - self.deployment_synced(block_ptr_to.clone())?; + self.deployment_synced(block_ptr_to.clone()).await?; } else { self.writer.start_batching(); } @@ -1720,10 +1779,10 @@ impl WritableStoreTrait for WritableStore { /// - Disable the time-to-sync metrics gathering. /// - Stop batching writes. /// - Promote it to 'synced' status in the DB, if that hasn't been done already.
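// Editor's sketch, not part of the patch: the `#[async_trait]` added to the
// `ReadStore` impl above is the mechanical pattern this diff applies to every
// store trait. Native `async fn`s in traits are not object safe, so the macro
// rewrites each method to return a boxed future. A minimal self-contained
// example with illustrative names (`KvStore` and `MemStore` are not graph-node
// types):
use async_trait::async_trait;

#[async_trait]
trait KvStore: Send + Sync {
    async fn get(&self, key: &str) -> Result<Option<String>, String>;
}

struct MemStore;

#[async_trait]
impl KvStore for MemStore {
    async fn get(&self, key: &str) -> Result<Option<String>, String> {
        // A real implementation would await a database query here; the
        // diesel-async callers in this patch do exactly that with `.await?`.
        Ok(Some(key.to_uppercase()))
    }
}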
- fn deployment_synced(&self, block_ptr: BlockPtr) -> Result<(), StoreError> { + async fn deployment_synced(&self, block_ptr: BlockPtr) -> Result<(), StoreError> { self.writer.deployment_synced(); if !self.is_deployment_synced.load(Ordering::SeqCst) { - self.store.deployment_synced(block_ptr)?; + self.store.deployment_synced(block_ptr).await?; self.is_deployment_synced.store(true, Ordering::SeqCst); } Ok(()) @@ -1733,8 +1792,8 @@ impl WritableStoreTrait for WritableStore { self.is_deployment_synced.load(Ordering::SeqCst) } - fn pause_subgraph(&self) -> Result<(), StoreError> { - self.store.pause_subgraph(&self.store.site) + async fn pause_subgraph(&self) -> Result<(), StoreError> { + self.store.pause_subgraph(&self.store.site).await } async fn load_dynamic_data_sources( diff --git a/store/test-store/Cargo.toml b/store/test-store/Cargo.toml index 909c26453c6..fd6f9ba0566 100644 --- a/store/test-store/Cargo.toml +++ b/store/test-store/Cargo.toml @@ -6,6 +6,7 @@ authors = ["Leonardo Yvens "] description = "Provides static store instance for tests." [dependencies] +async-trait = { workspace = true } graph-graphql = { path = "../../graphql" } graph-node = { path = "../../node" } graph = { path = "../../graph" } @@ -14,7 +15,9 @@ graph-chain-ethereum = { path = "../../chain/ethereum" } lazy_static = "1.5" hex-literal = "1.0" diesel = { workspace = true } +diesel-async = { workspace = true } prost-types = { workspace = true } +tokio = { workspace = true } [dev-dependencies] hex = "0.4.3" diff --git a/store/test-store/src/block_store.rs b/store/test-store/src/block_store.rs index 092be0274a8..f085e2dbd9d 100644 --- a/store/test-store/src/block_store.rs +++ b/store/test-store/src/block_store.rs @@ -189,7 +189,7 @@ pub type FakeBlockList = Vec<&'static FakeBlock>; /// `null` pub async fn set_chain(chain: FakeBlockList, network: &str) -> Vec<(BlockPtr, BlockHash)> { let block_store = crate::store::STORE.block_store(); - let store = match block_store.chain_store(network) { + let store = match block_store.chain_store(network).await { Some(cs) => cs, None => block_store .create_chain_store( @@ -199,6 +199,7 @@ pub async fn set_chain(chain: FakeBlockList, network: &str) -> Vec<(BlockPtr, Bl genesis_block_hash: GENESIS_PTR.hash.clone(), }, ) + .await .unwrap(), }; let chain: Vec> = chain diff --git a/store/test-store/src/store.rs b/store/test-store/src/store.rs index 96da86a7b64..bfcb035456b 100644 --- a/store/test-store/src/store.rs +++ b/store/test-store/src/store.rs @@ -1,4 +1,4 @@ -use diesel::{self, PgConnection}; +use diesel; use graph::blockchain::mock::MockDataSource; use graph::blockchain::BlockTime; use graph::blockchain::ChainIdentifier; @@ -25,6 +25,7 @@ use graph_graphql::prelude::{ use graph_graphql::test_support::GraphQLMetrics; use graph_node::config::{Config, Opt}; use graph_node::store_builder::StoreBuilder; +use graph_store_postgres::AsyncPgConnection; use graph_store_postgres::{ layout_for_tests::FAKE_NETWORK_SHARED, BlockStore as DieselBlockStore, ConnectionPool, DeploymentPlacer, Shard, SubgraphStore as DieselSubgraphStore, SubscriptionManager, @@ -36,7 +37,6 @@ use std::collections::BTreeSet; use std::collections::HashMap; use std::time::Instant; use std::{marker::PhantomData, sync::Mutex}; -use tokio::runtime::{Builder, Runtime}; use web3::types::H256; pub const NETWORK_NAME: &str = "fake_network"; @@ -53,8 +53,6 @@ lazy_static! 
{ None => Logger::root(slog::Discard, o!()), }; static ref SEQ_LOCK: Mutex<()> = Mutex::new(()); - pub static ref STORE_RUNTIME: Runtime = - Builder::new_multi_thread().enable_all().build().unwrap(); pub static ref METRICS_REGISTRY: Arc<MetricsRegistry> = Arc::new(MetricsRegistry::mock()); pub static ref LOAD_MANAGER: Arc<LoadManager> = Arc::new(LoadManager::new( &LOGGER, @@ -69,7 +67,7 @@ lazy_static! { static ref CONFIG: Config = STORE_POOL_CONFIG.2.clone(); pub static ref NODE_ID: NodeId = NodeId::new("test").unwrap(); pub static ref SUBGRAPH_STORE: Arc<DieselSubgraphStore> = STORE.subgraph_store(); - static ref BLOCK_STORE: Arc<DieselBlockStore> = STORE.block_store(); + static ref BLOCK_STORE: DieselBlockStore = STORE.block_store(); pub static ref GENESIS_PTR: BlockPtr = ( H256::from(hex!( "bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f" )) @@ -118,16 +116,16 @@ where Err(err) => err.into_inner(), }; - STORE_RUNTIME.handle().block_on(async { + graph::TEST_RUNTIME.handle().block_on(async { let store = STORE.clone(); test(store).await }) } /// Run a test with a connection into the primary database, not a full store -pub fn run_test_with_conn<F>(test: F) +pub async fn run_test_with_conn<F>(test: F) where - F: FnOnce(&mut PgConnection), + F: AsyncFnOnce(&mut AsyncPgConnection), { // Lock regardless of poisoning. This also forces sequential test execution. let _lock = match SEQ_LOCK.lock() { @@ -137,14 +135,17 @@ where let mut conn = PRIMARY_POOL .get() + .await .expect("failed to get connection for primary database"); - test(&mut conn); + test(&mut conn).await; } -pub fn remove_subgraphs() { +/// Removes test data from the database behind the store. +pub async fn remove_subgraphs() { SUBGRAPH_STORE - .delete_all_entities_for_test_use_only() + .remove_all_subgraphs_for_test_use_only() + .await .expect("deleting test entities succeeds"); } @@ -187,14 +188,16 @@ pub async fn create_subgraph_with_manifest( let yaml = serde_yaml::to_string(&yaml).unwrap(); let deployment = DeploymentCreate::new(yaml, &manifest, None).graft(base); let name = SubgraphName::new_unchecked(subgraph_id.to_string()); - let deployment = SUBGRAPH_STORE.create_deployment_replace( - name, - &schema, - deployment, - NODE_ID.clone(), - NETWORK_NAME.to_string(), - SubgraphVersionSwitchingMode::Instant, - )?; + let deployment = SUBGRAPH_STORE + .create_deployment_replace( + name, + &schema, + deployment, + NODE_ID.clone(), + NETWORK_NAME.to_string(), + SubgraphVersionSwitchingMode::Instant, + ) + .await?; SUBGRAPH_STORE .cheap_clone() @@ -249,20 +252,21 @@ pub async fn create_test_subgraph_with_features( SUBGRAPH_STORE .create_subgraph_features(deployment_features) + .await .unwrap(); locator } -pub fn remove_subgraph(id: &DeploymentHash) { +pub async fn remove_subgraph(id: &DeploymentHash) { let name = SubgraphName::new_unchecked(id.to_string()); - SUBGRAPH_STORE.remove_subgraph(name).unwrap(); - let locs = SUBGRAPH_STORE.locators(id.as_str()).unwrap(); - let mut conn = primary_connection(); + SUBGRAPH_STORE.remove_subgraph(name).await.unwrap(); + let locs = SUBGRAPH_STORE.locators(id.as_str()).await.unwrap(); + let mut conn = primary_connection().await; for loc in locs { - let site = conn.locate_site(loc.clone()).unwrap().unwrap(); - conn.unassign_subgraph(&site).unwrap(); - SUBGRAPH_STORE.remove_deployment(site.id).unwrap(); + let site = conn.locate_site(loc.clone()).await.unwrap().unwrap(); + conn.unassign_subgraph(&site).await.unwrap(); + SUBGRAPH_STORE.remove_deployment(site.id).await.unwrap(); } } @@ -282,7 +286,7 @@ pub async fn transact_errors( deployment.hash.clone(),
"transact", metrics_registry.clone(), - store.subgraph_store().shard(deployment)?.to_string(), + store.subgraph_store().shard(deployment).await?.to_string(), ); let block_time = BlockTime::for_test(&block_ptr_to); store @@ -360,6 +364,7 @@ pub async fn transact_entities_and_dynamic_data_sources( entity_cache.append(ops); let mods = entity_cache .as_modifications(block_ptr_to.number) + .await .expect("failed to convert to modifications") .modifications; let metrics_registry = Arc::new(MetricsRegistry::mock()); @@ -400,17 +405,19 @@ pub async fn revert_block(store: &Arc, deployment: &DeploymentLocator, pt flush(deployment).await.unwrap(); } -pub fn insert_ens_name(hash: &str, name: &str) { +pub async fn insert_ens_name(hash: &str, name: &str) { use diesel::insert_into; - use diesel::prelude::*; + use diesel::ExpressionMethods; + use diesel_async::RunQueryDsl; use graph_store_postgres::command_support::catalog::ens_names; - let mut conn = PRIMARY_POOL.get().unwrap(); + let mut conn = PRIMARY_POOL.get().await.unwrap(); insert_into(ens_names::table) .values((ens_names::hash.eq(hash), ens_names::name.eq(name))) .on_conflict_do_nothing() .execute(&mut conn) + .await .unwrap(); } @@ -455,15 +462,15 @@ pub async fn flush(deployment: &DeploymentLocator) -> Result<(), StoreError> { /// requires. Of course, this does not test that events that are sent are /// actually received by anything, but makes ensuring that the right events /// get sent much more convenient than trying to receive them -pub fn tap_store_events(f: F) -> (R, Vec) +pub async fn tap_store_events(f: F) -> (R, Vec) where - F: FnOnce() -> R, + F: AsyncFnOnce() -> R, { use graph_store_postgres::layout_for_tests::{EVENT_TAP, EVENT_TAP_ENABLED}; EVENT_TAP.lock().unwrap().clear(); *EVENT_TAP_ENABLED.lock().unwrap() = true; - let res = f(); + let res = f().await; *EVENT_TAP_ENABLED.lock().unwrap() = false; (res, EVENT_TAP.lock().unwrap().clone()) } @@ -508,11 +515,15 @@ async fn execute_subgraph_query_internal( QueryTarget::Deployment(id, version) => (id, version), _ => unreachable!("tests do not use this"), }; - let schema = SUBGRAPH_STORE.api_schema(&id, &Default::default()).unwrap(); + let schema = SUBGRAPH_STORE + .api_schema(&id, &Default::default()) + .await + .unwrap(); let status = StatusStore::status( STORE.as_ref(), status::Filter::Deployments(vec![id.to_string()]), ) + .await .unwrap(); let network = Some(status[0].chains[0].network.clone()); let trace = query.trace; @@ -616,7 +627,7 @@ fn build_store() -> (Arc, ConnectionPool, Config, Arc (Arc, ConnectionPool, Config, Arc { cs.set_chain_identifier_for_tests(&ChainIdentifier { net_version: NETWORK_VERSION.to_string(), genesis_block_hash: GENESIS_PTR.hash.clone(), }) + .await .expect("unable to set identifier"); } None => { store .block_store() .create_chain_store(NETWORK_NAME, ident) + .await .expect("unable to create test network store"); } } @@ -656,8 +671,8 @@ fn build_store() -> (Arc, ConnectionPool, Config, Arc graph_store_postgres::layout_for_tests::Connection<'static> { - let conn = PRIMARY_POOL.get().unwrap(); +pub async fn primary_connection() -> graph_store_postgres::layout_for_tests::Connection { + let conn = PRIMARY_POOL.get().await.unwrap(); graph_store_postgres::layout_for_tests::Connection::new(conn) } diff --git a/store/test-store/tests/chain/ethereum/manifest.rs b/store/test-store/tests/chain/ethereum/manifest.rs index b72f70dcd78..172068add9c 100644 --- a/store/test-store/tests/chain/ethereum/manifest.rs +++ b/store/test-store/tests/chain/ethereum/manifest.rs @@ 
-4,6 +4,7 @@ use std::str::FromStr; use std::sync::Arc; use std::time::Duration; +use async_trait::async_trait; use graph::blockchain::DataSource; use graph::components::store::BLOCK_NUMBER_MAX; use graph::data::store::scalar::Bytes; @@ -19,8 +20,8 @@ use graph::entity; use graph::env::ENV_VARS; use graph::prelude::web3::types::H256; use graph::prelude::{ - anyhow, async_trait, serde_yaml, tokio, BigDecimal, BigInt, DeploymentHash, Link, - SubgraphManifest, SubgraphManifestResolveError, SubgraphManifestValidationError, SubgraphStore, + anyhow, serde_yaml, BigDecimal, BigInt, DeploymentHash, Link, SubgraphManifest, + SubgraphManifestResolveError, SubgraphManifestValidationError, SubgraphStore, UnvalidatedSubgraphManifest, }; use graph::{ @@ -168,7 +169,7 @@ async fn resolve_unvalidated(text: &str) -> UnvalidatedSubgraphManifest { // Some of these manifest tests should be made chain-independent, but for // now we just run them for the ethereum `Chain` -#[tokio::test] +#[graph::test] async fn simple_manifest() { const YAML: &str = " dataSources: [] @@ -184,7 +185,7 @@ specVersion: 0.0.2 assert!(manifest.graft.is_none()); } -#[tokio::test] +#[graph::test] async fn ipfs_manifest() { let yaml = " schema: @@ -217,7 +218,7 @@ specVersion: 0.0.7 assert_eq!(data_source.kind, OffchainDataSourceKind::Ipfs); } -#[tokio::test] +#[graph::test] async fn subgraph_ds_manifest() { let yaml = " schema: @@ -260,7 +261,7 @@ specVersion: 1.3.0 } } -#[tokio::test] +#[graph::test] async fn subgraph_ds_manifest_aggregations_should_fail() { let yaml = " schema: @@ -296,7 +297,7 @@ specVersion: 1.3.0 .contains("Entity TokenStats is an aggregation and cannot be used as a mapping entity")); } -#[tokio::test] +#[graph::test] async fn multiple_subgraph_ds_manifest() { let yaml = " schema: @@ -368,7 +369,7 @@ specVersion: 1.3.0 } } -#[tokio::test] +#[graph::test] async fn graft_manifest() { const YAML: &str = " dataSources: [] @@ -389,7 +390,7 @@ specVersion: 0.0.2 assert_eq!(12345, graft.block); } -#[tokio::test] +#[graph::test] async fn parse_indexer_hints() { const YAML: &str = " dataSources: [] @@ -467,6 +468,7 @@ specVersion: 0.0.2 let schema = store .subgraph_store() .input_schema(&deployment.hash) + .await .unwrap(); // Adds an example entity. 
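// Editor's note, not part of the patch: every `#[tokio::test]` in these test
// files becomes `#[graph::test]`. The macro's definition is outside this diff;
// judging by the shared `graph::TEST_RUNTIME` handle used in store.rs above, it
// presumably runs the async test body on that one shared runtime instead of the
// fresh per-test runtime that `#[tokio::test]` creates, roughly:
//
//     #[test]
//     fn simple_manifest() {
//         graph::TEST_RUNTIME.handle().block_on(async {
//             // original async test body
//         })
//     }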
@@ -551,6 +553,7 @@ specVersion: 0.0.2 let schema = store .subgraph_store() .input_schema(&deployment.hash) + .await .unwrap(); // This check is awkward since the test manifest has other problems // that the validation complains about as setting up a valid manifest @@ -639,7 +642,7 @@ specVersion: 0.0.2 }) } -#[tokio::test] +#[graph::test] async fn parse_data_source_context() { const YAML: &str = " dataSources: @@ -749,7 +752,7 @@ specVersion: 0.0.8 ); } -#[tokio::test] +#[graph::test] async fn parse_event_handlers_with_topics() { const YAML: &str = " dataSources: @@ -820,7 +823,7 @@ specVersion: 1.2.0 ); } -#[tokio::test] +#[graph::test] async fn parse_block_handlers_with_polling_filter() { const YAML: &str = " dataSources: @@ -876,7 +879,7 @@ specVersion: 0.0.8 assert_eq!("Qmmanifest", manifest.id.as_str()); } -#[tokio::test] +#[graph::test] async fn parse_data_source_with_end_block() { const YAML: &str = " dataSources: @@ -913,7 +916,7 @@ specVersion: 0.0.9 assert_eq!(Some(9562481), end_block); } -#[tokio::test] +#[graph::test] async fn parse_block_handlers_with_both_polling_and_once_filter() { const YAML: &str = " dataSources: @@ -980,7 +983,7 @@ specVersion: 0.0.8 assert_eq!("Qmmanifest", manifest.id.as_str()); } -#[tokio::test] +#[graph::test] async fn should_not_parse_block_handlers_with_both_filtered_and_non_filtered_handlers() { const YAML: &str = " dataSources: @@ -1045,7 +1048,7 @@ specVersion: 0.0.8 assert_eq!("Qmmanifest", manifest.id.as_str()); } -#[tokio::test] +#[graph::test] async fn parse_block_handlers_with_call_filter() { const YAML: &str = " dataSources: @@ -1093,7 +1096,7 @@ specVersion: 0.0.2 assert_eq!("Qmmanifest", manifest.id.as_str()); } -#[tokio::test] +#[graph::test] async fn parse_block_handlers_with_once_filter() { const YAML: &str = " dataSources: @@ -1141,7 +1144,7 @@ specVersion: 0.0.8 assert_eq!("Qmmanifest", manifest.id.as_str()); } -#[tokio::test] +#[graph::test] async fn parse_call_handlers() { const YAML: &str = " dataSources: @@ -1717,7 +1720,7 @@ dataSources: }); } -#[tokio::test] +#[graph::test] async fn mixed_subgraph_and_onchain_ds_manifest_should_fail() { let yaml = " schema: @@ -1869,7 +1872,7 @@ specVersion: 1.3.0 }) } -#[tokio::test] +#[graph::test] async fn subgraph_ds_manifest_mutable_entities_should_fail() { let yaml = " schema: @@ -1905,7 +1908,7 @@ specVersion: 1.3.0 .contains("Entity MutableEntity is not immutable and cannot be used as a mapping entity")); } -#[tokio::test] +#[graph::test] async fn subgraph_ds_manifest_immutable_entities_should_succeed() { let yaml = " schema: diff --git a/store/test-store/tests/core/interfaces.rs b/store/test-store/tests/core/interfaces.rs index a4fc8314665..2352bd939f5 100644 --- a/store/test-store/tests/core/interfaces.rs +++ b/store/test-store/tests/core/interfaces.rs @@ -45,7 +45,7 @@ macro_rules! 
extract_data { }; } -#[tokio::test] +#[graph::test] async fn one_interface_zero_entities() { let subgraph_id = "oneInterfaceZeroEntities"; let schema = "interface Legged { legs: Int } @@ -62,7 +62,7 @@ async fn one_interface_zero_entities() { assert_eq!(data, exp); } -#[tokio::test] +#[graph::test] async fn one_interface_one_entity() { let subgraph_id = "oneInterfaceOneEntity"; let document = "interface Legged { legs: Int } @@ -90,7 +90,7 @@ async fn one_interface_one_entity() { assert_eq!(data, exp); } -#[tokio::test] +#[graph::test] async fn one_interface_one_entity_typename() { let subgraph_id = "oneInterfaceOneEntityTypename"; let document = "interface Legged { legs: Int } @@ -109,7 +109,7 @@ async fn one_interface_one_entity_typename() { assert_eq!(data, exp); } -#[tokio::test] +#[graph::test] async fn one_interface_multiple_entities() { let subgraph_id = "oneInterfaceMultipleEntities"; let document = "interface Legged { legs: Int } @@ -140,7 +140,7 @@ async fn one_interface_multiple_entities() { assert_eq!(data, exp); } -#[tokio::test] +#[graph::test] async fn reference_interface() { let subgraph_id = "ReferenceInterface"; let document = "type Leg @entity { id: ID! } @@ -162,7 +162,7 @@ async fn reference_interface() { assert_eq!(data, exp); } -#[tokio::test] +#[graph::test] async fn reference_interface_derived() { // Test the different ways in which interface implementations // can reference another entity @@ -230,7 +230,7 @@ async fn reference_interface_derived() { assert_eq!(data, exp); } -#[tokio::test] +#[graph::test] async fn follow_interface_reference_invalid() { let subgraph_id = "FollowInterfaceReferenceInvalid"; let schema = "interface Legged { legs: Int! } @@ -263,7 +263,7 @@ async fn follow_interface_reference_invalid() { } } -#[tokio::test] +#[graph::test] async fn follow_interface_reference() { let subgraph_id = "FollowInterfaceReference"; let document = "interface Legged { id: ID!, legs: Int! } @@ -296,7 +296,7 @@ async fn follow_interface_reference() { assert_eq!(data, exp) } -#[tokio::test] +#[graph::test] async fn conflicting_implementors_id() { let subgraph_id = "ConflictingImplementorsId"; let document = "interface Legged { legs: Int } @@ -325,7 +325,7 @@ async fn conflicting_implementors_id() { assert!(msg == EXPECTED1 || msg == EXPECTED2); } -#[tokio::test] +#[graph::test] async fn derived_interface_relationship() { let subgraph_id = "DerivedInterfaceRelationship"; let document = "interface ForestDweller { id: ID!, forest: Forest } @@ -349,7 +349,7 @@ async fn derived_interface_relationship() { ); } -#[tokio::test] +#[graph::test] async fn two_interfaces() { let subgraph_id = "TwoInterfaces"; let document = "interface IFoo { foo: String! 
} @@ -381,7 +381,7 @@ async fn two_interfaces() { assert_eq!(data, exp); } -#[tokio::test] +#[graph::test] async fn interface_non_inline_fragment() { let subgraph_id = "interfaceNonInlineFragment"; let document = "interface Legged { legs: Int } @@ -412,7 +412,7 @@ async fn interface_non_inline_fragment() { assert_eq!(data, exp); } -#[tokio::test] +#[graph::test] async fn interface_inline_fragment() { let subgraph_id = "interfaceInlineFragment"; let document = "interface Legged { legs: Int } @@ -436,7 +436,7 @@ async fn interface_inline_fragment() { assert_eq!(data, exp); } -#[tokio::test] +#[graph::test] async fn interface_inline_fragment_with_subquery() { let subgraph_id = "InterfaceInlineFragmentWithSubquery"; let document = " @@ -488,7 +488,7 @@ async fn interface_inline_fragment_with_subquery() { assert_eq!(data, exp); } -#[tokio::test] +#[graph::test] async fn invalid_fragment() { let subgraph_id = "InvalidFragment"; let schema = "interface Legged { legs: Int! } @@ -520,7 +520,7 @@ async fn invalid_fragment() { } } -#[tokio::test] +#[graph::test] async fn alias() { let subgraph_id = "Alias"; let document = "interface Legged { id: ID!, legs: Int! } @@ -570,7 +570,7 @@ async fn alias() { ) } -#[tokio::test] +#[graph::test] async fn fragments_dont_panic() { let subgraph_id = "FragmentsDontPanic"; let document = " @@ -641,7 +641,7 @@ async fn fragments_dont_panic() { } // See issue #1816 -#[tokio::test] +#[graph::test] async fn fragments_dont_duplicate_data() { let subgraph_id = "FragmentsDupe"; let document = " @@ -709,7 +709,7 @@ async fn fragments_dont_duplicate_data() { } // See also: e0d6da3e-60cf-41a5-b83c-b60a7a766d4a -#[tokio::test] +#[graph::test] async fn redundant_fields() { let subgraph_id = "RedundantFields"; let document = "interface Legged { id: ID!, parent: Legged } @@ -759,7 +759,7 @@ async fn redundant_fields() { ) } -#[tokio::test] +#[graph::test] async fn fragments_merge_selections() { let subgraph_id = "FragmentsMergeSelections"; let document = " @@ -820,7 +820,7 @@ async fn fragments_merge_selections() { ) } -#[tokio::test] +#[graph::test] async fn merge_fields_not_in_interface() { let subgraph_id = "MergeFieldsNotInInterface"; let document = "interface Iface { id: ID! } @@ -881,7 +881,7 @@ async fn merge_fields_not_in_interface() { ) } -#[tokio::test] +#[graph::test] async fn nested_interface_fragments() { let subgraph_id = "NestedInterfaceFragments"; let document = "interface I1face { id: ID!, foo1: Foo! } @@ -978,7 +978,7 @@ async fn nested_interface_fragments() { ) } -#[tokio::test] +#[graph::test] async fn nested_interface_fragments_overlapping() { let subgraph_id = "NestedInterfaceFragmentsOverlapping"; let document = "interface I1face { id: ID!, foo1: Foo! 
} @@ -1071,7 +1071,7 @@ async fn nested_interface_fragments_overlapping() { ); } -#[tokio::test] +#[graph::test] async fn enums() { use r::Value::Enum; let subgraph_id = "enums"; @@ -1124,7 +1124,7 @@ async fn enums() { ); } -#[tokio::test] +#[graph::test] async fn enum_list_filters() { use r::Value::Enum; let subgraph_id = "enum_list_filters"; @@ -1199,7 +1199,7 @@ async fn enum_list_filters() { ); } -#[tokio::test] +#[graph::test] async fn recursive_fragment() { // Depending on whether `ENABLE_GRAPHQL_VALIDATIONS` is set or not, we // get different error messages @@ -1273,7 +1273,7 @@ async fn recursive_fragment() { assert!(FOO_BAR_ERRORS.contains(&data.as_str())); } -#[tokio::test] +#[graph::test] async fn mixed_mutability() { let subgraph_id = "MixedMutability"; let document = "interface Event { id: String! } @@ -1297,7 +1297,7 @@ async fn mixed_mutability() { // this. They rely on the EntityCache filtering out entity changes // that are already in the store let id = DeploymentHash::new(subgraph_id).unwrap(); - remove_subgraph(&id); + remove_subgraph(&id).await; } let res = insert_and_query(subgraph_id, document, entities, query) .await .unwrap(); @@ -1308,7 +1308,7 @@ async fn mixed_mutability() { assert_eq!(data, exp); } -#[tokio::test] +#[graph::test] async fn derived_interface_bytes() { fn b(s: &str) -> Value { Value::Bytes(s.parse().unwrap()) diff --git a/store/test-store/tests/graph/entity_cache.rs b/store/test-store/tests/graph/entity_cache.rs index cf9bc3faffa..be27d111fa8 100644 --- a/store/test-store/tests/graph/entity_cache.rs +++ b/store/test-store/tests/graph/entity_cache.rs @@ -1,3 +1,4 @@ +use async_trait::async_trait; use graph::blockchain::block_stream::FirehoseCursor; use graph::blockchain::BlockTime; use graph::components::store::{ @@ -54,19 +55,20 @@ impl MockStore { } } +#[async_trait] impl ReadStore for MockStore { - fn get(&self, key: &EntityKey) -> Result<Option<Entity>, StoreError> { + async fn get(&self, key: &EntityKey) -> Result<Option<Entity>, StoreError> { Ok(self.get_many_res.get(key).cloned()) } - fn get_many( + async fn get_many( &self, _keys: BTreeSet<EntityKey>, ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> { Ok(self.get_many_res.clone()) } - fn get_derived( + async fn get_derived( &self, _key: &DerivedEntityQuery, ) -> Result<BTreeMap<EntityKey, Entity>, StoreError> { @@ -113,7 +115,10 @@ impl WritableStore for MockStore { unimplemented!() } - fn unfail_non_deterministic_error(&self, _: &BlockPtr) -> Result<UnfailOutcome, StoreError> { + async fn unfail_non_deterministic_error( + &self, + _: &BlockPtr, + ) -> Result<UnfailOutcome, StoreError> { unimplemented!() } @@ -141,7 +146,7 @@ impl WritableStore for MockStore { unimplemented!() } - fn pause_subgraph(&self) -> Result<(), StoreError> { + async fn pause_subgraph(&self) -> Result<(), StoreError> { unimplemented!() } @@ -152,7 +157,7 @@ impl WritableStore for MockStore { unimplemented!() } - fn deployment_synced(&self, _block_ptr: BlockPtr) -> Result<(), StoreError> { + async fn deployment_synced(&self, _block_ptr: BlockPtr) -> Result<(), StoreError> { unimplemented!() } @@ -186,16 +191,16 @@ fn sort_by_entity_key(mut mods: Vec<EntityModification>) -> Vec<EntityModification> let mut sigurros_data = entity! { SCHEMA => id: "sigurros", name: "Sigur Ros" }; let sigurros_key = make_band_key("sigurros"); cache .set(sigurros_key.clone(), sigurros_data.clone(), 0, None) + .await .unwrap(); mogwai_data.set_vid(100).unwrap(); sigurros_data.set_vid(101).unwrap(); - let result = cache.as_modifications(0); + let result = cache.as_modifications(0).await; assert_eq!( sort_by_entity_key(result.unwrap().modifications), sort_by_entity_key(vec![ @@ -237,8 +244,8 @@ fn entity_version_map(entity_type: &str, entities: Vec<Entity>) -> BTreeMap<EntityKey, Entity> let mut sigurros_data = entity! { SCHEMA => id: "sigurros",
name: "Sigur Ros", founded: 1994}; let sigurros_key = make_band_key("sigurros"); cache .set(sigurros_key.clone(), sigurros_data.clone(), 0, None) + .await .unwrap(); mogwai_data.set_vid(100).unwrap(); sigurros_data.set_vid(101).unwrap(); - let result = cache.as_modifications(0); + let result = cache.as_modifications(0).await; assert_eq!( sort_by_entity_key(result.unwrap().modifications), sort_by_entity_key(vec![ @@ -277,8 +286,8 @@ fn overwrite_modifications() { ); } -#[test] -fn consecutive_modifications() { +#[graph::test] +async fn consecutive_modifications() { // Pre-populate the store with data so that we can test setting a field to // `Value::Null`. let store = { @@ -295,16 +304,19 @@ fn consecutive_modifications() { let update_data = entity! { SCHEMA => id: "mogwai", founded: 1995, label: "Rock Action Records" }; let update_key = make_band_key("mogwai"); - cache.set(update_key, update_data, 0, None).unwrap(); + cache.set(update_key, update_data, 0, None).await.unwrap(); // Then, just reset the "label". let update_data = entity! { SCHEMA => id: "mogwai", label: Value::Null }; let update_key = make_band_key("mogwai"); - cache.set(update_key.clone(), update_data, 0, None).unwrap(); + cache + .set(update_key.clone(), update_data, 0, None) + .await + .unwrap(); // We expect a single overwrite modification for the above that leaves "id" // and "name" untouched, sets "founded" and removes the "label" field. - let result = cache.as_modifications(0); + let result = cache.as_modifications(0).await; assert_eq!( sort_by_entity_key(result.unwrap().modifications), sort_by_entity_key(vec![EntityModification::overwrite( @@ -315,8 +327,8 @@ fn consecutive_modifications() { ); } -#[test] -fn check_vid_sequence() { +#[graph::test] +async fn check_vid_sequence() { let store = MockStore::new(BTreeMap::new()); let store = Arc::new(store); let mut cache = EntityCache::new(store); @@ -328,10 +340,11 @@ fn check_vid_sequence() { let mogwai_data = entity! { SCHEMA => id: id, name: name }; cache .set(mogwai_key.clone(), mogwai_data.clone(), 0, None) + .await .unwrap(); } - let result = cache.as_modifications(0); + let result = cache.as_modifications(0).await; let mods = result.unwrap().modifications; for m in mods { match m { @@ -397,12 +410,6 @@ lazy_static! 
{ static ref PURSE_TYPE: EntityType = LOAD_RELATED_SUBGRAPH.entity_type(PURSE).unwrap(); } -fn remove_test_data(store: Arc) { - store - .delete_all_entities_for_test_use_only() - .expect("deleting test entities succeeds"); -} - fn run_store_test(test: F) where F: FnOnce( @@ -418,7 +425,7 @@ where run_test_sequentially(|store| async move { let subgraph_store = store.subgraph_store(); // Reset state before starting - remove_test_data(subgraph_store.clone()); + remove_subgraphs().await; // Seed database with test data let deployment = insert_test_data(subgraph_store.clone()).await; @@ -469,6 +476,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator NETWORK_NAME.to_string(), SubgraphVersionSwitchingMode::Instant, ) + .await .unwrap(); // 1 account 3 wallets @@ -540,7 +548,7 @@ fn check_for_account_with_multiple_wallets() { entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; - let result = cache.load_related(&request).unwrap(); + let result = cache.load_related(&request).await.unwrap(); let wallet_1 = create_wallet_entity("1", &account_id, 67_i32, 1); let wallet_2 = create_wallet_entity("2", &account_id, 92_i32, 2); let wallet_3 = create_wallet_entity("3", &account_id, 192_i32, 3); @@ -560,7 +568,7 @@ fn check_for_account_with_single_wallet() { entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; - let result = cache.load_related(&request).unwrap(); + let result = cache.load_related(&request).await.unwrap(); let wallet_1 = create_wallet_entity("4", &account_id, 32_i32, 4); let expeted_vec = vec![wallet_1]; @@ -578,7 +586,7 @@ fn check_for_account_with_no_wallet() { entity_id: account_id, causality_region: CausalityRegion::ONCHAIN, }; - let result = cache.load_related(&request).unwrap(); + let result = cache.load_related(&request).await.unwrap(); let expeted_vec = vec![]; assert_eq!(result, expeted_vec); @@ -595,7 +603,7 @@ fn check_for_account_that_doesnt_exist() { entity_id: account_id, causality_region: CausalityRegion::ONCHAIN, }; - let result = cache.load_related(&request).unwrap(); + let result = cache.load_related(&request).await.unwrap(); let expeted_vec = vec![]; assert_eq!(result, expeted_vec); @@ -612,7 +620,7 @@ fn check_for_non_existent_field() { entity_id: account_id, causality_region: CausalityRegion::ONCHAIN, }; - let result = cache.load_related(&request).unwrap_err(); + let result = cache.load_related(&request).await.unwrap_err(); let expected = format!( "Entity {}[{}]: unknown field `{}`", request.entity_type, request.entity_id, request.entity_field, @@ -644,7 +652,7 @@ fn check_for_insert_async_store() { entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; - let result = cache.load_related(&request).unwrap(); + let result = cache.load_related(&request).await.unwrap(); let wallet_1 = create_wallet_entity("4", &account_id, 32_i32, 4); let wallet_2 = create_wallet_entity("5", &account_id, 79_i32, 12); let wallet_3 = create_wallet_entity("6", &account_id, 200_i32, 13); @@ -676,7 +684,7 @@ fn check_for_insert_async_not_related() { entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; - let result = cache.load_related(&request).unwrap(); + let result = cache.load_related(&request).await.unwrap(); let wallet_1 = create_wallet_entity("1", &account_id, 67_i32, 1); let wallet_2 = create_wallet_entity("2", &account_id, 92_i32, 2); let wallet_3 = create_wallet_entity("3", &account_id, 192_i32, 3); @@ -697,7 +705,7 @@ fn check_for_update_async_related() { EntityOperation::Set { 
ref data, .. } => data.clone(), _ => unreachable!(), }; - assert_ne!(writable.get(&entity_key).unwrap().unwrap(), new_data); + assert_ne!(writable.get(&entity_key).await.unwrap().unwrap(), new_data); // insert a new wallet transact_entity_operations( &store, @@ -714,7 +722,7 @@ fn check_for_update_async_related() { entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; - let result = cache.load_related(&request).unwrap(); + let result = cache.load_related(&request).await.unwrap(); let wallet_2 = create_wallet_entity("2", &account_id, 92_i32, 2); let wallet_3 = create_wallet_entity("3", &account_id, 192_i32, 3); let expeted_vec = vec![new_data, wallet_2, wallet_3]; @@ -744,7 +752,7 @@ fn check_for_delete_async_related() { entity_id: account_id.clone(), causality_region: CausalityRegion::ONCHAIN, }; - let result = cache.load_related(&request).unwrap(); + let result = cache.load_related(&request).await.unwrap(); let wallet_2 = create_wallet_entity("2", &account_id, 92_i32, 2); let wallet_3 = create_wallet_entity("3", &account_id, 192_i32, 3); let expeted_vec = vec![wallet_2, wallet_3]; @@ -764,37 +772,43 @@ fn scoped_get() { let account5 = ACCOUNT_TYPE.parse_id("5").unwrap(); let mut wallet5 = create_wallet_entity_no_vid("5", &account5, 100); let key5 = WALLET_TYPE.parse_key("5").unwrap(); - cache.set(key5.clone(), wallet5.clone(), 0, None).unwrap(); + cache + .set(key5.clone(), wallet5.clone(), 0, None) + .await + .unwrap(); wallet5.set_vid(100).unwrap(); // For the new entity, we can retrieve it with either scope - let act5 = cache.get(&key5, GetScope::InBlock).unwrap(); + let act5 = cache.get(&key5, GetScope::InBlock).await.unwrap(); assert_eq!(Some(&wallet5), act5.as_ref().map(|e| e.as_ref())); - let act5 = cache.get(&key5, GetScope::Store).unwrap(); + let act5 = cache.get(&key5, GetScope::Store).await.unwrap(); assert_eq!(Some(&wallet5), act5.as_ref().map(|e| e.as_ref())); let mut wallet1a = wallet1.clone(); wallet1a.set_vid(1).unwrap(); // For an entity in the store, we can not get it `InBlock` but with // `Store` - let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); + let act1 = cache.get(&key1, GetScope::InBlock).await.unwrap(); assert_eq!(None, act1); - let act1 = cache.get(&key1, GetScope::Store).unwrap(); + let act1 = cache.get(&key1, GetScope::Store).await.unwrap(); assert_eq!(Some(&wallet1a), act1.as_ref().map(|e| e.as_ref())); // Even after reading from the store, the entity is not visible with // `InBlock` - let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); + let act1 = cache.get(&key1, GetScope::InBlock).await.unwrap(); assert_eq!(None, act1); // But if it gets updated, it becomes visible with either scope let mut wallet1 = wallet1; wallet1.set("balance", 70).unwrap(); - cache.set(key1.clone(), wallet1.clone(), 0, None).unwrap(); + cache + .set(key1.clone(), wallet1.clone(), 0, None) + .await + .unwrap(); wallet1a = wallet1; wallet1a.set_vid(101).unwrap(); - let act1 = cache.get(&key1, GetScope::InBlock).unwrap(); + let act1 = cache.get(&key1, GetScope::InBlock).await.unwrap(); assert_eq!(Some(&wallet1a), act1.as_ref().map(|e| e.as_ref())); - let act1 = cache.get(&key1, GetScope::Store).unwrap(); + let act1 = cache.get(&key1, GetScope::Store).await.unwrap(); assert_eq!(Some(&wallet1a), act1.as_ref().map(|e| e.as_ref())); }) } @@ -814,10 +828,10 @@ fn no_internal_keys() { } let key = WALLET_TYPE.parse_key("1").unwrap(); - let wallet = writable.get(&key).unwrap().unwrap(); + let wallet = writable.get(&key).await.unwrap().unwrap(); check(&key, 
&wallet); - let wallet = cache.get(&key, GetScope::Store).unwrap().unwrap(); + let wallet = cache.get(&key, GetScope::Store).await.unwrap().unwrap(); check(&key, &wallet); }); } @@ -829,15 +843,15 @@ fn no_interface_mods() { // This should probably be an error, but changing that would not be // backwards compatible - assert_eq!(None, cache.get(&key, GetScope::InBlock).unwrap()); + assert_eq!(None, cache.get(&key, GetScope::InBlock).await.unwrap()); assert!(matches!( - cache.get(&key, GetScope::Store), + cache.get(&key, GetScope::Store).await, Err(StoreError::UnknownTable(_)) )); let entity = entity! { LOAD_RELATED_SUBGRAPH => id: "1", balance: 100 }; - cache.set(key, entity, 0, None).unwrap_err(); + cache.set(key, entity, 0, None).await.unwrap_err(); }) } diff --git a/store/test-store/tests/graphql/introspection.rs b/store/test-store/tests/graphql/introspection.rs index 4358621b2dc..6607a04be05 100644 --- a/store/test-store/tests/graphql/introspection.rs +++ b/store/test-store/tests/graphql/introspection.rs @@ -3,12 +3,13 @@ use std::io::Write; use std::sync::Arc; use std::time::Duration; +use async_trait::async_trait; use graph::components::store::QueryPermit; use graph::data::graphql::{object_value, ObjectOrInterface}; use graph::data::query::Trace; use graph::prelude::{ - async_trait, o, q, r, s, serde_json, slog, tokio, DeploymentHash, Logger, Query, - QueryExecutionError, QueryResult, + o, q, r, s, serde_json, slog, tokio, DeploymentHash, Logger, Query, QueryExecutionError, + QueryResult, }; use graph::schema::{ApiSchema, InputSchema}; @@ -25,7 +26,7 @@ pub struct MockResolver; impl Resolver for MockResolver { const CACHEABLE: bool = false; - fn prefetch( + async fn prefetch( &self, _: &ExecutionContext, _: &a::SelectionSet, @@ -256,7 +257,7 @@ fn maybe_save(data: &r::Value) { } } -#[tokio::test] +#[graph::test] async fn satisfies_graphiql_introspection_query_without_fragments() { let result = introspection_query( mock_schema(), @@ -508,7 +509,7 @@ async fn satisfies_graphiql_introspection_query_without_fragments() { assert!(same_value(&data, &expected_mock_schema_introspection())); } -#[tokio::test] +#[graph::test] async fn satisfies_graphiql_introspection_query_with_fragments() { let result = introspection_query( mock_schema(), @@ -818,7 +819,7 @@ type Parameter @entity { } "; -#[tokio::test] +#[graph::test] async fn successfully_runs_introspection_query_against_complex_schema() { let schema = api_schema(COMPLEX_SCHEMA, "complexschema"); @@ -923,7 +924,7 @@ async fn successfully_runs_introspection_query_against_complex_schema() { assert!(!result.has_errors(), "{:#?}", result); } -#[tokio::test] +#[graph::test] async fn introspection_possible_types() { let schema = api_schema(COMPLEX_SCHEMA, "complexschema"); diff --git a/store/test-store/tests/graphql/query.rs b/store/test-store/tests/graphql/query.rs index 5acf9754772..34f8a3fb935 100644 --- a/store/test-store/tests/graphql/query.rs +++ b/store/test-store/tests/graphql/query.rs @@ -192,7 +192,7 @@ async fn setup( if !STORE_CLEAN.load(Ordering::SeqCst) { let chain = CHAIN.iter().collect(); block_store::set_chain(chain, NETWORK_NAME).await; - test_store::remove_subgraphs(); + test_store::remove_subgraphs().await; STORE_CLEAN.store(true, Ordering::SeqCst); } } @@ -223,12 +223,12 @@ async fn setup( global_init().await; let id = DeploymentHash::new(id).unwrap(); - let loc = store.subgraph_store().active_locator(&id).unwrap(); + let loc = store.subgraph_store().active_locator(&id).await.unwrap(); match loc { Some(loc) if 
id_type.deployment_id() == loc.hash.as_str() => loc, Some(loc) => { - test_store::remove_subgraph(&loc.hash); + test_store::remove_subgraph(&loc.hash).await; initialize(store, id, features, id_type).await } None => initialize(store, id, features, id_type).await, @@ -456,6 +456,7 @@ async fn insert_test_entities( NETWORK_NAME.to_string(), SubgraphVersionSwitchingMode::Instant, ) + .await .unwrap(); let s = id_type.songs(); diff --git a/store/test-store/tests/postgres/aggregation.rs b/store/test-store/tests/postgres/aggregation.rs index b131cb4a323..b83e71cf39c 100644 --- a/store/test-store/tests/postgres/aggregation.rs +++ b/store/test-store/tests/postgres/aggregation.rs @@ -21,8 +21,10 @@ use graph::{ prelude::lazy_static, schema::InputSchema, }; -use graph_store_postgres::{Store as DieselStore, SubgraphStore}; -use test_store::{create_test_subgraph, run_test_sequentially, BLOCKS, LOGGER, METRICS_REGISTRY}; +use graph_store_postgres::Store as DieselStore; +use test_store::{ + create_test_subgraph, remove_subgraphs, run_test_sequentially, BLOCKS, LOGGER, METRICS_REGISTRY, +}; const SCHEMA: &str = r#" type Data @entity(timeseries: true) { @@ -63,12 +65,6 @@ lazy_static! { static ref TIMES: Vec = vec![minutes(30), minutes(40), minutes(65), minutes(120)]; } -fn remove_test_data(store: Arc) { - store - .delete_all_entities_for_test_use_only() - .expect("deleting test entities succeeds"); -} - pub async fn insert( store: &Arc, deployment: &DeploymentLocator, @@ -91,6 +87,7 @@ pub async fn insert( entity_cache.append(ops); let mods = entity_cache .as_modifications(block_ptr_to.number) + .await .expect("failed to convert to modifications") .modifications; let metrics_registry = METRICS_REGISTRY.clone(); @@ -206,8 +203,7 @@ struct TestEnv { } impl TestEnv { - #[track_caller] - fn all_entities(&self, entity_type: &str, block: BlockNumber) -> Vec { + async fn all_entities(&self, entity_type: &str, block: BlockNumber) -> Vec { let entity_type = self .writable .input_schema() @@ -221,6 +217,7 @@ impl TestEnv { self.store .subgraph_store() .find(query) + .await .expect("query succeeds") } } @@ -231,9 +228,8 @@ where R: Future + Send + 'static, { run_test_sequentially(|store| async move { - let subgraph_store = store.subgraph_store(); // Reset state before starting - remove_test_data(subgraph_store.clone()); + remove_subgraphs().await; // Seed database with test data let hash = DeploymentHash::new("rollupSubgraph").unwrap(); @@ -288,12 +284,12 @@ fn entity_diff(left: &[Entity], right: &[Entity]) -> Result::new(), x); let exp = stats_hour(&env.writable.input_schema()); for i in 0..4 { - let act = env.all_entities("Stats_hour", BLOCKS[i].number); + let act = env.all_entities("Stats_hour", BLOCKS[i].number).await; let diff = entity_diff(&exp[i], &act).unwrap(); if !diff.is_empty() { panic!("entities for BLOCKS[{}] differ:\n{}", i, diff); diff --git a/store/test-store/tests/postgres/chain_head.rs b/store/test-store/tests/postgres/chain_head.rs index acc42ad1ee7..98f1045de7a 100644 --- a/store/test-store/tests/postgres/chain_head.rs +++ b/store/test-store/tests/postgres/chain_head.rs @@ -1,7 +1,7 @@ //! Test ChainStore implementation of Store, in particular, how //! 
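// The chain-head tests below switch from a synchronous `run_test` to a
// `run_test_async` that takes a closure returning a future, which is why the
// call sites become `run_test_async(chain, move |store, _, _| async move {..})`.
// A sketch of the harness shape this implies; `ChainStore` is a placeholder
// and the real harness presumably also seeds chain data before invoking `test`:

use std::future::Future;
use std::sync::Arc;

struct ChainStore;

async fn run_test_async<F, R>(test: F)
where
    F: FnOnce(Arc<ChainStore>) -> R,
    R: Future<Output = ()>,
{
    let store = Arc::new(ChainStore); // seed test data here in a real harness
    test(store).await;
}

async fn demo() {
    run_test_async(|store| async move {
        let _ = store; // assertions go here; store calls now take `.await`
    })
    .await;
}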
the chain head pointer gets updated in various situations -use diesel::RunQueryDsl; +use diesel_async::RunQueryDsl; use graph::blockchain::{BlockHash, BlockPtr}; use graph::data::store::ethereum::call; use graph::data::store::scalar::Bytes; @@ -43,7 +43,11 @@ where for name in &[NETWORK_NAME, FAKE_NETWORK_SHARED] { block_store::set_chain(chain.clone(), name).await; - let chain_store = store.block_store().chain_store(name).expect("chain store"); + let chain_store = store + .block_store() + .chain_store(name) + .await + .expect("chain store"); // Run test test(chain_store.cheap_clone(), store.cheap_clone()).unwrap_or_else(|err| { @@ -68,7 +72,11 @@ where for name in &[NETWORK_NAME, FAKE_NETWORK_SHARED] { let cached = block_store::set_chain(chain.clone(), name).await; - let chain_store = store.block_store().chain_store(name).expect("chain store"); + let chain_store = store + .block_store() + .chain_store(name) + .await + .expect("chain store"); // Run test test(chain_store.cheap_clone(), store.clone(), cached).await; @@ -259,40 +267,42 @@ fn block_hashes_by_number() { &*BLOCK_TWO, &*BLOCK_TWO_NO_PARENT, ]; - run_test(chain, move |store, _| { - let hashes = store.block_hashes_by_block_number(1).unwrap(); + run_test_async(chain, move |store, _, _| async move { + let hashes = store.block_hashes_by_block_number(1).await.unwrap(); assert_eq!(vec![BLOCK_ONE.block_hash()], hashes); - let hashes = store.block_hashes_by_block_number(2).unwrap(); + let hashes = store.block_hashes_by_block_number(2).await.unwrap(); assert_eq!(2, hashes.len()); assert!(hashes.contains(&BLOCK_TWO.block_hash())); assert!(hashes.contains(&BLOCK_TWO_NO_PARENT.block_hash())); - let hashes = store.block_hashes_by_block_number(127).unwrap(); + let hashes = store.block_hashes_by_block_number(127).await.unwrap(); assert_eq!(0, hashes.len()); let deleted = store .confirm_block_hash(1, &BLOCK_ONE.block_hash()) + .await .unwrap(); assert_eq!(0, deleted); let deleted = store .confirm_block_hash(2, &BLOCK_TWO.block_hash()) + .await .unwrap(); assert_eq!(1, deleted); // Make sure that we do not delete anything for a nonexistent block let deleted = store .confirm_block_hash(127, &GENESIS_BLOCK.block_hash()) + .await .unwrap(); assert_eq!(0, deleted); - let hashes = store.block_hashes_by_block_number(1).unwrap(); + let hashes = store.block_hashes_by_block_number(1).await.unwrap(); assert_eq!(vec![BLOCK_ONE.block_hash()], hashes); - let hashes = store.block_hashes_by_block_number(2).unwrap(); + let hashes = store.block_hashes_by_block_number(2).await.unwrap(); assert_eq!(vec![BLOCK_TWO.block_hash()], hashes); - Ok(()) }) } @@ -425,7 +435,7 @@ fn ancestor_block_skipped() { fn eth_call_cache() { let chain = vec![&*GENESIS_BLOCK, &*BLOCK_ONE, &*BLOCK_TWO]; - run_test(chain, |store, _| { + run_test_async(chain, |store, _, _| async move { let logger = LOGGER.cheap_clone(); fn ccr(value: &[u8]) -> call::Retval { call::Retval::Value(Bytes::from(value)) @@ -437,39 +447,48 @@ fn eth_call_cache() { let call = call::Request::new(address, call.to_vec(), 0); store + .cheap_clone() .set_call( &logger, call.cheap_clone(), BLOCK_ONE.block_ptr(), ccr(&return_value), ) + .await .unwrap(); - let ret = store.get_call(&call, GENESIS_BLOCK.block_ptr()).unwrap(); + let ret = store + .get_call(&call, GENESIS_BLOCK.block_ptr()) + .await + .unwrap(); assert!(ret.is_none()); let ret = store .get_call(&call, BLOCK_ONE.block_ptr()) + .await .unwrap() .unwrap() .retval .unwrap(); assert_eq!(&return_value, ret.as_slice()); - let ret = store.get_call(&call, 
BLOCK_TWO.block_ptr()).unwrap(); + let ret = store.get_call(&call, BLOCK_TWO.block_ptr()).await.unwrap(); assert!(ret.is_none()); let new_return_value: [u8; 3] = [10, 11, 12]; store + .cheap_clone() .set_call( &logger, call.cheap_clone(), BLOCK_TWO.block_ptr(), ccr(&new_return_value), ) + .await .unwrap(); let ret = store .get_call(&call, BLOCK_TWO.block_ptr()) + .await .unwrap() .unwrap() .retval @@ -478,30 +497,35 @@ fn eth_call_cache() { // Reverted calls should not be cached store + .cheap_clone() .set_call( &logger, call.cheap_clone(), BLOCK_THREE.block_ptr(), call::Retval::Null, ) + .await + .unwrap(); + let ret = store + .get_call(&call, BLOCK_THREE.block_ptr()) + .await .unwrap(); - let ret = store.get_call(&call, BLOCK_THREE.block_ptr()).unwrap(); assert_eq!(None, ret); // Empty return values should not be cached let return_value: [u8; 0] = []; store + .cheap_clone() .set_call( &logger, call.cheap_clone(), BLOCK_FOUR.block_ptr(), ccr(&return_value), ) + .await .unwrap(); - let ret = store.get_call(&call, BLOCK_FOUR.block_ptr()).unwrap(); + let ret = store.get_call(&call, BLOCK_FOUR.block_ptr()).await.unwrap(); assert_eq!(None, ret); - - Ok(()) }) } @@ -522,32 +546,39 @@ fn test_clear_stale_call_cache() { let call: [u8; 6] = [1, 2, 3, 4, 5, 6]; let return_value: [u8; 3] = [7, 8, 9]; - let mut conn = PRIMARY_POOL.get().unwrap(); - // Insert a call cache entry, otherwise it will hit an early return and won't test all queries let call = call::Request::new(address, call.to_vec(), 0); chain_store + .cheap_clone() .set_call( &logger, call.cheap_clone(), BLOCK_ONE.block_ptr(), call::Retval::Value(Bytes::from(return_value)), ) + .await .unwrap(); // Confirm the call cache entry is there - let ret = chain_store.get_call(&call, BLOCK_ONE.block_ptr()).unwrap(); + let ret = chain_store + .get_call(&call, BLOCK_ONE.block_ptr()) + .await + .unwrap(); assert!(ret.is_some()); // Now we need to update the accessed_at timestamp to be stale, so it gets deleted - // Get namespace from chains table - let namespace: String = diesel::sql_query(format!( - "SELECT namespace FROM public.chains WHERE name = '{}'", - chain_store.chain - )) - .get_result::(&mut conn) - .unwrap() - .namespace; + // Get namespace from chains table in the primary + let namespace: String = { + let mut conn = PRIMARY_POOL.get().await.unwrap(); + diesel::sql_query(format!( + "SELECT namespace FROM public.chains WHERE name = '{}'", + chain_store.chain + )) + .get_result::(&mut conn) + .await + .unwrap() + .namespace + }; // Determine the correct meta table name let meta_table: String = match namespace.as_str() { @@ -555,18 +586,26 @@ fn test_clear_stale_call_cache() { _ => format!("{namespace}.call_meta"), }; - // Update accessed_at to be 8 days ago, so it's stale for a 7 day threshold - let _ = diesel::sql_query(format!( - "UPDATE {meta_table} SET accessed_at = NOW() - INTERVAL '8 days' WHERE contract_address = $1" - )).bind::(address.as_bytes()) - .execute(&mut conn) - .unwrap(); - + // Update accessed_at to be 8 days ago, so it's stale for a 7 day + // threshold in the shard where the chain lives + { + let mut conn = chain_store.get_conn_for_test().await.unwrap(); + diesel::sql_query(format!( + "UPDATE {meta_table} SET accessed_at = NOW() - INTERVAL '8 days' WHERE contract_address = $1" + )) + .bind::(address.as_bytes()) + .execute(&mut conn) + .await + .unwrap(); + } let result = chain_store.clear_stale_call_cache(7, None).await; assert!(result.is_ok()); // Confirm the call cache entry was removed - let ret = 
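// One detail in the call-cache tests above and below: call sites now read
// `store.cheap_clone().set_call(...)`, which suggests the async `set_call`
// takes an owned `Arc` receiver (for instance so it can move the handle into
// a blocking or spawned task). A hedged sketch of that signature shape; the
// type and method here are invented for illustration:

use std::sync::Arc;

struct CallCache;

impl CallCache {
    // An `Arc<Self>` receiver forces callers to clone the Arc first, which is
    // consistent with the `cheap_clone()` calls the patch inserts.
    async fn set_call(self: Arc<Self>, retval: Vec<u8>) {
        let _ = retval; // a real implementation would persist the value here
    }
}

async fn demo(cache: Arc<CallCache>) {
    cache.clone().set_call(vec![1, 2, 3]).await;
}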
chain_store.get_call(&call, BLOCK_ONE.block_ptr()).unwrap(); + let ret = chain_store + .get_call(&call, BLOCK_ONE.block_ptr()) + .await + .unwrap(); assert!(ret.is_none()); }); } diff --git a/store/test-store/tests/postgres/graft.rs b/store/test-store/tests/postgres/graft.rs index 6c7b4e28f55..6527f12756b 100644 --- a/store/test-store/tests/postgres/graft.rs +++ b/store/test-store/tests/postgres/graft.rs @@ -106,7 +106,7 @@ where let store = store.subgraph_store(); // Reset state before starting - remove_test_data(store.clone()); + remove_subgraphs().await; // Seed database with test data let deployment = insert_test_data(store.clone()).await; @@ -164,6 +164,7 @@ async fn insert_test_data(store: Arc) -> DeploymentLocator "fake_network".to_string(), SubgraphVersionSwitchingMode::Instant, ) + .await .unwrap(); let test_entity_1 = create_test_entity( @@ -268,13 +269,6 @@ fn create_test_entity( } } -/// Removes test data from the database behind the store. -fn remove_test_data(store: Arc) { - store - .delete_all_entities_for_test_use_only() - .expect("deleting test entities succeeds"); -} - async fn create_grafted_subgraph( subgraph_id: &DeploymentHash, schema: &str, @@ -285,7 +279,7 @@ async fn create_grafted_subgraph( test_store::create_subgraph(subgraph_id, schema, base).await } -fn find_entities( +async fn find_entities( store: &DieselSubgraphStore, deployment: &DeploymentLocator, ) -> (Vec, Vec) { @@ -302,6 +296,7 @@ fn find_entities( let entities = store .find(query) + .await .expect("store.find failed to execute query"); let ids = entities @@ -315,7 +310,7 @@ async fn check_graft( store: Arc, deployment: DeploymentLocator, ) -> Result<(), StoreError> { - let (entities, ids) = find_entities(store.as_ref(), &deployment); + let (entities, ids) = find_entities(store.as_ref(), &deployment).await; let ids_str = ids.iter().map(|id| id.to_string()).collect::>(); assert_eq!(vec!["3", "1", "2"], ids_str); @@ -325,7 +320,7 @@ async fn check_graft( let mut shaq = entities.first().unwrap().clone(); assert_eq!(Some(&Value::from("queensha@email.com")), shaq.get("email")); - let schema = store.input_schema(&deployment.hash)?; + let schema = store.input_schema(&deployment.hash).await?; let user_type = schema.entity_type("User").unwrap(); // Make our own entries for block 2 @@ -410,7 +405,7 @@ fn graft() { .await .expect("grafting onto block 0 works"); - let (entities, ids) = find_entities(store.as_ref(), &deployment); + let (entities, ids) = find_entities(store.as_ref(), &deployment).await; let ids_str = ids.iter().map(|id| id.to_string()).collect::>(); assert_eq!(vec!["1"], ids_str); let shaq = entities.first().unwrap().clone(); @@ -419,11 +414,11 @@ fn graft() { }) } -fn other_shard( +async fn other_shard( store: &DieselSubgraphStore, src: &DeploymentLocator, ) -> Result, StoreError> { - let src_shard = store.shard(src)?; + let src_shard = store.shard(src).await?; match all_shards() .into_iter() @@ -443,14 +438,16 @@ fn other_shard( #[test] fn copy() { run_test(|store, src| async move { - if let Some(dst_shard) = other_shard(&store, &src)? { - let deployment = store.copy_deployment( - &src, - dst_shard, - NODE_ID.clone(), - BLOCKS[1].clone(), - OnSync::None, - )?; + if let Some(dst_shard) = other_shard(&store, &src).await? 
{ + let deployment = store + .copy_deployment( + &src, + dst_shard, + NODE_ID.clone(), + BLOCKS[1].clone(), + OnSync::None, + ) + .await?; store .cheap_clone() @@ -459,7 +456,7 @@ fn copy() { .start_subgraph_deployment(&LOGGER) .await?; - store.activate(&deployment)?; + store.activate(&deployment).await?; check_graft(store, deployment).await?; } @@ -474,14 +471,10 @@ fn copy() { fn on_sync() { for on_sync in [OnSync::None, OnSync::Activate, OnSync::Replace] { run_test(move |store, src| async move { - if let Some(dst_shard) = other_shard(&store, &src)? { - let dst = store.copy_deployment( - &src, - dst_shard, - NODE_ID.clone(), - BLOCKS[1].clone(), - on_sync, - )?; + if let Some(dst_shard) = other_shard(&store, &src).await? { + let dst = store + .copy_deployment(&src, dst_shard, NODE_ID.clone(), BLOCKS[1].clone(), on_sync) + .await?; let writable = store .cheap_clone() @@ -489,13 +482,13 @@ fn on_sync() { .await?; writable.start_subgraph_deployment(&LOGGER).await?; - writable.deployment_synced(BLOCKS[0].clone())?; + writable.deployment_synced(BLOCKS[0].clone()).await?; - let mut primary = primary_connection(); - let src_site = primary.locate_site(src)?.unwrap(); - let src_node = primary.assigned_node(&src_site)?; - let dst_site = primary.locate_site(dst)?.unwrap(); - let dst_node = primary.assigned_node(&dst_site)?; + let mut primary = primary_connection().await; + let src_site = primary.locate_site(src).await?.unwrap(); + let src_node = primary.assigned_node(&src_site).await?; + let dst_site = primary.locate_site(dst).await?.unwrap(); + let dst_node = primary.assigned_node(&dst_site).await?; assert!(dst_node.is_some()); match on_sync { @@ -523,14 +516,16 @@ fn on_sync() { // Check that on_sync does not cause an error when the source of the // copy has vanished run_test(move |store, src| async move { - if let Some(dst_shard) = other_shard(&store, &src)? { - let dst = store.copy_deployment( - &src, - dst_shard, - NODE_ID.clone(), - BLOCKS[1].clone(), - OnSync::Replace, - )?; + if let Some(dst_shard) = other_shard(&store, &src).await? 
{ + let dst = store + .copy_deployment( + &src, + dst_shard, + NODE_ID.clone(), + BLOCKS[1].clone(), + OnSync::Replace, + ) + .await?; let writable = store .cheap_clone() @@ -540,13 +535,13 @@ fn on_sync() { // Perform the copy writable.start_subgraph_deployment(&LOGGER).await?; - let mut primary = primary_connection(); - let src_site = primary.locate_site(src.clone())?.unwrap(); - primary.unassign_subgraph(&src_site)?; - store.activate(&dst)?; - store.remove_deployment(src.id.into())?; + let mut primary = primary_connection().await; + let src_site = primary.locate_site(src.clone()).await?.unwrap(); + primary.unassign_subgraph(&src_site).await?; + store.activate(&dst).await?; + store.remove_deployment(src.id.into()).await?; - let res = writable.deployment_synced(BLOCKS[2].clone()); + let res = writable.deployment_synced(BLOCKS[2].clone()).await; assert!(res.is_ok()); } Ok(()) @@ -558,7 +553,7 @@ fn prune() { struct Progress; impl PruneReporter for Progress {} - fn check_at_block( + async fn check_at_block( store: &DieselSubgraphStore, src: &DeploymentLocator, strategy: PruningStrategy, @@ -578,6 +573,7 @@ fn prune() { .collect::>(); let act: Vec<_> = store .find(query) + .await .unwrap() .into_iter() .map(|entity| entity.id()) @@ -592,10 +588,12 @@ fn prune() { run_test(move |store, src| async move { store .set_history_blocks(&src, -3, 10) + .await .expect_err("history_blocks can not be set to a negative number"); store .set_history_blocks(&src, 10, 10) + .await .expect_err("history_blocks must be bigger than reorg_threshold"); // Add another version for user 2 at block 4 @@ -663,10 +661,10 @@ fn prune() { // Check which versions exist at every block, even if they are // before the new earliest block, since we don't have a convenient // way to load all entity versions with their block range - check_at_block(&store, &src, strategy, 0, vec!["1"]); - check_at_block(&store, &src, strategy, 1, vec!["1", "2"]); + check_at_block(&store, &src, strategy, 0, vec!["1"]).await; + check_at_block(&store, &src, strategy, 1, vec!["1", "2"]).await; for block in 2..=5 { - check_at_block(&store, &src, strategy, block, vec!["1", "2", "3"]); + check_at_block(&store, &src, strategy, block, vec!["1", "2", "3"]).await; } Ok(()) }) diff --git a/store/test-store/tests/postgres/relational.rs b/store/test-store/tests/postgres/relational.rs index 5d01bd3c510..80971cbb42f 100644 --- a/store/test-store/tests/postgres/relational.rs +++ b/store/test-store/tests/postgres/relational.rs @@ -1,11 +1,10 @@ //! 
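// The relational tests that follow replace diesel's blocking `PgConnection`
// with diesel-async's `AsyncPgConnection` and `SimpleConnection` with
// `SimpleAsyncConnection`, so every statement gains an `.await`. A compact
// sketch of the async API surface involved; the URL and SQL are placeholders,
// and the deadpool-backed pool shown is diesel-async's stock one, which may
// differ from graph-node's own pool wrapper:

use diesel_async::pooled_connection::deadpool::Pool;
use diesel_async::pooled_connection::AsyncDieselConnectionManager;
use diesel_async::{AsyncPgConnection, RunQueryDsl, SimpleAsyncConnection};

async fn demo(database_url: &str) -> Result<(), Box<dyn std::error::Error>> {
    let manager = AsyncDieselConnectionManager::<AsyncPgConnection>::new(database_url);
    let pool = Pool::builder(manager).max_size(4).build()?;

    // Checking out a connection is itself async, matching the
    // `PRIMARY_POOL.get().await` change above.
    let mut conn = pool.get().await?;

    // `batch_execute` now comes from SimpleAsyncConnection and must be awaited.
    conn.batch_execute("create schema if not exists demo_ns").await?;

    // `sql_query(...).bind(...).execute(...)` keeps its shape, plus `.await`.
    diesel::sql_query("select count(*) from pg_class where relname = $1")
        .bind::<diesel::sql_types::Text, _>("pg_class")
        .execute(&mut conn)
        .await?;
    Ok(())
}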
Test mapping of GraphQL schema to a relational schema -use diesel::connection::SimpleConnection as _; -use diesel::pg::PgConnection; +use diesel_async::SimpleAsyncConnection; use graph::components::store::write::{EntityModification, RowGroup}; use graph::data::store::scalar; use graph::entity; use graph::prelude::{ - o, slog, tokio, web3::types::H256, DeploymentHash, Entity, EntityCollection, EntityFilter, + o, slog, web3::types::H256, DeploymentHash, Entity, EntityCollection, EntityFilter, EntityOrder, EntityQuery, Logger, StopwatchMetrics, Value, ValueType, BLOCK_NUMBER_MAX, }; use graph::prelude::{BlockNumber, MetricsRegistry}; @@ -13,6 +12,7 @@ use graph::schema::{EntityKey, EntityType, InputSchema}; use graph_store_postgres::layout_for_tests::set_account_like; use graph_store_postgres::layout_for_tests::LayoutCache; use graph_store_postgres::layout_for_tests::SqlName; +use graph_store_postgres::AsyncPgConnection; use hex_literal::hex; use lazy_static::lazy_static; use std::collections::BTreeSet; @@ -240,14 +240,15 @@ lazy_static! { } /// Removes test data from the database behind the store. -fn remove_schema(conn: &mut PgConnection) { +async fn remove_schema(conn: &mut AsyncPgConnection) { let query = format!("drop schema if exists {} cascade", NAMESPACE.as_str()); conn.batch_execute(&query) + .await .expect("Failed to drop test schema"); } -fn insert_entity_at( - conn: &mut PgConnection, +async fn insert_entity_at( + conn: &mut AsyncPgConnection, layout: &Layout, entity_type: &EntityType, mut entities: Vec, @@ -269,24 +270,27 @@ fn insert_entity_at( entity_type, entities_with_keys ); let group = row_group_insert(&entity_type, block, entities_with_keys_owned.clone()); - layout.insert(conn, &group, &MOCK_STOPWATCH).expect(&errmsg); + layout + .insert(conn, &group, &MOCK_STOPWATCH) + .await + .expect(&errmsg); assert_eq!( group.entity_count_change(), entities_with_keys_owned.len() as i32 ); } -fn insert_entity( - conn: &mut PgConnection, +async fn insert_entity( + conn: &mut AsyncPgConnection, layout: &Layout, entity_type: &EntityType, entities: Vec, ) { - insert_entity_at(conn, layout, entity_type, entities, 0); + insert_entity_at(conn, layout, entity_type, entities, 0).await; } -fn update_entity_at( - conn: &mut PgConnection, +async fn update_entity_at( + conn: &mut AsyncPgConnection, layout: &Layout, entity_type: &EntityType, mut entities: Vec, @@ -309,12 +313,15 @@ fn update_entity_at( entity_type, entities_with_keys ); let group = row_group_update(&entity_type, block, entities_with_keys_owned.clone()); - let updated = layout.update(conn, &group, &MOCK_STOPWATCH).expect(&errmsg); + let updated = layout + .update(conn, &group, &MOCK_STOPWATCH) + .await + .expect(&errmsg); assert_eq!(updated, entities_with_keys_owned.len()); } -fn insert_user_entity( - conn: &mut PgConnection, +async fn insert_user_entity( + conn: &mut AsyncPgConnection, layout: &Layout, id: &str, entity_type: &EntityType, @@ -343,7 +350,7 @@ fn insert_user_entity( vid, ); - insert_entity_at(conn, layout, entity_type, vec![user], block); + insert_entity_at(conn, layout, entity_type, vec![user], block).await; } fn make_user( @@ -382,7 +389,7 @@ fn make_user( user } -fn insert_users(conn: &mut PgConnection, layout: &Layout) { +async fn insert_users(conn: &mut AsyncPgConnection, layout: &Layout) { insert_user_entity( conn, layout, @@ -398,7 +405,8 @@ fn insert_users(conn: &mut PgConnection, layout: &Layout) { 60, 0, 0, - ); + ) + .await; insert_user_entity( conn, layout, @@ -414,7 +422,8 @@ fn insert_users(conn: &mut 
PgConnection, layout: &Layout) { 50, 0, 1, - ); + ) + .await; insert_user_entity( conn, layout, @@ -430,11 +439,12 @@ fn insert_users(conn: &mut PgConnection, layout: &Layout) { 22, 0, 2, - ); + ) + .await; } -fn update_user_entity( - conn: &mut PgConnection, +async fn update_user_entity( + conn: &mut AsyncPgConnection, layout: &Layout, id: &str, entity_type: &EntityType, @@ -462,11 +472,11 @@ fn update_user_entity( visits, vid, ); - update_entity_at(conn, layout, entity_type, vec![user], block); + update_entity_at(conn, layout, entity_type, vec![user], block).await; } -fn insert_pet( - conn: &mut PgConnection, +async fn insert_pet( + conn: &mut AsyncPgConnection, layout: &Layout, entity_type: &EntityType, id: &str, @@ -479,15 +489,15 @@ fn insert_pet( name: name, vid: vid, }; - insert_entity_at(conn, layout, entity_type, vec![pet], block); + insert_entity_at(conn, layout, entity_type, vec![pet], block).await; } -fn insert_pets(conn: &mut PgConnection, layout: &Layout) { - insert_pet(conn, layout, &*DOG_TYPE, "pluto", "Pluto", 0, 0); - insert_pet(conn, layout, &*CAT_TYPE, "garfield", "Garfield", 0, 1); +async fn insert_pets(conn: &mut AsyncPgConnection, layout: &Layout) { + insert_pet(conn, layout, &*DOG_TYPE, "pluto", "Pluto", 0, 0).await; + insert_pet(conn, layout, &*CAT_TYPE, "garfield", "Garfield", 0, 1).await; } -fn create_schema(conn: &mut PgConnection) -> Layout { +async fn create_schema(conn: &mut AsyncPgConnection) -> Layout { let schema = InputSchema::parse_latest(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()).unwrap(); let site = make_dummy_site( THINGS_SUBGRAPH_ID.clone(), @@ -495,9 +505,10 @@ fn create_schema(conn: &mut PgConnection) -> Layout { NETWORK_NAME.to_string(), ); let query = format!("create schema {}", NAMESPACE.as_str()); - conn.batch_execute(&query).unwrap(); + conn.batch_execute(&query).await.unwrap(); Layout::create_relational_schema(conn, Arc::new(site), &schema, BTreeSet::new(), None) + .await .expect("Failed to create relational schema") } @@ -540,26 +551,27 @@ macro_rules! assert_entity_eq { } /// Test harness for running database integration tests. 
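// Just below, `run_test`'s bound changes from `FnOnce(&mut PgConnection,
// &Layout)` to `AsyncFnOnce(&mut AsyncPgConnection, &Layout)`, and call sites
// pass `async |conn, layout| { ... }`. `AsyncFnOnce` with parenthesized
// arguments is the async-closure sugar stabilized in Rust 1.85; a minimal
// model with a placeholder connection type:

struct Conn; // stand-in for AsyncPgConnection

async fn run_with_conn<F>(test: F)
where
    F: AsyncFnOnce(&mut Conn),
{
    let mut conn = Conn; // a real harness checks one out of a pool
    test(&mut conn).await;
}

async fn demo() {
    run_with_conn(async |conn: &mut Conn| {
        let _ = conn; // the test body can `.await` freely in here
    })
    .await;
}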
-fn run_test(test: F) +async fn run_test(test: F) where - F: FnOnce(&mut PgConnection, &Layout), + F: AsyncFnOnce(&mut AsyncPgConnection, &Layout), { - run_test_with_conn(|conn| { + run_test_with_conn(async |conn| { // Reset state before starting - remove_schema(conn); + remove_schema(conn).await; // Create the database schema - let layout = create_schema(conn); + let layout = create_schema(conn).await; // Run test - test(conn, &layout); - }); + test(conn, &layout).await; + }) + .await; } -#[test] -fn find() { - run_test(|conn, layout| { - insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]); +#[graph::test] +async fn find() { + run_test(async |conn, layout| { + insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; // Happy path: find existing entity let entity = layout @@ -568,6 +580,7 @@ fn find() { &SCALAR_TYPE.parse_key("one").unwrap(), BLOCK_NUMBER_MAX, ) + .await .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(scrub(&SCALAR_ENTITY), entity); @@ -579,20 +592,23 @@ fn find() { &SCALAR_TYPE.parse_key("noone").unwrap(), BLOCK_NUMBER_MAX, ) + .await .expect("Failed to read Scalar[noone]"); assert!(entity.is_none()); - }); + }) + .await; } -#[test] -fn insert_null_fulltext_fields() { - run_test(|conn, layout| { +#[graph::test] +async fn insert_null_fulltext_fields() { + run_test(async |conn, layout| { insert_entity( conn, layout, &*NULLABLE_STRINGS_TYPE, vec![EMPTY_NULLABLESTRINGS_ENTITY.clone()], - ); + ) + .await; // Find entity with null string values let entity = layout @@ -601,16 +617,18 @@ fn insert_null_fulltext_fields() { &NULLABLE_STRINGS_TYPE.parse_key("one").unwrap(), BLOCK_NUMBER_MAX, ) + .await .expect("Failed to read NullableStrings[one]") .unwrap(); assert_entity_eq!(scrub(&EMPTY_NULLABLESTRINGS_ENTITY), entity); - }); + }) + .await; } -#[test] -fn update() { - run_test(|conn, layout| { - insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]); +#[graph::test] +async fn update() { + run_test(async |conn, layout| { + insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; // Update with overwrite let mut entity = SCALAR_ENTITY.clone(); @@ -625,6 +643,7 @@ fn update() { let group = row_group_update(&entity_type, 0, entities); layout .update(conn, &group, &MOCK_STOPWATCH) + .await .expect("Failed to update"); let actual = layout @@ -633,15 +652,17 @@ fn update() { &SCALAR_TYPE.parse_key("one").unwrap(), BLOCK_NUMBER_MAX, ) + .await .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(scrub(&entity), actual); - }); + }) + .await; } -#[test] -fn update_many() { - run_test(|conn, layout| { +#[graph::test] +async fn update_many() { + run_test(async |conn, layout| { let mut one = SCALAR_ENTITY.clone(); let mut two = SCALAR_ENTITY.clone(); two.set("id", "two").unwrap(); @@ -654,10 +675,11 @@ fn update_many() { layout, &*SCALAR_TYPE, vec![one.clone(), two.clone(), three.clone()], - ); + ) + .await; // confidence test: there should be 3 scalar entities in store right now - assert_eq!(3, count_scalar_entities(conn, layout)); + assert_eq!(3, count_scalar_entities(conn, layout).await); // update with overwrite one.set("string", "updated").unwrap(); @@ -686,18 +708,21 @@ fn update_many() { let group = row_group_update(&entity_type, 0, entities); layout .update(conn, &group, &MOCK_STOPWATCH) + .await .expect("Failed to update"); // check updates took effect - let updated: Vec = ["one", "two", "three"] - .iter() - .map(|&id| { - layout - .find(conn, 
&SCALAR_TYPE.parse_key(id).unwrap(), BLOCK_NUMBER_MAX) - .unwrap_or_else(|_| panic!("Failed to read Scalar[{}]", id)) - .unwrap() - }) - .collect(); + let mut updated: Vec = Vec::new(); + for id in &["one", "two", "three"] { + let entity = layout + .find(conn, &SCALAR_TYPE.parse_key(*id).unwrap(), BLOCK_NUMBER_MAX) + .await + .unwrap_or_else(|_| panic!("Failed to read Scalar[{}]", id)) + .unwrap(); + updated.push(entity); + } + let updated = updated; + let new_one = &updated[0]; let new_two = &updated[1]; let new_three = &updated[2]; @@ -729,14 +754,15 @@ fn update_many() { new_three.get("color"), Some(&Value::String("red".to_string())) ); - }); + }) + .await; } /// Test that we properly handle BigDecimal values with a negative scale. -#[test] -fn serialize_bigdecimal() { - run_test(|conn, layout| { - insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]); +#[graph::test] +async fn serialize_bigdecimal() { + run_test(async |conn, layout| { + insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; // Update with overwrite let mut entity = SCALAR_ENTITY.clone(); @@ -754,6 +780,7 @@ fn serialize_bigdecimal() { let group = row_group_update(&entity_type, 0, entities); layout .update(conn, &group, &MOCK_STOPWATCH) + .await .expect("Failed to update"); let actual = layout @@ -762,19 +789,21 @@ fn serialize_bigdecimal() { &SCALAR_TYPE.parse_key("one").unwrap(), BLOCK_NUMBER_MAX, ) + .await .expect("Failed to read Scalar[one]") .unwrap(); assert_entity_eq!(entity, actual); } - }); + }) + .await; } -#[test] -fn enum_arrays() { +#[graph::test] +async fn enum_arrays() { // We had an issue where we would read an array of enums back as a // single string; for this test, we would get back the string // "{yellow,red,BLUE}" instead of the array ["yellow", "red", "BLUE"] - run_test(|conn, layout| { + run_test(async |conn, layout| { let spectrum = entity! 
{ THINGS_SCHEMA => id: "rainbow", main: "yellow", @@ -787,7 +816,8 @@ fn enum_arrays() { layout, &THINGS_SCHEMA.entity_type("Spectrum").unwrap(), vec![spectrum.clone()], - ); + ) + .await; let actual = layout .find( @@ -799,13 +829,15 @@ fn enum_arrays() { .unwrap(), BLOCK_NUMBER_MAX, ) + .await .expect("Failed to read Spectrum[rainbow]") .unwrap(); assert_entity_eq!(spectrum, actual); - }); + }) + .await } -fn count_scalar_entities(conn: &mut PgConnection, layout: &Layout) -> usize { +async fn count_scalar_entities(conn: &mut AsyncPgConnection, layout: &Layout) -> usize { let filter = EntityFilter::Or(vec![ EntityFilter::Equal("bool".into(), true.into()), EntityFilter::Equal("bool".into(), false.into()), @@ -816,19 +848,20 @@ fn count_scalar_entities(conn: &mut PgConnection, layout: &Layout) -> usize { query.range.first = None; layout .query::(&LOGGER, conn, query) + .await .map(|(entities, _)| entities) .expect("Count query failed") .len() } -#[test] -fn delete() { - run_test(|conn, layout| { - insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]); +#[graph::test] +async fn delete() { + run_test(async |conn, layout| { + insert_entity(conn, layout, &*SCALAR_TYPE, vec![SCALAR_ENTITY.clone()]).await; let mut two = SCALAR_ENTITY.clone(); two.set("id", "two").unwrap(); two.set("vid", 1i64).unwrap(); - insert_entity(conn, layout, &*SCALAR_TYPE, vec![two]); + insert_entity(conn, layout, &*SCALAR_TYPE, vec![two]).await; // Delete where nothing is getting deleted let key = SCALAR_TYPE.parse_key("no such entity").unwrap(); @@ -837,9 +870,10 @@ fn delete() { let group = row_group_delete(&entity_type, 1, entity_keys.clone()); let count = layout .delete(conn, &group, &MOCK_STOPWATCH) + .await .expect("Failed to delete"); assert_eq!(0, count); - assert_eq!(2, count_scalar_entities(conn, layout)); + assert_eq!(2, count_scalar_entities(conn, layout).await); // Delete entity two entity_keys @@ -850,15 +884,17 @@ fn delete() { let group = row_group_delete(&entity_type, 1, entity_keys); let count = layout .delete(conn, &group, &MOCK_STOPWATCH) + .await .expect("Failed to delete"); assert_eq!(1, count); - assert_eq!(1, count_scalar_entities(conn, layout)); - }); + assert_eq!(1, count_scalar_entities(conn, layout).await); + }) + .await; } -#[test] -fn insert_many_and_delete_many() { - run_test(|conn, layout| { +#[graph::test] +async fn insert_many_and_delete_many() { + run_test(async |conn, layout| { let one = SCALAR_ENTITY.clone(); let mut two = SCALAR_ENTITY.clone(); two.set("id", "two").unwrap(); @@ -866,10 +902,10 @@ fn insert_many_and_delete_many() { let mut three = SCALAR_ENTITY.clone(); three.set("id", "three").unwrap(); three.set("vid", 2i64).unwrap(); - insert_entity(conn, layout, &*SCALAR_TYPE, vec![one, two, three]); + insert_entity(conn, layout, &*SCALAR_TYPE, vec![one, two, three]).await; // confidence test: there should be 3 scalar entities in store right now - assert_eq!(3, count_scalar_entities(conn, layout)); + assert_eq!(3, count_scalar_entities(conn, layout).await); // Delete entities with ids equal to "two" and "three" let entity_keys: Vec<_> = vec!["two", "three"] @@ -879,68 +915,73 @@ fn insert_many_and_delete_many() { let group = row_group_delete(&*SCALAR_TYPE, 1, entity_keys); let num_removed = layout .delete(conn, &group, &MOCK_STOPWATCH) + .await .expect("Failed to delete"); assert_eq!(2, num_removed); - assert_eq!(1, count_scalar_entities(conn, layout)); - }); + assert_eq!(1, count_scalar_entities(conn, layout).await); + }) + .await; } -#[tokio::test] 
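// From here on (and throughout the patch), `#[tokio::test]` gives way to
// `#[graph::test]`, and formerly synchronous `#[test]` functions become
// `async fn`s under it. The macro itself is not part of this diff; the
// assumption is the usual one -- it wraps the async body in a runtime the
// project controls, roughly equivalent to this hand-desugared form:

#[test]
fn layout_cache_desugared() {
    tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .expect("failed to build test runtime")
        .block_on(async {
            // original async test body runs here
        });
}

// That also explains why `layout_cache` below can drop its old
// `std::thread::spawn` + `graph::block_on` dance: once the test itself owns
// an async context, `create_test_subgraph(&id, THINGS_GQL).await` is enough.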
+#[graph::test] async fn layout_cache() { - // We need to use `block_on` to call the `create_test_subgraph` function which must be called - // from a sync context, so we replicate what we do `spawn_module`. - let runtime = tokio::runtime::Handle::current(); - std::thread::spawn(move || { - run_test_with_conn(|conn| { - let _runtime_guard = runtime.enter(); - - let id = DeploymentHash::new("primaryLayoutCache").unwrap(); - let _loc = graph::block_on(create_test_subgraph(&id, THINGS_GQL)); - let site = Arc::new(primary_mirror().find_active_site(&id).unwrap().unwrap()); - let table_name = SqlName::verbatim("scalar".to_string()); - - let cache = LayoutCache::new(Duration::from_millis(10)); - - // Without an entry, account_like is false - let layout = cache - .get(&LOGGER, conn, site.clone()) - .expect("we can get the layout"); - let table = layout.table(&table_name).unwrap(); - assert_eq!(false, table.is_account_like); - - set_account_like(conn, site.as_ref(), &table_name, true) - .expect("we can set 'scalar' to account-like"); - sleep(Duration::from_millis(50)); - - // Flip account_like to true - let layout = cache - .get(&LOGGER, conn, site.clone()) - .expect("we can get the layout"); - let table = layout.table(&table_name).unwrap(); - assert_eq!(true, table.is_account_like); - - // Set it back to false - set_account_like(conn, site.as_ref(), &table_name, false) - .expect("we can set 'scalar' to account-like"); - sleep(Duration::from_millis(50)); - - let layout = cache - .get(&LOGGER, conn, site) - .expect("we can get the layout"); - let table = layout.table(&table_name).unwrap(); - assert_eq!(false, table.is_account_like); - }) + run_test_with_conn(async |conn| { + let id = DeploymentHash::new("primaryLayoutCache").unwrap(); + let _loc = create_test_subgraph(&id, THINGS_GQL).await; + let site = Arc::new( + primary_mirror() + .find_active_site(&id) + .await + .unwrap() + .unwrap(), + ); + let table_name = SqlName::verbatim("scalar".to_string()); + + let cache = LayoutCache::new(Duration::from_millis(10)); + + // Without an entry, account_like is false + let layout = cache + .get(&LOGGER, conn, site.clone()) + .await + .expect("we can get the layout"); + let table = layout.table(&table_name).unwrap(); + assert_eq!(false, table.is_account_like); + + set_account_like(conn, site.as_ref(), &table_name, true) + .await + .expect("we can set 'scalar' to account-like"); + sleep(Duration::from_millis(50)); + + // Flip account_like to true + let layout = cache + .get(&LOGGER, conn, site.clone()) + .await + .expect("we can get the layout"); + let table = layout.table(&table_name).unwrap(); + assert_eq!(true, table.is_account_like); + + // Set it back to false + set_account_like(conn, site.as_ref(), &table_name, false) + .await + .expect("we can set 'scalar' to account-like"); + sleep(Duration::from_millis(50)); + + let layout = cache + .get(&LOGGER, conn, site) + .await + .expect("we can get the layout"); + let table = layout.table(&table_name).unwrap(); + assert_eq!(false, table.is_account_like); }) - .join() - .unwrap(); + .await; } -#[test] -fn conflicting_entity() { +#[graph::test] +async fn conflicting_entity() { // `id` is the id of an entity to create, `cat`, `dog`, and `ferret` are // the names of the types for which to check entity uniqueness - fn check( - conn: &mut PgConnection, + async fn check( + conn: &mut AsyncPgConnection, layout: &Layout, id: Value, cat: &str, @@ -948,95 +989,102 @@ fn conflicting_entity() { ferret: &str, vid: i64, ) { - let conflicting = - |conn: &mut PgConnection, 
entity_type: &EntityType, types: Vec<&EntityType>| { - let fred = entity! { layout.input_schema => id: id.clone(), name: id.clone() }; - let fred = Arc::new(fred); - let types: Vec<_> = types.into_iter().cloned().collect(); - let mut group = RowGroup::new(entity_type.clone(), false); - group - .push( - EntityModification::Insert { - key: entity_type.key(fred.id()), - data: fred, - block: 2, - end: None, - }, - 2, - ) - .unwrap(); - layout.conflicting_entities(conn, &types, &group) - }; + let conflicting = async |conn: &mut AsyncPgConnection, + entity_type: &EntityType, + types: Vec<&EntityType>| { + let fred = entity! { layout.input_schema => id: id.clone(), name: id.clone() }; + let fred = Arc::new(fred); + let types: Vec<_> = types.into_iter().cloned().collect(); + let mut group = RowGroup::new(entity_type.clone(), false); + group + .push( + EntityModification::Insert { + key: entity_type.key(fred.id()), + data: fred, + block: 2, + end: None, + }, + 2, + ) + .unwrap(); + layout.conflicting_entities(conn, &types, &group).await + }; let cat_type = layout.input_schema.entity_type(cat).unwrap(); let dog_type = layout.input_schema.entity_type(dog).unwrap(); let ferret_type = layout.input_schema.entity_type(ferret).unwrap(); let fred = entity! { layout.input_schema => id: id.clone(), name: id.clone(), vid: vid }; - insert_entity(conn, layout, &cat_type, vec![fred]); + insert_entity(conn, layout, &cat_type, vec![fred]).await; // If we wanted to create Fred the dog, which is forbidden, we'd run this: - let conflict = conflicting(conn, &dog_type, vec![&cat_type, &ferret_type]).unwrap(); + let conflict = conflicting(conn, &dog_type, vec![&cat_type, &ferret_type]) + .await + .unwrap(); assert_eq!(Some(cat.to_string()), conflict.map(|r| r.0)); // If we wanted to manipulate Fred the cat, which is ok, we'd run: - let conflict = conflicting(conn, &cat_type, vec![&dog_type, &ferret_type]).unwrap(); + let conflict = conflicting(conn, &cat_type, vec![&dog_type, &ferret_type]) + .await + .unwrap(); assert_eq!(None, conflict); } - run_test(|mut conn, layout| { + run_test(async |mut conn, layout| { let id = Value::String("fred".to_string()); - check(&mut conn, layout, id, "Cat", "Dog", "Ferret", 0); + check(&mut conn, layout, id, "Cat", "Dog", "Ferret", 0).await; let id = Value::Bytes(scalar::Bytes::from_str("0xf1ed").unwrap()); - check(&mut conn, layout, id, "ByteCat", "ByteDog", "ByteFerret", 1); + check(&mut conn, layout, id, "ByteCat", "ByteDog", "ByteFerret", 1).await; }) + .await } -#[test] -fn revert_block() { - fn check_fred(conn: &mut PgConnection, layout: &Layout) { +#[graph::test] +async fn revert_block() { + async fn check_fred(conn: &mut AsyncPgConnection, layout: &Layout) { let id = "fred"; - let set_fred = |conn: &mut PgConnection, name, block| { + let set_fred = async |conn: &mut AsyncPgConnection, name, block| { let fred = entity! 
             { layout.input_schema =>
                 id: id,
                 name: name,
                 vid: block as i64,
             };
             if block == 0 {
-                insert_entity_at(conn, layout, &*CAT_TYPE, vec![fred], block);
+                insert_entity_at(conn, layout, &*CAT_TYPE, vec![fred], block).await;
             } else {
-                update_entity_at(conn, layout, &*CAT_TYPE, vec![fred], block);
+                update_entity_at(conn, layout, &*CAT_TYPE, vec![fred], block).await;
             }
         };
-        let assert_fred = |conn: &mut PgConnection, name: &str| {
+        let assert_fred = async |conn: &mut AsyncPgConnection, name: &str| {
             let fred = layout
                 .find(conn, &CAT_TYPE.parse_key(id).unwrap(), BLOCK_NUMBER_MAX)
+                .await
                 .unwrap()
                 .expect("there's a fred");
             assert_eq!(name, fred.get("name").unwrap().as_str().unwrap())
         };

-        set_fred(conn, "zero", 0);
-        set_fred(conn, "one", 1);
-        set_fred(conn, "two", 2);
-        set_fred(conn, "three", 3);
+        set_fred(conn, "zero", 0).await;
+        set_fred(conn, "one", 1).await;
+        set_fred(conn, "two", 2).await;
+        set_fred(conn, "three", 3).await;

-        layout.revert_block(conn, 3).unwrap();
-        assert_fred(conn, "two");
-        layout.revert_block(conn, 2).unwrap();
-        assert_fred(conn, "one");
+        layout.revert_block(conn, 3).await.unwrap();
+        assert_fred(conn, "two").await;
+        layout.revert_block(conn, 2).await.unwrap();
+        assert_fred(conn, "one").await;

-        set_fred(conn, "three", 3);
-        assert_fred(conn, "three");
-        layout.revert_block(conn, 3).unwrap();
-        assert_fred(conn, "one");
+        set_fred(conn, "three", 3).await;
+        assert_fred(conn, "three").await;
+        layout.revert_block(conn, 3).await.unwrap();
+        assert_fred(conn, "one").await;
     }

-    fn check_marty(conn: &mut PgConnection, layout: &Layout) {
-        let set_marties = |conn: &mut PgConnection, from, to| {
+    async fn check_marty(conn: &mut AsyncPgConnection, layout: &Layout) {
+        let set_marties = async |conn: &mut AsyncPgConnection, from, to| {
             for block in from..=to {
                 let id = format!("marty-{}", block);
                 let marty = entity! { layout.input_schema =>
@@ -1044,11 +1092,13 @@ fn revert_block() {
                     order: block,
                     vid: (block + 10) as i64
                 };
-                insert_entity_at(conn, layout, &*MINK_TYPE, vec![marty], block);
+                insert_entity_at(conn, layout, &*MINK_TYPE, vec![marty], block).await;
             }
         };
-        let assert_marties = |conn: &mut PgConnection, max_block, except: Vec<BlockNumber>| {
+        let assert_marties = async |conn: &mut AsyncPgConnection,
+                                    max_block,
+                                    except: Vec<BlockNumber>| {
             let id = DeploymentHash::new("QmXW3qvxV7zXnwRntpj7yoK8HZVtaraZ67uMqaLRvXdxha").unwrap();
             let collection = EntityCollection::All(vec![(MINK_TYPE.clone(), AttributeNames::All)]);
             let filter = EntityFilter::StartsWith("id".to_string(), Value::from("marty"));
@@ -1058,6 +1108,7 @@ fn revert_block() {
                 .order(EntityOrder::Ascending("order".to_string(), ValueType::Int));
             let marties: Vec<Entity> = layout
                 .query(&LOGGER, conn, query)
+                .await
                 .map(|(entities, _)| entities)
                 .expect("loading all marties works");

@@ -1074,39 +1125,41 @@ fn revert_block() {
             }
         };

-        let assert_all_marties =
-            |conn: &mut PgConnection, max_block| assert_marties(conn, max_block, vec![]);
+        let assert_all_marties = async |conn: &mut AsyncPgConnection, max_block| {
+            assert_marties(conn, max_block, vec![]).await
+        };

-        set_marties(conn, 0, 4);
-        assert_all_marties(conn, 4);
+        set_marties(conn, 0, 4).await;
+        assert_all_marties(conn, 4).await;

-        layout.revert_block(conn, 3).unwrap();
-        assert_all_marties(conn, 2);
-        layout.revert_block(conn, 2).unwrap();
-        assert_all_marties(conn, 1);
+        layout.revert_block(conn, 3).await.unwrap();
+        assert_all_marties(conn, 2).await;
+        layout.revert_block(conn, 2).await.unwrap();
+        assert_all_marties(conn, 1).await;

-        set_marties(conn, 4, 4);
+        set_marties(conn, 4, 4).await;
         // We don't have entries for 2 and 3 anymore
-        assert_marties(conn, 4, vec![2, 3]);
+        assert_marties(conn, 4, vec![2, 3]).await;

-        layout.revert_block(conn, 2).unwrap();
-        assert_all_marties(conn, 1);
+        layout.revert_block(conn, 2).await.unwrap();
+        assert_all_marties(conn, 1).await;
     }

-    run_test(|conn, layout| {
-        check_fred(conn, layout);
-        check_marty(conn, layout);
-    });
+    run_test(async |conn, layout| {
+        check_fred(conn, layout).await;
+        check_marty(conn, layout).await;
+    })
+    .await;
 }

 struct QueryChecker<'a> {
-    conn: &'a mut PgConnection,
+    conn: &'a mut AsyncPgConnection,
     layout: &'a Layout,
 }

 impl<'a> QueryChecker<'a> {
-    fn new(conn: &'a mut PgConnection, layout: &'a Layout) -> Self {
-        insert_users(conn, layout);
+    async fn new(conn: &'a mut AsyncPgConnection, layout: &'a Layout) -> Self {
+        insert_users(conn, layout).await;
         update_user_entity(
             conn,
             layout,
@@ -1122,19 +1175,21 @@ impl<'a> QueryChecker<'a> {
             23,
             0,
             3,
-        );
-        insert_pets(conn, layout);
+        )
+        .await;
+        insert_pets(conn, layout).await;

         Self { conn, layout }
     }

-    fn check(self, expected_entity_ids: Vec<&'static str>, mut query: EntityQuery) -> Self {
+    async fn check(self, expected_entity_ids: Vec<&'static str>, mut query: EntityQuery) -> Self {
         let q = query.clone();
         let unordered = matches!(query.order, EntityOrder::Unordered);
         query.block = BLOCK_NUMBER_MAX;
         let entities = self
             .layout
             .query::<Entity>(&LOGGER, self.conn, query)
+            .await
             .expect("layout.query failed to execute query")
             .0;

@@ -1199,26 +1254,30 @@ impl EasyOrder for EntityQuery {
     }
 }

-#[test]
+#[graph::test]
 #[should_panic(
     expected = "layout.query failed to execute query: FulltextQueryInvalidSyntax(\"syntax error in tsquery: \\\"Jono 'a\\\"\")"
 )]
-fn check_fulltext_search_syntax_error() {
-    run_test(move |mut conn, layout| {
-        QueryChecker::new(&mut conn, layout).check(
-            vec!["1"],
-            user_query().filter(EntityFilter::Fulltext(
-                "userSearch".into(),
-                "Jono 'a".into(),
-            )),
-        );
-    });
+async fn check_fulltext_search_syntax_error() {
+    run_test(async |mut conn, layout| {
+        QueryChecker::new(&mut conn, layout)
+            .await
+            .check(
+                vec!["1"],
+                user_query().filter(EntityFilter::Fulltext(
+                    "userSearch".into(),
+                    "Jono 'a".into(),
+                )),
+            )
+            .await;
+    })
+    .await;
 }

-#[test]
-fn check_block_finds() {
-    run_test(move |mut conn, layout| {
-        let checker = QueryChecker::new(&mut conn, layout);
+#[graph::test]
+async fn check_block_finds() {
+    run_test(async |mut conn, layout| {
+        let checker = QueryChecker::new(&mut conn, layout).await;

         update_user_entity(
             checker.conn,
@@ -1235,7 +1294,8 @@ fn check_block_finds() {
             55,
             1,
             4,
-        );
+        )
+        .await;

         checker
             // Max block, we should get nothing
@@ -1243,36 +1303,47 @@ fn check_block_finds() {
                 vec![],
                 user_query().filter(EntityFilter::ChangeBlockGte(BLOCK_NUMBER_MAX)),
             )
+            .await
             // Initial block, we should get here all data
             .check(
                 vec!["1", "2", "3"],
                 user_query().filter(EntityFilter::ChangeBlockGte(0)),
             )
+            .await
             // Block with an update, we should have one only
            .check(
                 vec!["1"],
                 user_query().filter(EntityFilter::ChangeBlockGte(1)),
-            );
-    });
+            )
+            .await;
+    })
+    .await;
 }

-#[test]
-fn check_find() {
-    run_test(move |mut conn, layout| {
+#[graph::test]
+async fn check_find() {
+    run_test(async |mut conn, layout| {
         // find with interfaces
         let types = vec![&*CAT_TYPE, &*DOG_TYPE];
         let checker = QueryChecker::new(&mut conn, layout)
+            .await
             .check(vec!["garfield", "pluto"], query(&types))
+            .await
             .check(vec!["pluto", "garfield"], query(&types).desc("name"))
+            .await
             .check(
                 vec!["garfield"],
                 query(&types)
                     .filter(EntityFilter::StartsWith("name".into(), Value::from("Gar")))
                     .desc("name"),
             )
+            .await
             .check(vec!["pluto", "garfield"], query(&types).desc("id"))
+            .await
             .check(vec!["garfield", "pluto"], query(&types).asc("id"))
-            .check(vec!["garfield", "pluto"], query(&types).unordered());
+            .await
+            .check(vec!["garfield", "pluto"], query(&types).unordered())
+            .await;

         // fulltext
         let checker = checker
@@ -1280,13 +1351,15 @@ fn check_find() {
                 vec!["3"],
                 user_query().filter(EntityFilter::Fulltext("userSearch".into(), "Shaq:*".into())),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query().filter(EntityFilter::Fulltext(
                     "userSearch".into(),
                     "Jono & achangedemail@email.com".into(),
                 )),
-            );
+            )
+            .await;
         // Test with a second fulltext search; we had a bug that caused only
         // one search index to be populated (see issue #4794)
         let checker = checker
@@ -1297,13 +1370,15 @@ fn check_find() {
                     "Shaq:*".into(),
                 )),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query().filter(EntityFilter::Fulltext(
                     "userSearch2".into(),
                     "Jono & achangedemail@email.com".into(),
                 )),
-            );
+            )
+            .await;

         // list contains
         fn drinks_query(v: Vec<&str>) -> EntityQuery {
@@ -1313,11 +1388,16 @@ fn check_find() {

         let checker = checker
             .check(vec!["2"], drinks_query(vec!["beer"]))
+            .await
             // Reverse of how we stored it
             .check(vec!["3"], drinks_query(vec!["tea", "coffee"]))
+            .await
             .check(vec![], drinks_query(vec!["beer", "tea"]))
+            .await
             .check(vec![], drinks_query(vec!["beer", "water"]))
-            .check(vec![], drinks_query(vec!["beer", "wine", "water"]));
+            .await
+            .check(vec![], drinks_query(vec!["beer", "wine", "water"]))
+            .await;

         // list not contains
         let checker = checker
@@ -1329,6 +1409,7 @@ fn check_find() {
                     vec!["beer"].into(),
                 )),
             )
+            .await
             // Users 2 do not have "tea" on its drinks list.
             .check(
                 vec!["2"],
@@ -1336,7 +1417,8 @@ fn check_find() {
                     "drinks".into(),
                     vec!["tea"].into(),
                 )),
-            );
+            )
+            .await;

         // string attributes
         let checker = checker
@@ -1344,10 +1426,12 @@ fn check_find() {
                 vec!["2"],
                 user_query().filter(EntityFilter::Contains("name".into(), "ind".into())),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query().filter(EntityFilter::Equal("name".to_owned(), "Cindini".into())),
             )
+            .await
             // Test that we can order by id
             .check(
                 vec!["2"],
@@ -1355,28 +1439,33 @@ fn check_find() {
                     .filter(EntityFilter::Equal("name".to_owned(), "Cindini".into()))
                     .desc("id"),
             )
+            .await
             .check(
                 vec!["1", "3"],
                 user_query()
                     .filter(EntityFilter::Not("name".to_owned(), "Cindini".into()))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["3"],
                 user_query().filter(EntityFilter::GreaterThan("name".to_owned(), "Kundi".into())),
             )
+            .await
             .check(
                 vec!["2", "1"],
                 user_query()
                     .filter(EntityFilter::LessThan("name".to_owned(), "Kundi".into()))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["1", "2"],
                 user_query()
                     .filter(EntityFilter::LessThan("name".to_owned(), "Kundi".into()))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query()
@@ -1385,6 +1474,7 @@ fn check_find() {
                     .first(1)
                     .skip(1),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -1394,18 +1484,21 @@ fn check_find() {
                     ]))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
                     .filter(EntityFilter::EndsWith("name".to_owned(), "ini".into()))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["3", "1"],
                 user_query()
                     .filter(EntityFilter::NotEndsWith("name".to_owned(), "ini".into()))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query()
@@ -1415,10 +1508,12 @@ fn check_find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec![],
                 user_query().filter(EntityFilter::In("name".to_owned(), vec![])),
             )
+            .await
             .check(
                 vec!["1", "2"],
                 user_query()
@@ -1427,7 +1522,8 @@ fn check_find() {
                         vec!["Shaqueeena".into()],
                     ))
                     .desc("name"),
-            );
+            )
+            .await;
         // float attributes
         let checker = checker
             .check(
                 vec!["2"],
                 user_query().filter(EntityFilter::Equal(
                     "weight".to_owned(),
                     Value::BigDecimal(184.4.into()),
                 )),
             )
+            .await
             .check(
                 vec!["3", "2"],
                 user_query()
@@ -1446,6 +1543,7 @@ fn check_find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query().filter(EntityFilter::GreaterThan(
@@ -1453,6 +1551,7 @@ fn check_find() {
                     Value::BigDecimal(160.0.into()),
                 )),
             )
+            .await
             .check(
                 vec!["2", "3"],
                 user_query()
@@ -1462,6 +1561,7 @@ fn check_find() {
                     ))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["3", "2"],
                 user_query()
@@ -1471,6 +1571,7 @@ fn check_find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -1482,6 +1583,7 @@ fn check_find() {
                     .first(1)
                     .skip(1),
             )
+            .await
             .check(
                 vec!["3", "1"],
                 user_query()
@@ -1495,6 +1597,7 @@ fn check_find() {
                     .desc("name")
                     .first(5),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -1507,15 +1610,18 @@ fn check_find() {
                     ))
                     .desc("name")
                     .first(5),
-            );
+            )
+            .await;

         // int 8 attributes
-        let checker = checker.check(
-            vec!["3"],
-            user_query()
-                .filter(EntityFilter::Equal("visits".to_owned(), Value::Int(22_i32)))
-                .desc("name"),
-        );
+        let checker = checker
+            .check(
+                vec!["3"],
+                user_query()
+                    .filter(EntityFilter::Equal("visits".to_owned(), Value::Int(22_i32)))
+                    .desc("name"),
+            )
+            .await;

         // int attributes
         let checker = checker
@@ -1525,12 +1631,14 @@ fn check_find() {
                     .filter(EntityFilter::Equal("age".to_owned(), Value::Int(67_i32)))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["3", "2"],
                 user_query()
                     .filter(EntityFilter::Not("age".to_owned(), Value::Int(67_i32)))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query().filter(EntityFilter::GreaterThan(
@@ -1538,6 +1646,7 @@ fn check_find() {
                     Value::Int(43_i32),
                 )),
             )
+            .await
             .check(
                 vec!["2", "1"],
                 user_query()
@@ -1547,12 +1656,14 @@ fn check_find() {
                     ))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["2", "3"],
                 user_query()
                     .filter(EntityFilter::LessThan("age".to_owned(), Value::Int(50_i32)))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["2", "3"],
                 user_query()
@@ -1562,12 +1673,14 @@ fn check_find() {
                     ))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["3", "2"],
                 user_query()
                     .filter(EntityFilter::LessThan("age".to_owned(), Value::Int(50_i32)))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -1576,6 +1689,7 @@ fn check_find() {
                     .first(1)
                     .skip(1),
             )
+            .await
             .check(
                 vec!["1", "2"],
                 user_query()
@@ -1586,6 +1700,7 @@ fn check_find() {
                     .desc("name")
                     .first(5),
             )
+            .await
             .check(
                 vec!["3"],
                 user_query()
@@ -1595,7 +1710,8 @@ fn check_find() {
                     ))
                     .desc("name")
                     .first(5),
-            );
+            )
+            .await;

         // bool attributes
         let checker = checker
@@ -1605,12 +1721,14 @@ fn check_find() {
                     .filter(EntityFilter::Equal("coffee".to_owned(), Value::Bool(true)))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1", "3"],
                 user_query()
                     .filter(EntityFilter::Not("coffee".to_owned(), Value::Bool(true)))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -1621,6 +1739,7 @@ fn check_find() {
                     .desc("name")
                     .first(5),
             )
+            .await
             .check(
                 vec!["3", "1"],
                 user_query()
@@ -1630,7 +1749,8 @@ fn check_find() {
                     ))
                     .desc("name")
                     .first(5),
-            );
+            )
+            .await;
         // misc tests
         let checker = checker
             .check(
@@ -1642,6 +1762,7 @@ fn check_find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["3"],
                 user_query()
@@ -1651,12 +1772,14 @@ fn check_find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1", "2"],
                 user_query()
                     .filter(EntityFilter::Not("favorite_color".to_owned(), Value::Null))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1", "2"],
                 user_query()
@@ -1666,6 +1789,7 @@ fn check_find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1", "2"],
                 user_query()
@@ -1675,16 +1799,27 @@ fn check_find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(vec!["3", "2", "1"], user_query().asc("weight"))
+            .await
             .check(vec!["1", "2", "3"], user_query().desc("weight"))
+            .await
             .check(vec!["1", "2", "3"], user_query().unordered())
+            .await
             .check(vec!["1", "2", "3"], user_query().asc("id"))
+            .await
             .check(vec!["3", "2", "1"], user_query().desc("id"))
+            .await
             .check(vec!["1", "2", "3"], user_query().unordered())
+            .await
             .check(vec!["3", "2", "1"], user_query().asc("age"))
+            .await
             .check(vec!["1", "2", "3"], user_query().desc("age"))
+            .await
             .check(vec!["2", "1", "3"], user_query().asc("name"))
+            .await
             .check(vec!["3", "1", "2"], user_query().desc("name"))
+            .await
             .check(
                 vec!["1", "2"],
                 user_query()
@@ -1693,7 +1828,8 @@ fn check_find() {
                         EntityFilter::Equal("id".to_owned(), Value::from("2")),
                     ])]))
                     .asc("id"),
-            );
+            )
+            .await;

         // enum attributes
         let checker = checker
@@ -1706,12 +1842,14 @@ fn check_find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query()
                     .filter(EntityFilter::Not("favorite_color".to_owned(), "red".into()))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -1722,6 +1860,7 @@ fn check_find() {
                     .desc("name")
                     .first(5),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query()
@@ -1731,7 +1870,8 @@ fn check_find() {
                     ))
                     .desc("name")
                     .first(5),
-            );
+            )
+            .await;

         // empty and / or
@@ -1745,12 +1885,15 @@ fn check_find() {
                 vec![],
                 user_query().filter(EntityFilter::And(vec![EntityFilter::Or(vec![])])),
             )
+            .await
             // An empty 'and' is 'true'
             .check(
                 vec!["1", "2", "3"],
                 user_query().filter(EntityFilter::Or(vec![EntityFilter::And(vec![])])),
-            );
-    });
+            )
+            .await;
+    })
+    .await;
 }

 // We call our test strings aN so that
@@ -1770,22 +1913,26 @@ fn ferrets() -> (String, String, String, String) {
 }

 struct FilterChecker<'a> {
-    conn: &'a mut PgConnection,
+    conn: &'a mut AsyncPgConnection,
     layout: &'a Layout,
 }

 impl<'a> FilterChecker<'a> {
-    fn new(conn: &'a mut PgConnection, layout: &'a Layout) -> Self {
+    async fn new(conn: &'a mut AsyncPgConnection, layout: &'a Layout) -> Self {
         let (a1, a2, a2b, a3) = ferrets();

-        insert_pet(conn, layout, &*FERRET_TYPE, "a1", &a1, 0, 0);
-        insert_pet(conn, layout, &*FERRET_TYPE, "a2", &a2, 0, 1);
-        insert_pet(conn, layout, &*FERRET_TYPE, "a2b", &a2b, 0, 2);
-        insert_pet(conn, layout, &*FERRET_TYPE, "a3", &a3, 0, 3);
+        insert_pet(conn, layout, &*FERRET_TYPE, "a1", &a1, 0, 0).await;
+        insert_pet(conn, layout, &*FERRET_TYPE, "a2", &a2, 0, 1).await;
+        insert_pet(conn, layout, &*FERRET_TYPE, "a2b", &a2b, 0, 2).await;
+        insert_pet(conn, layout, &*FERRET_TYPE, "a3", &a3, 0, 3).await;

         Self { conn, layout }
     }

-    fn check(&mut self, expected_entity_ids: Vec<&'static str>, filter: EntityFilter) -> &mut Self {
+    async fn check(
+        &mut self,
+        expected_entity_ids: Vec<&'static str>,
+        filter: EntityFilter,
+    ) -> &mut Self {
         let expected_entity_ids: Vec<String> =
             expected_entity_ids.into_iter().map(str::to_owned).collect();

@@ -1794,6 +1941,7 @@ impl<'a> FilterChecker<'a> {
         let entities = self
             .layout
             .query::<Entity>(&LOGGER, self.conn, query)
+            .await
             .expect("layout.query failed to execute query")
             .0;

@@ -1811,8 +1959,8 @@ impl<'a> FilterChecker<'a> {
     }
 }

-#[test]
-fn check_filters() {
+#[graph::test]
+async fn check_filters() {
     let (a1, a2, a2b, a3) = ferrets();

     fn filter_eq(name: &str) -> EntityFilter {
@@ -1863,60 +2011,96 @@ fn check_filters() {
         )
     }

-    run_test(move |conn, layout| {
-        let mut checker = FilterChecker::new(conn, layout);
+    run_test(async |conn, layout| {
+        let mut checker = FilterChecker::new(conn, layout).await;

         checker
             .check(vec!["a1"], filter_eq(&a1))
+            .await
             .check(vec!["a2"], filter_eq(&a2))
+            .await
             .check(vec!["a2b"], filter_eq(&a2b))
-            .check(vec!["a3"], filter_eq(&a3));
+            .await
+            .check(vec!["a3"], filter_eq(&a3))
+            .await;

         checker
             .check(vec!["a2", "a2b", "a3"], filter_not(&a1))
+            .await
             .check(vec!["a1", "a2b", "a3"], filter_not(&a2))
+            .await
             .check(vec!["a1", "a2", "a3"], filter_not(&a2b))
-            .check(vec!["a1", "a2", "a2b"], filter_not(&a3));
+            .await
+            .check(vec!["a1", "a2", "a2b"], filter_not(&a3))
+            .await;

         checker
             .check(vec![], filter_lt(&a1))
+            .await
             .check(vec!["a1"], filter_lt(&a2))
+            .await
             .check(vec!["a1", "a2", "a3"], filter_lt(&a2b))
-            .check(vec!["a1", "a2"], filter_lt(&a3));
+            .await
+            .check(vec!["a1", "a2"], filter_lt(&a3))
+            .await;

         checker
             .check(vec!["a1"], filter_le(&a1))
+            .await
             .check(vec!["a1", "a2"], filter_le(&a2))
+            .await
             .check(vec!["a1", "a2", "a2b", "a3"], filter_le(&a2b))
-            .check(vec!["a1", "a2", "a3"], filter_le(&a3));
+            .await
+            .check(vec!["a1", "a2", "a3"], filter_le(&a3))
+            .await;

         checker
             .check(vec!["a2", "a2b", "a3"], filter_gt(&a1))
+            .await
             .check(vec!["a2b", "a3"], filter_gt(&a2))
+            .await
             .check(vec![], filter_gt(&a2b))
-            .check(vec!["a2b"], filter_gt(&a3));
+            .await
+            .check(vec!["a2b"], filter_gt(&a3))
+            .await;

         checker
             .check(vec!["a1", "a2", "a2b", "a3"], filter_ge(&a1))
+            .await
             .check(vec!["a2", "a2b", "a3"], filter_ge(&a2))
+            .await
             .check(vec!["a2b"], filter_ge(&a2b))
-            .check(vec!["a2b", "a3"], filter_ge(&a3));
+            .await
+            .check(vec!["a2b", "a3"], filter_ge(&a3))
+            .await;

         checker
             .check(vec!["a1"], filter_in(vec![&a1]))
+            .await
             .check(vec!["a2"], filter_in(vec![&a2]))
+            .await
             .check(vec!["a2b"], filter_in(vec![&a2b]))
+            .await
             .check(vec!["a3"], filter_in(vec![&a3]))
+            .await
             .check(vec!["a1", "a2"], filter_in(vec![&a1, &a2]))
-            .check(vec!["a1", "a3"], filter_in(vec![&a1, &a3]));
+            .await
+            .check(vec!["a1", "a3"], filter_in(vec![&a1, &a3]))
+            .await;

         checker
             .check(vec!["a2", "a2b", "a3"], filter_not_in(vec![&a1]))
+            .await
             .check(vec!["a1", "a2b", "a3"], filter_not_in(vec![&a2]))
+            .await
             .check(vec!["a1", "a2", "a3"], filter_not_in(vec![&a2b]))
+            .await
             .check(vec!["a1", "a2", "a2b"], filter_not_in(vec![&a3]))
+            .await
             .check(vec!["a2b", "a3"], filter_not_in(vec![&a1, &a2]))
-            .check(vec!["a2", "a2b"], filter_not_in(vec![&a1, &a3]));
+            .await
+            .check(vec!["a2", "a2b"], filter_not_in(vec![&a1, &a3]))
+            .await;

         update_entity_at(
             checker.conn,
@@ -1928,11 +2112,16 @@ fn check_filters() {
                 vid: 5i64
             }],
             1,
-        );
+        )
+        .await;

         checker
             .check(vec!["a1", "a2", "a2b", "a3"], filter_block_gte(0))
+            .await
             .check(vec!["a1"], filter_block_gte(1))
-            .check(vec![], filter_block_gte(BLOCK_NUMBER_MAX));
-    });
+            .await
+            .check(vec![], filter_block_gte(BLOCK_NUMBER_MAX))
+            .await;
+    })
+    .await;
 }
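The dominant change in relational.rs above is mechanical: `QueryChecker::check` and `FilterChecker::check` become `async fn`s that consume and return `Self`, so every fluent chain needs an `.await` between calls. A self-contained toy of that consuming-builder pattern (illustrative names only, not the graph-node API; assumes a tokio dev-dependency):

struct Checker {
    seen: Vec<&'static str>,
}

impl Checker {
    // Like the test helpers above, `check` consumes `self` and returns it,
    // so each call yields a future that must be awaited before chaining on.
    async fn check(mut self, id: &'static str) -> Self {
        // A real checker would run a database query here; we only record it.
        self.seen.push(id);
        self
    }
}

#[tokio::main]
async fn main() {
    let checker = Checker { seen: Vec::new() }
        .check("1")
        .await
        .check("2")
        .await;
    assert_eq!(checker.seen, vec!["1", "2"]);
}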
diff --git a/store/test-store/tests/postgres/relational_bytes.rs b/store/test-store/tests/postgres/relational_bytes.rs
index 3f4bd88c8d8..4cc51e763f8 100644
--- a/store/test-store/tests/postgres/relational_bytes.rs
+++ b/store/test-store/tests/postgres/relational_bytes.rs
@@ -1,12 +1,13 @@
 //! Test relational schemas that use `Bytes` to store ids
-use diesel::connection::SimpleConnection as _;
-use diesel::pg::PgConnection;
+use diesel_async::SimpleAsyncConnection;
+
 use graph::components::store::write::RowGroup;
 use graph::data::store::scalar;
 use graph::data_source::CausalityRegion;
 use graph::entity;
 use graph::prelude::{BlockNumber, EntityModification, EntityQuery, MetricsRegistry, StoreError};
 use graph::schema::{EntityKey, EntityType, InputSchema};
+use graph_store_postgres::AsyncPgConnection;
 use hex_literal::hex;
 use lazy_static::lazy_static;
 use std::collections::BTreeSet;
@@ -71,9 +72,10 @@ lazy_static! {
 }

 /// Removes test data from the database behind the store.
-fn remove_test_data(conn: &mut PgConnection) {
+async fn remove_test_data(conn: &mut AsyncPgConnection) {
     let query = format!("drop schema if exists {} cascade", NAMESPACE.as_str());
     conn.batch_execute(&query)
+        .await
         .expect("Failed to drop test schema");
 }

@@ -119,17 +121,31 @@ pub fn row_group_delete(
     group
 }

-fn insert_entity(conn: &mut PgConnection, layout: &Layout, entity_type: &str, entity: Entity) {
+async fn insert_entity(
+    conn: &mut AsyncPgConnection,
+    layout: &Layout,
+    entity_type: &str,
+    entity: Entity,
+) {
     let entity_type = layout.input_schema.entity_type(entity_type).unwrap();
     let key = entity_type.key(entity.id());
     let entities = vec![(key.clone(), entity)];
     let group = row_group_insert(&entity_type, 0, entities);
     let errmsg = format!("Failed to insert entity {}[{}]", entity_type, key.entity_id);
-    layout.insert(conn, &group, &MOCK_STOPWATCH).expect(&errmsg);
+    layout
+        .insert(conn, &group, &MOCK_STOPWATCH)
+        .await
+        .expect(&errmsg);
 }

-fn insert_thing(conn: &mut PgConnection, layout: &Layout, id: &str, name: &str, vid: i64) {
+async fn insert_thing(
+    conn: &mut AsyncPgConnection,
+    layout: &Layout,
+    id: &str,
+    name: &str,
+    vid: i64,
+) {
     insert_entity(
         conn,
         layout,
@@ -139,14 +155,15 @@ fn insert_thing(conn: &mut PgConnection, layout: &Layout, id: &str, name: &str,
             name: name,
             vid: vid,
         },
-    );
+    )
+    .await;
 }

-fn create_schema(conn: &mut PgConnection) -> Layout {
+async fn create_schema(conn: &mut AsyncPgConnection) -> Layout {
     let schema = InputSchema::parse_latest(THINGS_GQL, THINGS_SUBGRAPH_ID.clone()).unwrap();

     let query = format!("create schema {}", NAMESPACE.as_str());
-    conn.batch_execute(&query).unwrap();
+    conn.batch_execute(&query).await.unwrap();

     let site = make_dummy_site(
         THINGS_SUBGRAPH_ID.clone(),
@@ -154,6 +171,7 @@ fn create_schema(conn: &mut PgConnection) -> Layout {
         NETWORK_NAME.to_string(),
     );
     Layout::create_relational_schema(conn, Arc::new(site), &schema, BTreeSet::new(), None)
+        .await
         .expect("Failed to create relational schema")
 }

@@ -189,38 +207,39 @@ macro_rules! assert_entity_eq {
     }};
 }

-fn run_test<F>(test: F)
-where
-    F: FnOnce(&mut PgConnection, &Layout),
+async fn run_test<F>(test: F)
+where
+    F: AsyncFnOnce(&mut AsyncPgConnection, &Layout),
 {
-    run_test_with_conn(|conn| {
+    run_test_with_conn(async |conn| {
         // Reset state before starting
-        remove_test_data(conn);
+        remove_test_data(conn).await;

         // Seed database with test data
-        let layout = create_schema(conn);
+        let layout = create_schema(conn).await;

         // Run test
-        test(conn, &layout);
-    });
+        test(conn, &layout).await;
+    })
+    .await;
 }

-#[test]
-fn bad_id() {
-    run_test(|conn, layout| {
-        fn find(
-            conn: &mut PgConnection,
+#[graph::test]
+async fn bad_id() {
+    run_test(async |conn, layout| {
+        async fn find(
+            conn: &mut AsyncPgConnection,
             layout: &Layout,
             id: &str,
         ) -> Result<Option<Entity>, StoreError> {
             let key = THING_TYPE.parse_key(id)?;
-            layout.find(conn, &key, BLOCK_NUMBER_MAX)
+            layout.find(conn, &key, BLOCK_NUMBER_MAX).await
         }

         // We test that we get errors for various strings that are not
         // valid 'Bytes' strings; we use `find` to force the conversion
         // from String -> Bytes internally
-        let res = find(conn, layout, "bad");
+        let res = find(conn, layout, "bad").await;
         assert!(res.is_err());
         assert_eq!(
             "store error: can not convert `bad` to Id::Bytes: Odd number of digits",
@@ -228,7 +247,7 @@ fn bad_id() {
         );

         // We do not allow the `\x` prefix that Postgres uses
-        let res = find(conn, layout, "\\xbadd");
+        let res = find(conn, layout, "\\xbadd").await;
         assert!(res.is_err());
         assert_eq!(
             "store error: can not convert `\\xbadd` to Id::Bytes: Invalid character '\\\\' at position 0",
@@ -236,53 +255,59 @@ fn bad_id() {
         );

         // Having the '0x' prefix is ok
-        let res = find(conn, layout, "0xbadd");
+        let res = find(conn, layout, "0xbadd").await;
         assert!(res.is_ok());

         // Using non-hex characters is also bad
-        let res = find(conn, layout, "nope");
+        let res = find(conn, layout, "nope").await;
         assert!(res.is_err());
         assert_eq!(
             "store error: can not convert `nope` to Id::Bytes: Invalid character 'n' at position 0",
             res.err().unwrap().to_string()
         );
-    });
+    }).await;
 }

-#[test]
-fn find() {
-    run_test(|mut conn, layout| {
-        fn find_entity(conn: &mut PgConnection, layout: &Layout, id: &str) -> Option<Entity> {
+#[graph::test]
+async fn find() {
+    run_test(async |mut conn, layout| {
+        async fn find_entity(
+            conn: &mut AsyncPgConnection,
+            layout: &Layout,
+            id: &str,
+        ) -> Option<Entity> {
             let key = THING_TYPE.parse_key(id).unwrap();
             layout
                 .find(conn, &key, BLOCK_NUMBER_MAX)
+                .await
                 .expect(&format!("Failed to read Thing[{}]", id))
         }

         const ID: &str = "deadbeef";
         const NAME: &str = "Beef";
-        insert_thing(&mut conn, layout, ID, NAME, 0);
+        insert_thing(&mut conn, layout, ID, NAME, 0).await;

         // Happy path: find existing entity
-        let entity = find_entity(conn, layout, ID).unwrap();
+        let entity = find_entity(conn, layout, ID).await.unwrap();
         assert_entity_eq!(BEEF_ENTITY.clone(), entity);
         assert!(CausalityRegion::from_entity(&entity) == CausalityRegion::ONCHAIN);

         // Find non-existing entity
-        let entity = find_entity(conn, layout, "badd");
+        let entity = find_entity(conn, layout, "badd").await;
         assert!(entity.is_none());
-    });
+    })
+    .await;
 }

-#[test]
-fn find_many() {
-    run_test(|mut conn, layout| {
+#[graph::test]
+async fn find_many() {
+    run_test(async |mut conn, layout| {
         const ID: &str = "0xdeadbeef";
         const NAME: &str = "Beef";
         const ID2: &str = "0xdeadbeef02";
         const NAME2: &str = "Moo";
-        insert_thing(&mut conn, layout, ID, NAME, 0);
-        insert_thing(&mut conn, layout, ID2, NAME2, 1);
+        insert_thing(&mut conn, layout, ID, NAME, 0).await;
+        insert_thing(&mut conn, layout, ID2, NAME2, 1).await;

         let mut id_map = BTreeMap::default();
         let ids = IdList::try_from_iter(
@@ -296,6 +321,7 @@ fn find_many() {

         let entities = layout
             .find_many(conn, &id_map, BLOCK_NUMBER_MAX)
+            .await
             .expect("Failed to read many things");
         assert_eq!(2, entities.len());

@@ -303,13 +329,14 @@ fn find_many() {
         let id2_key = THING_TYPE.parse_key(ID2).unwrap();
         assert!(entities.contains_key(&id_key), "Missing ID");
         assert!(entities.contains_key(&id2_key), "Missing ID2");
-    });
+    })
+    .await;
 }

-#[test]
-fn update() {
-    run_test(|mut conn, layout| {
-        insert_entity(&mut conn, layout, "Thing", BEEF_ENTITY.clone());
+#[graph::test]
+async fn update() {
+    run_test(async |mut conn, layout| {
+        insert_entity(&mut conn, layout, "Thing", BEEF_ENTITY.clone()).await;

         // Update the entity
         let mut entity = BEEF_ENTITY.clone();
@@ -323,27 +350,30 @@ fn update() {
         let group = row_group_update(&entity_type, 1, entities);
         layout
             .update(conn, &group, &MOCK_STOPWATCH)
+            .await
             .expect("Failed to update");

         let actual = layout
             .find(conn, &THING_TYPE.key(entity_id), BLOCK_NUMBER_MAX)
+            .await
             .expect("Failed to read Thing[deadbeef]")
             .unwrap();

         assert_entity_eq!(entity, actual);
-    });
+    })
+    .await;
 }

-#[test]
-fn delete() {
-    run_test(|mut conn, layout| {
+#[graph::test]
+async fn delete() {
+    run_test(async |mut conn, layout| {
         const TWO_ID: &str = "deadbeef02";

-        insert_entity(&mut conn, layout, "Thing", BEEF_ENTITY.clone());
+        insert_entity(&mut conn, layout, "Thing", BEEF_ENTITY.clone()).await;
         let mut two = BEEF_ENTITY.clone();
         two.set("id", TWO_ID).unwrap();
         two.set("vid", 1i64).unwrap();
-        insert_entity(&mut conn, layout, "Thing", two);
+        insert_entity(&mut conn, layout, "Thing", two).await;

         // Delete where nothing is getting deleted
         let key = THING_TYPE.parse_key("ffff").unwrap();
@@ -352,6 +382,7 @@ fn delete() {
         let group = row_group_delete(&entity_type, 1, entity_keys.clone());
         let count = layout
             .delete(&mut conn, &group, &MOCK_STOPWATCH)
+            .await
             .expect("Failed to delete");
         assert_eq!(0, count);

@@ -363,9 +394,11 @@ fn delete() {
         let group = row_group_delete(&entity_type, 1, entity_keys);
         let count = layout
             .delete(&mut conn, &group, &MOCK_STOPWATCH)
+            .await
             .expect("Failed to delete");
         assert_eq!(1, count);
-    });
+    })
+    .await;
 }

 //
@@ -386,7 +419,10 @@ const GRANDCHILD2: &str = "0xfafa02";
 ///   +- child2
 ///      +- grandchild2
 ///
-fn make_thing_tree(conn: &mut PgConnection, layout: &Layout) -> (Entity, Entity, Entity) {
+async fn make_thing_tree(
+    conn: &mut AsyncPgConnection,
+    layout: &Layout,
+) -> (Entity, Entity, Entity) {
     let root = entity! { layout.input_schema =>
         id: ROOT,
         name: "root",
@@ -420,21 +456,26 @@ fn make_thing_tree(conn: &mut PgConnection, layout: &Layout) -> (Entity, Entity,
         vid: 4i64,
     };

-    insert_entity(conn, layout, "Thing", root.clone());
-    insert_entity(conn, layout, "Thing", child1.clone());
-    insert_entity(conn, layout, "Thing", child2.clone());
-    insert_entity(conn, layout, "Thing", grand_child1);
-    insert_entity(conn, layout, "Thing", grand_child2);
+    insert_entity(conn, layout, "Thing", root.clone()).await;
+    insert_entity(conn, layout, "Thing", child1.clone()).await;
+    insert_entity(conn, layout, "Thing", child2.clone()).await;
+    insert_entity(conn, layout, "Thing", grand_child1).await;
+    insert_entity(conn, layout, "Thing", grand_child2).await;

     (root, child1, child2)
 }

-#[test]
-fn query() {
-    fn fetch(conn: &mut PgConnection, layout: &Layout, coll: EntityCollection) -> Vec<String> {
+#[graph::test]
+async fn query() {
+    async fn fetch(
+        conn: &mut AsyncPgConnection,
+        layout: &Layout,
+        coll: EntityCollection,
+    ) -> Vec<String> {
         let id = DeploymentHash::new("QmXW3qvxV7zXnwRntpj7yoK8HZVtaraZ67uMqaLRvXdxha").unwrap();
         let query = EntityQuery::new(id, BLOCK_NUMBER_MAX, coll).first(10);
         layout
             .query::<Entity>(&LOGGER, conn, query)
+            .await
             .map(|(entities, _)| entities)
             .expect("the query succeeds")
             .into_iter()
@@ -442,21 +483,21 @@ fn query() {
             .collect::<Vec<_>>()
     }

-    run_test(|mut conn, layout| {
+    run_test(async |mut conn, layout| {
         // This test exercises the different types of queries we generate;
         // the type of query is based on knowledge of what the test data
         // looks like, not on just an inference from the GraphQL model.
         // Especially the multiplicity for type A and B queries is determined
         // by knowing whether there are one or many entities per parent
         // in the test data
-        make_thing_tree(&mut conn, layout);
+        make_thing_tree(&mut conn, layout).await;

         // See https://graphprotocol.github.io/rfcs/engineering-plans/0001-graphql-query-prefetching.html#handling-parentchild-relationships
         // for a discussion of the various types of relationships and queries

         // EntityCollection::All
         let coll = EntityCollection::All(vec![(THING_TYPE.clone(), AttributeNames::All)]);
-        let things = fetch(&mut conn, layout, coll);
+        let things = fetch(&mut conn, layout, coll).await;
         assert_eq!(vec![CHILD1, CHILD2, ROOT, GRANDCHILD1, GRANDCHILD2], things);

         // EntityCollection::Window, type A, many
@@ -470,7 +511,7 @@ fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll);
+        let things = fetch(&mut conn, layout, coll).await;
         assert_eq!(vec![ROOT], things);

         // EntityCollection::Window, type A, single
@@ -486,7 +527,7 @@ fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll);
+        let things = fetch(&mut conn, layout, coll).await;
         assert_eq!(vec![CHILD1, CHILD2], things);

         // EntityCollection::Window, type B, many
@@ -500,7 +541,7 @@ fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll);
+        let things = fetch(&mut conn, layout, coll).await;
         assert_eq!(vec![CHILD1, CHILD2], things);

         // EntityCollection::Window, type B, single
@@ -514,7 +555,7 @@ fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll);
+        let things = fetch(&mut conn, layout, coll).await;
         assert_eq!(vec![GRANDCHILD1, GRANDCHILD2], things);

         // EntityCollection::Window, type C
@@ -529,7 +570,7 @@ fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll);
+        let things = fetch(&mut conn, layout, coll).await;
         assert_eq!(vec![CHILD1, CHILD2], things);

         // EntityCollection::Window, type D
@@ -544,7 +585,8 @@ fn query() {
             ),
             column_names: AttributeNames::All,
         }]);
-        let things = fetch(&mut conn, layout, coll);
+        let things = fetch(&mut conn, layout, coll).await;

         assert_eq!(vec![ROOT, ROOT], things);
-    });
+    })
+    .await;
 }
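relational_bytes.rs shows the harness side of the conversion: `run_test` now takes an `AsyncFnOnce` bound and an async closure, so the future returned by the test body may borrow the connection the harness passes in. A minimal runnable sketch of that shape under stated assumptions (Rust 1.85+ async closures and tokio; a `Vec<String>` stands in for `AsyncPgConnection`, and all names are illustrative):

// Stand-in for AsyncPgConnection; only the borrowing shape matters here.
type Conn = Vec<String>;

// Mirrors the shape of `run_test` above: the harness owns setup and
// verification, and the test body is an async closure borrowing the
// connection for the duration of its future.
async fn run_test<F>(test: F)
where
    F: AsyncFnOnce(&mut Conn),
{
    let mut conn = Conn::new(); // setup
    test(&mut conn).await; // run the test body
    assert!(!conn.is_empty()); // verification
}

#[tokio::main]
async fn main() {
    run_test(async |conn| {
        conn.push("insert".to_string());
    })
    .await;
}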
diff --git a/store/test-store/tests/postgres/store.rs b/store/test-store/tests/postgres/store.rs
index 28fd05da18f..60fb746fbe8 100644
--- a/store/test-store/tests/postgres/store.rs
+++ b/store/test-store/tests/postgres/store.rs
@@ -133,7 +133,7 @@ where
     run_test_sequentially(|store| async move {
         let subgraph_store = store.subgraph_store();
         // Reset state before starting
-        remove_test_data(subgraph_store.clone());
+        remove_subgraphs().await;

         // Seed database with test data
         let deployment = insert_test_data(subgraph_store.clone()).await;
@@ -182,6 +182,7 @@ async fn insert_test_data(store: Arc<DieselSubgraphStore>) -> DeploymentLocator
             NETWORK_NAME.to_string(),
             SubgraphVersionSwitchingMode::Instant,
         )
+        .await
         .unwrap();

     let test_entity_1 = create_test_entity(
@@ -290,16 +291,10 @@ fn create_test_entity(
     }
 }

-/// Removes test data from the database behind the store.
-fn remove_test_data(store: Arc<DieselSubgraphStore>) {
-    store
-        .delete_all_entities_for_test_use_only()
-        .expect("deleting test entities succeeds");
-}
-
-fn get_entity_count(store: Arc<DieselStore>, subgraph_id: &DeploymentHash) -> u64 {
+async fn get_entity_count(store: Arc<DieselStore>, subgraph_id: &DeploymentHash) -> u64 {
     let info = store
         .status(status::Filter::Deployments(vec![subgraph_id.to_string()]))
+        .await
         .unwrap();
     let info = info.first().unwrap();
     info.entity_count
@@ -311,9 +306,9 @@ fn delete_entity() {
         let entity_key = USER_TYPE.parse_key("3").unwrap();

         // Check that there is an entity to remove.
-        writable.get(&entity_key).unwrap().unwrap();
+        writable.get(&entity_key).await.unwrap().unwrap();

-        let count = get_entity_count(store.clone(), &deployment.hash);
+        let count = get_entity_count(store.clone(), &deployment.hash).await;
         transact_and_wait(
             &store.subgraph_store(),
             &deployment,
@@ -324,10 +319,13 @@ fn delete_entity() {
         )
         .await
         .unwrap();
-        assert_eq!(count, get_entity_count(store.clone(), &deployment.hash) + 1);
+        assert_eq!(
+            count,
+            get_entity_count(store.clone(), &deployment.hash).await + 1
+        );

         // Check that that the deleted entity id is not present
-        assert!(writable.get(&entity_key).unwrap().is_none());
+        assert!(writable.get(&entity_key).await.unwrap().is_none());
     })
 }

@@ -338,7 +336,7 @@ fn get_entity_1() {
         let schema = ReadStore::input_schema(&writable);
         let key = USER_TYPE.parse_key("1").unwrap();
-        let result = writable.get(&key).unwrap();
+        let result = writable.get(&key).await.unwrap();

         let bin_name = Value::Bytes("Johnton".as_bytes().into());
         let expected_entity = entity! { schema =>
@@ -365,7 +363,7 @@ fn get_entity_3() {
     run_test(|_, writable, _| async move {
         let schema = ReadStore::input_schema(&writable);
         let key = USER_TYPE.parse_key("3").unwrap();
-        let result = writable.get(&key).unwrap();
+        let result = writable.get(&key).await.unwrap();

         let expected_entity = entity! { schema =>
             id: "3",
@@ -400,7 +398,7 @@ fn insert_entity() {
             Some("green"),
             5,
         );
-        let count = get_entity_count(store.clone(), &deployment.hash);
+        let count = get_entity_count(store.clone(), &deployment.hash).await;
         transact_and_wait(
             &store.subgraph_store(),
             &deployment,
@@ -409,10 +407,13 @@ fn insert_entity() {
         )
         .await
         .unwrap();
-        assert_eq!(count + 1, get_entity_count(store.clone(), &deployment.hash));
+        assert_eq!(
+            count + 1,
+            get_entity_count(store.clone(), &deployment.hash).await
+        );

         // Check that new record is in the store
-        writable.get(&entity_key).unwrap().unwrap();
+        writable.get(&entity_key).await.unwrap().unwrap();
     })
 }

@@ -438,10 +439,10 @@ fn update_existing() {
         };

         // Verify that the entity before updating is different from what we expect afterwards
-        assert_ne!(writable.get(&entity_key).unwrap().unwrap(), new_data);
+        assert_ne!(writable.get(&entity_key).await.unwrap().unwrap(), new_data);

         // Set test entity; as the entity already exists an update should be performed
-        let count = get_entity_count(store.clone(), &deployment.hash);
+        let count = get_entity_count(store.clone(), &deployment.hash).await;
         transact_entity_operations(
             &store.subgraph_store(),
             &deployment,
@@ -450,7 +451,10 @@ fn update_existing() {
         )
         .await
         .unwrap();
-        assert_eq!(count, get_entity_count(store.clone(), &deployment.hash));
+        assert_eq!(
+            count,
+            get_entity_count(store.clone(), &deployment.hash).await
+        );

         // Verify that the entity in the store has changed to what we have set.
         let bin_name = match new_data.get("bin_name") {
@@ -459,7 +463,7 @@ fn update_existing() {
         };
         new_data.insert("bin_name", Value::Bytes(bin_name)).unwrap();
-        assert_eq!(writable.get(&entity_key).unwrap(), Some(new_data));
+        assert_eq!(writable.get(&entity_key).await.unwrap(), Some(new_data));
     })
 }

@@ -474,6 +478,7 @@ fn partially_update_existing() {

         let original_entity = writable
             .get(&entity_key)
+            .await
             .unwrap()
             .expect("entity not found");

@@ -493,6 +498,7 @@ fn partially_update_existing() {
         // Obtain the updated entity from the store
         let updated_entity = writable
             .get(&entity_key)
+            .await
             .unwrap()
             .expect("entity not found");

@@ -518,7 +524,7 @@ impl QueryChecker {
         Self { store }
     }

-    fn check(self, expected_entity_ids: Vec<&str>, query: EntityQuery) -> Self {
+    async fn check(self, expected_entity_ids: Vec<&str>, query: EntityQuery) -> Self {
         let expected_entity_ids: Vec<String> =
             expected_entity_ids.into_iter().map(str::to_owned).collect();

@@ -526,6 +532,7 @@ impl QueryChecker {
             .store
             .subgraph_store()
             .find(query)
+            .await
             .expect("store.find failed to execute query");

         let entity_ids: Vec<_> = entities
@@ -576,32 +583,38 @@ fn find() {
                 vec!["2"],
                 user_query().filter(EntityFilter::Contains("name".into(), "ind".into())),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query().filter(EntityFilter::Equal("name".to_owned(), "Cindini".into())),
             )
+            .await
             .check(
                 vec!["1", "3"],
                 user_query()
                     .filter(EntityFilter::Not("name".to_owned(), "Cindini".into()))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["3"],
                 user_query().filter(EntityFilter::GreaterThan("name".to_owned(), "Kundi".into())),
             )
+            .await
             .check(
                 vec!["2", "1"],
                 user_query()
                     .filter(EntityFilter::LessThan("name".to_owned(), "Kundi".into()))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["1", "2"],
                 user_query()
                     .filter(EntityFilter::LessThan("name".to_owned(), "Kundi".into()))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query()
@@ -610,6 +623,7 @@ fn find() {
                     .first(1)
                     .skip(1),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -619,24 +633,28 @@ fn find() {
                     ]))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
                     .filter(EntityFilter::EndsWith("name".to_owned(), "ini".into()))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["3", "1"],
                 user_query()
                     .filter(EntityFilter::NotEndsWith("name".to_owned(), "ini".into()))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query()
                     .filter(EntityFilter::In("name".to_owned(), vec!["Johnton".into()]))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1", "2"],
                 user_query()
@@ -645,7 +663,8 @@ fn find() {
                         vec!["Shaqueeena".into()],
                    ))
                     .desc("name"),
-            );
+            )
+            .await;
         // Filter tests with float attributes
         QueryChecker::new(store.clone())
             .check(
                 vec!["2"],
                 user_query().filter(EntityFilter::Equal(
                     "weight".to_owned(),
                     Value::BigDecimal(184.4.into()),
                 )),
             )
+            .await
             .check(
                 vec!["3", "2"],
                 user_query()
@@ -665,6 +685,7 @@ fn find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query().filter(EntityFilter::GreaterThan(
@@ -672,6 +693,7 @@ fn find() {
                     Value::BigDecimal(160.0.into()),
                 )),
             )
+            .await
             .check(
                 vec!["2", "3"],
                 user_query()
@@ -681,6 +703,7 @@ fn find() {
                     ))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["3", "2"],
                 user_query()
@@ -690,6 +713,7 @@ fn find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -701,6 +725,7 @@ fn find() {
                     .first(1)
                     .skip(1),
             )
+            .await
             .check(
                 vec!["3", "1"],
                 user_query()
@@ -714,6 +739,7 @@ fn find() {
                     .desc("name")
                     .first(5),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -726,7 +752,8 @@ fn find() {
                     ))
                     .desc("name")
                     .first(5),
-            );
+            )
+            .await;
         // Filter tests with int attributes
         QueryChecker::new(store.clone())
             .check(
@@ -735,12 +762,14 @@ fn find() {
                     .filter(EntityFilter::Equal("age".to_owned(), Value::Int(67_i32)))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["3", "2"],
                 user_query()
                     .filter(EntityFilter::Not("age".to_owned(), Value::Int(67_i32)))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1"],
                 user_query().filter(EntityFilter::GreaterThan(
@@ -748,6 +777,7 @@ fn find() {
                     Value::Int(43_i32),
                 )),
             )
+            .await
             .check(
                 vec!["2", "1"],
                 user_query()
@@ -757,12 +787,14 @@ fn find() {
                     ))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["2", "3"],
                 user_query()
                     .filter(EntityFilter::LessThan("age".to_owned(), Value::Int(50_i32)))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["2", "3"],
                 user_query()
@@ -772,12 +804,14 @@ fn find() {
                     ))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["3", "2"],
                 user_query()
                     .filter(EntityFilter::LessThan("age".to_owned(), Value::Int(50_i32)))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -786,6 +820,7 @@ fn find() {
                     .first(1)
                     .skip(1),
             )
+            .await
             .check(
                 vec!["1", "2"],
                 user_query()
@@ -796,6 +831,7 @@ fn find() {
                     .desc("name")
                     .first(5),
             )
+            .await
             .check(
                 vec!["3"],
                 user_query()
@@ -805,7 +841,8 @@ fn find() {
                     ))
                     .desc("name")
                     .first(5),
-            );
+            )
+            .await;
         // Filter tests with bool attributes
         QueryChecker::new(store.clone())
             .check(
@@ -814,12 +851,14 @@ fn find() {
                     .filter(EntityFilter::Equal("coffee".to_owned(), Value::Bool(true)))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["1", "3"],
                 user_query()
                     .filter(EntityFilter::Not("coffee".to_owned(), Value::Bool(true)))
                     .asc("name"),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -830,6 +869,7 @@ fn find() {
                     .desc("name")
                     .first(5),
             )
+            .await
             .check(
                 vec!["3", "1"],
                 user_query()
@@ -839,7 +879,8 @@ fn find() {
                     ))
                     .desc("name")
                     .first(5),
-            );
+            )
+            .await;
         // Misc filter tests
         QueryChecker::new(store)
             .check(
@@ -851,6 +892,7 @@ fn find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["3", "1"],
                 user_query()
@@ -860,6 +902,7 @@ fn find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["3", "1"],
                 user_query()
@@ -869,12 +912,14 @@ fn find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
                     .filter(EntityFilter::Not("favorite_color".to_owned(), Value::Null))
                     .desc("name"),
             )
+            .await
             .check(
                 vec!["2"],
                 user_query()
@@ -884,14 +929,23 @@ fn find() {
                     ))
                     .desc("name"),
             )
+            .await
             .check(vec!["3", "2", "1"], user_query().asc("weight"))
+            .await
             .check(vec!["1", "2", "3"], user_query().desc("weight"))
+            .await
             .check(vec!["1", "2", "3"], user_query().asc("id"))
+            .await
             .check(vec!["3", "2", "1"], user_query().desc("id"))
+            .await
             .check(vec!["3", "2", "1"], user_query().asc("age"))
+            .await
             .check(vec!["1", "2", "3"], user_query().desc("age"))
+            .await
             .check(vec!["2", "1", "3"], user_query().asc("name"))
+            .await
             .check(vec!["3", "1", "2"], user_query().desc("name"))
+            .await
             .check(
                 vec!["1", "2"],
                 user_query()
@@ -900,7 +954,8 @@ fn find() {
                         EntityFilter::Equal("id".to_owned(), Value::from("2")),
                     ])]))
                     .asc("id"),
-            );
+            )
+            .await;
     });
 }

@@ -921,6 +976,7 @@ async fn check_basic_revert(store: Arc<DieselStore>, deployment: &DeploymentLoca
     let returned_entities = store
         .subgraph_store()
         .find(this_query.clone())
+        .await
         .expect("store.find operation failed");

     // There should be 1 user returned in results
@@ -939,9 +995,12 @@ async fn check_basic_revert(store: Arc<DieselStore>, deployment: &DeploymentLoca
 #[test]
 fn revert_block_basic_user() {
     run_test(|store, _, deployment| async move {
-        let count = get_entity_count(store.clone(), &deployment.hash);
+        let count = get_entity_count(store.clone(), &deployment.hash).await;
         check_basic_revert(store.clone(), &deployment).await;
-        assert_eq!(count, get_entity_count(store.clone(), &deployment.hash));
+        assert_eq!(
+            count,
+            get_entity_count(store.clone(), &deployment.hash).await
+        );
     })
 }

@@ -969,14 +1028,18 @@ fn revert_block_with_delete() {
         .unwrap();

         // Revert deletion
-        let count = get_entity_count(store.clone(), &deployment.hash);
+        let count = get_entity_count(store.clone(), &deployment.hash).await;
         revert_block(&store, &deployment, &TEST_BLOCK_2_PTR).await;
-        assert_eq!(count + 1, get_entity_count(store.clone(), &deployment.hash));
+        assert_eq!(
+            count + 1,
+            get_entity_count(store.clone(), &deployment.hash).await
+        );

         // Query after revert
         let returned_entities = store
             .subgraph_store()
             .find(this_query.clone())
+            .await
             .expect("store.find operation failed");

         // There should be 1 entity returned in results
@@ -999,7 +1062,11 @@ fn revert_block_with_partial_update() {
        let partial_entity = entity! { schema =>
            id: "1", name: "Johnny Boy", email: Value::Null, vid: 5i64
        };

-        let original_entity = writable.get(&entity_key).unwrap().expect("missing entity");
+        let original_entity = writable
+            .get(&entity_key)
+            .await
+            .unwrap()
+            .expect("missing entity");

         // Set test entity; as the entity already exists an update should be performed
         transact_entity_operations(
@@ -1015,12 +1082,19 @@ fn revert_block_with_partial_update() {
         .unwrap();

         // Perform revert operation, reversing the partial update
-        let count = get_entity_count(store.clone(), &deployment.hash);
+        let count = get_entity_count(store.clone(), &deployment.hash).await;
         revert_block(&store, &deployment, &TEST_BLOCK_2_PTR).await;
-        assert_eq!(count, get_entity_count(store.clone(), &deployment.hash));
+        assert_eq!(
+            count,
+            get_entity_count(store.clone(), &deployment.hash).await
+        );

         // Obtain the reverted entity from the store
-        let reverted_entity = writable.get(&entity_key).unwrap().expect("missing entity");
+        let reverted_entity = writable
+            .get(&entity_key)
+            .await
+            .unwrap()
+            .expect("missing entity");

         // Verify that the entity has been returned to its original state
         assert_eq!(reverted_entity, original_entity);
@@ -1089,7 +1163,11 @@ fn revert_block_with_dynamic_data_source_operations() {
             entity! { schema => id: "1", name: "Johnny Boy", email: Value::Null, vid: 5i64 };

         // Get the original user for comparisons
-        let original_user = writable.get(&user_key).unwrap().expect("missing entity");
+        let original_user = writable
+            .get(&user_key)
+            .await
+            .unwrap()
+            .expect("missing entity");

         // Create operations to add a dynamic data source
         let mut data_source = mock_data_source();
@@ -1115,7 +1193,11 @@ fn revert_block_with_dynamic_data_source_operations() {

         // Verify that the user is no longer the original
         assert_ne!(
-            writable.get(&user_key).unwrap().expect("missing entity"),
+            writable
+                .get(&user_key)
+                .await
+                .unwrap()
+                .expect("missing entity"),
             original_user
         );

@@ -1135,7 +1217,11 @@ fn revert_block_with_dynamic_data_source_operations() {

         // Verify that the user is the original again
         assert_eq!(
-            writable.get(&user_key).unwrap().expect("missing entity"),
+            writable
+                .get(&user_key)
+                .await
+                .unwrap()
+                .expect("missing entity"),
             original_user
         );

@@ -1154,6 +1240,7 @@ fn subgraph_schema_types_have_subgraph_id_directive() {
         let schema = store
             .subgraph_store()
             .api_schema(&deployment.hash, &Default::default())
+            .await
             .expect("test subgraph should have a schema");
         for typedef in schema
             .definitions()
@@ -1260,6 +1347,7 @@ fn handle_large_string_with_index() {
         let ids: Vec<_> = store
             .subgraph_store()
             .find(query)
+            .await
             .expect("Could not find entity")
             .iter()
             .map(|e| e.id())
@@ -1278,6 +1366,7 @@ fn handle_large_string_with_index() {
         let ids: Vec<_> = store
             .subgraph_store()
             .find(query)
+            .await
             .expect("Could not find entity")
             .iter()
             .map(|e| e.id())
@@ -1365,6 +1454,7 @@ fn handle_large_bytea_with_index() {
         let ids: Vec<_> = store
             .subgraph_store()
             .find(query)
+            .await
             .expect("Could not find entity")
             .iter()
             .map(|e| e.id())
@@ -1383,6 +1473,7 @@ fn handle_large_bytea_with_index() {
         let ids: Vec<_> = store
             .subgraph_store()
             .find(query)
+            .await
             .expect("Could not find entity")
             .iter()
             .map(|e| e.id())
@@ -1480,12 +1571,13 @@ impl WindowQuery {
         WindowQuery(query, self.1).default_window()
     }

-    fn expect(&self, mut expected_ids: Vec<&str>, qid: &str) {
+    async fn expect(&self, mut expected_ids: Vec<&str>, qid: &str) {
         let query = self.0.clone();
         let store = &self.1;
         let unordered = matches!(query.order, EntityOrder::Unordered);
         let mut entity_ids = store
             .find(query)
+            .await
             .expect("store.find failed to execute query")
             .into_iter()
             .map(|entity| match entity.get("id") {
@@ -1554,35 +1646,41 @@ fn window() {
         // Get the first 2 entries in each 'color group'
         WindowQuery::new(&store)
             .first(2)
-            .expect(vec!["10", "11", "4", "5", "2", "7", "9"], "q1");
+            .expect(vec!["10", "11", "4", "5", "2", "7", "9"], "q1")
+            .await;

         WindowQuery::new(&store)
             .first(1)
-            .expect(vec!["10", "4", "2", "9"], "q2");
+            .expect(vec!["10", "4", "2", "9"], "q2")
+            .await;

         WindowQuery::new(&store)
             .first(1)
             .skip(1)
-            .expect(vec!["11", "5", "7"], "q3");
+            .expect(vec!["11", "5", "7"], "q3")
+            .await;

         WindowQuery::new(&store)
             .first(1)
             .skip(1)
             .desc("id")
-            .expect(vec!["10", "5", "7"], "q4");
+            .expect(vec!["10", "5", "7"], "q4")
+            .await;

         WindowQuery::new(&store)
             .first(1)
             .skip(1)
             .desc("favorite_color")
-            .expect(vec!["10", "5", "7"], "q5");
+            .expect(vec!["10", "5", "7"], "q5")
+            .await;

         WindowQuery::new(&store)
             .first(1)
             .skip(1)
             .desc("favorite_color")
             .above(25)
-            .expect(vec!["4", "2"], "q6");
+            .expect(vec!["4", "2"], "q6")
+            .await;

         // Check queries for interfaces
         WindowQuery::new(&store)
@@ -1591,14 +1689,16 @@ fn window() {
             .desc("favorite_color")
             .above(12)
             .against_color_and_age()
-            .expect(vec!["10", "5", "8"], "q7");
+            .expect(vec!["10", "5", "8"], "q7")
+            .await;

         WindowQuery::new(&store)
             .first(1)
             .asc("age")
             .above(12)
             .against_color_and_age()
-            .expect(vec!["11", "5", "p2", "9"], "q8");
+            .expect(vec!["11", "5", "p2", "9"], "q8")
+            .await;

         WindowQuery::new(&store)
             .unordered()
@@ -1607,7 +1707,8 @@ fn window() {
             .expect(
                 vec!["10", "11", "2", "4", "5", "6", "7", "8", "9", "p2"],
                 "q9",
-            );
+            )
+            .await;
     });
 }

@@ -1623,6 +1724,7 @@ fn find_at_block() {
         let entities = store
             .subgraph_store()
             .find(query)
+            .await
             .expect("store.find failed to execute query");

         assert_eq!(1, entities.len());
@@ -1658,9 +1760,11 @@ fn cleanup_cached_blocks() {
         let chain_store = store
             .block_store()
             .chain_store(NETWORK_NAME)
+            .await
             .expect("fake chain store");

         let cleaned = chain_store
             .cleanup_cached_blocks(10)
+            .await
             .expect("cleanup succeeds");
         assert_eq!(Some((2, 1)), cleaned);
     })
@@ -1690,6 +1794,7 @@ fn parse_timestamp() {
         let chain_store = store
             .block_store()
             .chain_store(NETWORK_NAME)
+            .await
             .expect("fake chain store");

         let (_network, number, timestamp, _) = chain_store
@@ -1724,6 +1829,7 @@ fn parse_timestamp_firehose() {
         let chain_store = store
             .block_store()
             .chain_store(NETWORK_NAME)
+            .await
             .expect("fake chain store");

         let (_network, number, timestamp, _) = chain_store
@@ -1758,6 +1864,7 @@ fn parse_null_timestamp() {
         let chain_store = store
             .block_store()
             .chain_store(NETWORK_NAME)
+            .await
             .expect("fake chain store");

         let (_network, number, timestamp, _) = chain_store
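In store.rs, read accessors such as `get_entity_count` and `WritableStore::get` turn async, so the counting assertions now await before doing arithmetic. A runnable toy of the same shape (hypothetical names, not the graph-node API; assumes only tokio):

// Toy analogue of the now-async `get_entity_count` above.
async fn get_entity_count(store: &[&str]) -> u64 {
    store.len() as u64
}

#[tokio::main]
async fn main() {
    let mut store = vec!["user-1", "user-2"];
    let count = get_entity_count(&store).await;
    store.push("user-3");
    // Same shape as the updated assertions: await first, then compare.
    assert_eq!(count + 1, get_entity_count(&store).await);
}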
diff --git a/store/test-store/tests/postgres/subgraph.rs b/store/test-store/tests/postgres/subgraph.rs
index c66d34e27c7..5cd31c93e44 100644
--- a/store/test-store/tests/postgres/subgraph.rs
+++ b/store/test-store/tests/postgres/subgraph.rs
@@ -1,24 +1,20 @@
-use graph::futures03;
 use graph::{
     components::{
         server::index_node::VersionInfo,
         store::{DeploymentId, DeploymentLocator, StatusStore},
     },
-    data::query::QueryTarget,
-    data::subgraph::{schema::SubgraphHealth, SubgraphFeature},
-    data::subgraph::{
-        schema::{DeploymentCreate, SubgraphError},
-        DeploymentFeatures,
+    data::{
+        query::QueryTarget,
+        subgraph::{
+            schema::{DeploymentCreate, SubgraphError, SubgraphHealth},
+            DeploymentFeatures, SubgraphFeature,
+        },
+    },
+    prelude::{
+        AssignmentChange, BlockPtr, CheapClone, DeploymentHash, NodeId, QueryStoreManager,
+        StoreError, StoreEvent, SubgraphManifest, SubgraphName, SubgraphStore as _,
+        SubgraphVersionSwitchingMode, UnfailOutcome,
     },
-    prelude::AssignmentChange,
-    prelude::BlockPtr,
-    prelude::QueryStoreManager,
-    prelude::StoreEvent,
-    prelude::SubgraphManifest,
-    prelude::SubgraphName,
-    prelude::SubgraphVersionSwitchingMode,
-    prelude::UnfailOutcome,
-    prelude::{CheapClone, DeploymentHash, NodeId, SubgraphStore as _},
     schema::InputSchema,
     semver::Version,
 };
@@ -66,16 +62,16 @@ fn unassigned(deployment: &DeploymentLocator) -> AssignmentChange {
     AssignmentChange::removed(deployment.clone())
 }

-fn get_version_info(store: &Store, subgraph_name: &str) -> VersionInfo {
-    let mut primary = primary_connection();
-    let (current, _) = primary.versions_for_subgraph(subgraph_name).unwrap();
+async fn get_version_info(store: &Store, subgraph_name: &str) -> VersionInfo {
+    let mut primary = primary_connection().await;
+    let (current, _) = primary.versions_for_subgraph(subgraph_name).await.unwrap();
     let current = current.unwrap();
-    store.version_info(&current).unwrap()
+    store.version_info(&current).await.unwrap()
 }

-fn get_subgraph_features(id: String) -> Option<DeploymentFeatures> {
-    let mut primary = primary_connection();
-    primary.get_subgraph_features(id).unwrap()
+async fn get_subgraph_features(id: String) -> Option<DeploymentFeatures> {
+    let mut primary = primary_connection().await;
+    primary.get_subgraph_features(id).await.unwrap()
 }

 async fn latest_block(store: &Store, deployment_id: DeploymentId) -> BlockPtr {
@@ -92,13 +88,17 @@ fn reassign_subgraph() {
     async fn setup() -> DeploymentLocator {
         let id = DeploymentHash::new("reassignSubgraph").unwrap();
-        remove_subgraphs();
+        remove_subgraphs().await;
         create_test_subgraph(&id, SUBGRAPH_GQL).await
     }

-    fn find_assignment(store: &SubgraphStore, deployment: &DeploymentLocator) -> Option<String> {
+    async fn find_assignment(
+        store: &SubgraphStore,
+        deployment: &DeploymentLocator,
+    ) -> Option<String> {
         store
             .assigned_node(deployment)
+            .await
             .unwrap()
             .map(|node| node.to_string())
     }
@@ -108,7 +108,7 @@ fn reassign_subgraph() {
         let store = store.subgraph_store();

         // Check our setup
-        let node = find_assignment(store.as_ref(), &id);
+        let node = find_assignment(store.as_ref(), &id).await;
         let placement = place("test").expect("the test config places deployments");
         if let Some((_, nodes)) = placement {
             // If the test config does not have deployment rules, we can't check
@@ -125,8 +125,9 @@ fn reassign_subgraph() {
         let node = NodeId::new("left").unwrap();
         let expected = vec![StoreEvent::new(vec![assigned(&id)])];

-        let (_, events) = tap_store_events(|| store.reassign_subgraph(&id, &node).unwrap());
-        let node = find_assignment(store.as_ref(), &id);
+        let (_, events) =
+            tap_store_events(async || store.reassign_subgraph(&id, &node).await.unwrap()).await;
+        let node = find_assignment(store.as_ref(), &id).await;
         assert_eq!(Some("left"), node.as_deref());
         assert_eq!(expected, events);
     }
@@ -138,21 +139,31 @@ fn create_subgraph() {
     const SUBGRAPH_NAME: &str = "create/subgraph";

     // Return the versions (not deployments) for a subgraph
-    fn subgraph_versions(primary: &mut Primary) -> (Option<String>, Option<String>) {
-        primary.versions_for_subgraph(SUBGRAPH_NAME).unwrap()
+    async fn subgraph_versions(primary: &mut Primary) -> (Option<String>, Option<String>) {
+        primary.versions_for_subgraph(SUBGRAPH_NAME).await.unwrap()
+    }
+
+    async fn deployment_for_version(
+        primary: &mut Primary,
+        name: Option<String>,
+    ) -> Result<Option<String>, StoreError> {
+        match name {
+            None => Ok(None),
+            Some(name) => primary.deployment_for_version(&name).await,
+        }
     }

     /// Return the deployment for the current and the pending version of the
     /// subgraph with the given `entity_id`
-    fn subgraph_deployments(primary: &mut Primary) -> (Option<String>, Option<String>) {
-        let (current, pending) = subgraph_versions(primary);
+    async fn subgraph_deployments(primary: &mut Primary) -> (Option<String>, Option<String>) {
+        let (current, pending) = subgraph_versions(primary).await;
         (
-            current.and_then(|v| primary.deployment_for_version(&v).unwrap()),
-            pending.and_then(|v| primary.deployment_for_version(&v).unwrap()),
+            deployment_for_version(primary, current).await.unwrap(),
+            deployment_for_version(primary, pending).await.unwrap(),
         )
     }

-    fn deploy(
+    async fn deploy(
         store: &SubgraphStore,
         id: &str,
         mode: SubgraphVersionSwitchingMode,
@@ -177,7 +188,7 @@ fn create_subgraph() {
         let deployment = DeploymentCreate::new(String::new(), &manifest, None);
         let node_id = NodeId::new("left").unwrap();

-        let (deployment, events) = tap_store_events(|| {
+        let (deployment, events) = tap_store_events(async || {
             store
                 .create_subgraph_deployment(
                     name,
@@ -187,8 +198,10 @@ fn create_subgraph() {
                     NETWORK_NAME.to_string(),
                     mode,
                 )
+                .await
                 .unwrap()
-        });
+        })
+        .await;
         let events = events
             .into_iter()
             .flat_map(|event| event.changes.into_iter())
@@ -202,24 +215,24 @@ fn create_subgraph() {
         changes
     }

-    fn deployment_synced(
+    async fn deployment_synced(
         store: &Arc<SubgraphStore>,
         deployment: &DeploymentLocator,
         block_ptr: BlockPtr,
     ) {
-        futures03::executor::block_on(store.cheap_clone().writable(
-            LOGGER.clone(),
-            deployment.id,
-            Arc::new(Vec::new()),
-        ))
-        .expect("can get writable")
-        .deployment_synced(block_ptr)
-        .unwrap();
+        store
+            .cheap_clone()
+            .writable(LOGGER.clone(), deployment.id, Arc::new(Vec::new()))
+            .await
+            .expect("can get writable")
+            .deployment_synced(block_ptr)
+            .await
+            .unwrap();
     }

     // Test VersionSwitchingMode::Instant
     run_test_sequentially(|store| async move {
-        remove_subgraphs();
+        remove_subgraphs().await;
         let store = store.subgraph_store();

         const MODE: SubgraphVersionSwitchingMode = SubgraphVersionSwitchingMode::Instant;
@@ -227,51 +240,52 @@ fn create_subgraph() {
         const ID2: &str = "instant2";
         const ID3: &str = "instant3";

-        let mut primary = primary_connection();
+        let mut primary = primary_connection().await;

         let name = SubgraphName::new(SUBGRAPH_NAME.to_string()).unwrap();
-        let (_, events) = tap_store_events(|| store.create_subgraph(name.clone()).unwrap());
-        let (current, pending) = subgraph_deployments(&mut primary);
+        let (_, events) =
+            tap_store_events(async || store.create_subgraph(name.clone()).await.unwrap()).await;
+        let (current, pending) = subgraph_deployments(&mut primary).await;
         assert!(events.is_empty());
         assert!(current.is_none());
         assert!(pending.is_none());

         // Deploy
-        let (deployment1, events) = deploy(store.as_ref(), ID1, MODE);
+        let (deployment1, events) = deploy(store.as_ref(), ID1, MODE).await;
         let expected = deploy_event(&deployment1);
         assert_eq!(expected, events);

-        let (current, pending) = subgraph_deployments(&mut primary);
+        let (current, pending) = subgraph_deployments(&mut primary).await;
         assert_eq!(Some(ID1), current.as_deref());
         assert!(pending.is_none());

         // Deploying again overwrites current
-        let (deployment2, events) = deploy(store.as_ref(), ID2, MODE);
+        let (deployment2, events) = deploy(store.as_ref(), ID2, MODE).await;
deploy_event(&deployment2); expected.insert(unassigned(&deployment1)); assert_eq!(expected, events); - let (current, pending) = subgraph_deployments(&mut primary); + let (current, pending) = subgraph_deployments(&mut primary).await; assert_eq!(Some(ID2), current.as_deref()); assert!(pending.is_none()); // Sync deployment - deployment_synced(&store, &deployment2, GENESIS_PTR.clone()); + deployment_synced(&store, &deployment2, GENESIS_PTR.clone()).await; // Deploying again still overwrites current - let (deployment3, events) = deploy(store.as_ref(), ID3, MODE); + let (deployment3, events) = deploy(store.as_ref(), ID3, MODE).await; let mut expected = deploy_event(&deployment3); expected.insert(unassigned(&deployment2)); assert_eq!(expected, events); - let (current, pending) = subgraph_deployments(&mut primary); + let (current, pending) = subgraph_deployments(&mut primary).await; assert_eq!(Some(ID3), current.as_deref()); assert!(pending.is_none()); }); // Test VersionSwitchingMode::Synced run_test_sequentially(|store| async move { - remove_subgraphs(); + remove_subgraphs().await; let store = store.subgraph_store(); const MODE: SubgraphVersionSwitchingMode = SubgraphVersionSwitchingMode::Synced; @@ -279,60 +293,61 @@ fn create_subgraph() { const ID2: &str = "synced2"; const ID3: &str = "synced3"; - let mut primary = primary_connection(); + let mut primary = primary_connection().await; let name = SubgraphName::new(SUBGRAPH_NAME.to_string()).unwrap(); - let (_, events) = tap_store_events(|| store.create_subgraph(name.clone()).unwrap()); - let (current, pending) = subgraph_deployments(&mut primary); + let (_, events) = + tap_store_events(async || store.create_subgraph(name.clone()).await.unwrap()).await; + let (current, pending) = subgraph_deployments(&mut primary).await; assert!(events.is_empty()); assert!(current.is_none()); assert!(pending.is_none()); // Deploy - let (deployment1, events) = deploy(store.as_ref(), ID1, MODE); + let (deployment1, events) = deploy(store.as_ref(), ID1, MODE).await; let expected = deploy_event(&deployment1); assert_eq!(expected, events); - let versions = subgraph_versions(&mut primary); - let (current, pending) = subgraph_deployments(&mut primary); + let versions = subgraph_versions(&mut primary).await; + let (current, pending) = subgraph_deployments(&mut primary).await; assert_eq!(Some(ID1), current.as_deref()); assert!(pending.is_none()); // Deploying the same thing again does nothing - let (deployment1_again, events) = deploy(store.as_ref(), ID1, MODE); + let (deployment1_again, events) = deploy(store.as_ref(), ID1, MODE).await; assert!(events.is_empty()); assert_eq!(&deployment1, &deployment1_again); - let versions2 = subgraph_versions(&mut primary); + let versions2 = subgraph_versions(&mut primary).await; assert_eq!(versions, versions2); // Deploy again, current is not synced, so it gets replaced - let (deployment2, events) = deploy(store.as_ref(), ID2, MODE); + let (deployment2, events) = deploy(store.as_ref(), ID2, MODE).await; let mut expected = deploy_event(&deployment2); expected.insert(unassigned(&deployment1)); assert_eq!(expected, events); - let (current, pending) = subgraph_deployments(&mut primary); + let (current, pending) = subgraph_deployments(&mut primary).await; assert_eq!(Some(ID2), current.as_deref()); assert!(pending.is_none()); // Deploy when current is synced leaves current alone and adds pending - deployment_synced(&store, &deployment2, GENESIS_PTR.clone()); - let (deployment3, events) = deploy(store.as_ref(), ID3, MODE); + 
+        deployment_synced(&store, &deployment2, GENESIS_PTR.clone()).await;
+        let (deployment3, events) = deploy(store.as_ref(), ID3, MODE).await;
         let expected = deploy_event(&deployment3);
         assert_eq!(expected, events);
 
-        let versions = subgraph_versions(&mut primary);
-        let (current, pending) = subgraph_deployments(&mut primary);
+        let versions = subgraph_versions(&mut primary).await;
+        let (current, pending) = subgraph_deployments(&mut primary).await;
         assert_eq!(Some(ID2), current.as_deref());
         assert_eq!(Some(ID3), pending.as_deref());
 
         // Deploying that same thing again changes nothing
-        let (deployment3_again, events) = deploy(store.as_ref(), ID3, MODE);
+        let (deployment3_again, events) = deploy(store.as_ref(), ID3, MODE).await;
         assert!(events.is_empty());
         assert_eq!(&deployment3, &deployment3_again);
-        let versions2 = subgraph_versions(&mut primary);
+        let versions2 = subgraph_versions(&mut primary).await;
         assert_eq!(versions, versions2);
 
-        let (current, pending) = subgraph_deployments(&mut primary);
+        let (current, pending) = subgraph_deployments(&mut primary).await;
         assert_eq!(Some(ID2), current.as_deref());
         assert_eq!(Some(ID3), pending.as_deref());
 
@@ -342,22 +357,22 @@ fn create_subgraph() {
         let mut expected = HashSet::new();
         expected.insert(unassigned(&deployment3));
 
-        let (deployment2_again, events) = deploy(store.as_ref(), ID2, MODE);
+        let (deployment2_again, events) = deploy(store.as_ref(), ID2, MODE).await;
         assert_eq!(&deployment2, &deployment2_again);
         assert_eq!(expected, events);
 
-        let (current, pending) = subgraph_deployments(&mut primary);
+        let (current, pending) = subgraph_deployments(&mut primary).await;
         assert_eq!(Some(ID2), current.as_deref());
         assert_eq!(None, pending.as_deref());
 
         // Mark `ID3` as synced and deploy that again
-        deployment_synced(&store, &deployment3, GENESIS_PTR.clone());
+        deployment_synced(&store, &deployment3, GENESIS_PTR.clone()).await;
         let expected = HashSet::from([unassigned(&deployment2), assigned(&deployment3)]);
-        let (deployment3_again, events) = deploy(store.as_ref(), ID3, MODE);
+        let (deployment3_again, events) = deploy(store.as_ref(), ID3, MODE).await;
         assert_eq!(&deployment3, &deployment3_again);
         assert_eq!(expected, events);
 
-        let (current, pending) = subgraph_deployments(&mut primary);
+        let (current, pending) = subgraph_deployments(&mut primary).await;
         assert_eq!(Some(ID3), current.as_deref());
         assert_eq!(None, pending.as_deref());
     })
@@ -370,7 +385,7 @@ fn status() {
     async fn setup() -> DeploymentLocator {
         let id = DeploymentHash::new(NAME).unwrap();
 
-        remove_subgraphs();
+        remove_subgraphs().await;
         let deployment = create_test_subgraph(&id, SUBGRAPH_GQL).await;
         create_test_subgraph(&DeploymentHash::new(OTHER).unwrap(), SUBGRAPH_GQL).await;
         deployment
@@ -386,13 +401,17 @@ fn status() {
                 "notASubgraph".to_string(),
                 "not-even-a-valid-id".to_string(),
             ]))
+            .await
             .unwrap();
         assert_eq!(1, infos.len());
         let info = infos.first().unwrap();
         assert_eq!(NAME, info.subgraph);
         assert!(!info.synced);
 
-        let infos = store.status(status::Filter::Deployments(vec![])).unwrap();
+        let infos = store
+            .status(status::Filter::Deployments(vec![]))
+            .await
+            .unwrap();
         assert_eq!(2, infos.len());
         let info = infos
             .into_iter()
@@ -403,6 +422,7 @@ fn status() {
 
         let infos = store
             .status(status::Filter::SubgraphName(NAME.to_string()))
+            .await
             .unwrap();
         assert_eq!(1, infos.len());
         let info = infos.first().unwrap();
@@ -411,6 +431,7 @@ fn status() {
 
         let infos = store
             .status(status::Filter::SubgraphVersion(NAME.to_string(), true))
+            .await
             .unwrap();
         assert_eq!(1, infos.len());
         let info = infos.first().unwrap();
@@ -419,16 +440,19 @@ fn status() {
 
         let infos = store
             .status(status::Filter::SubgraphVersion(NAME.to_string(), false))
+            .await
             .unwrap();
         assert!(infos.is_empty());
 
         let infos = store
             .status(status::Filter::SubgraphName("invalid name".to_string()))
+            .await
             .unwrap();
         assert_eq!(0, infos.len());
 
         let infos = store
             .status(status::Filter::SubgraphName("notASubgraph".to_string()))
+            .await
             .unwrap();
         assert_eq!(0, infos.len());
 
@@ -437,6 +461,7 @@ fn status() {
                 "notASubgraph".to_string(),
                 true,
             ))
+            .await
             .unwrap();
         assert_eq!(0, infos.len());
 
@@ -461,6 +486,7 @@ fn status() {
             .status(status::Filter::Deployments(vec![deployment
                 .hash
                 .to_string()]))
+            .await
             .unwrap();
         assert_eq!(1, infos.len());
         let info = infos.first().unwrap();
@@ -479,7 +505,7 @@ fn version_info() {
     async fn setup() -> DeploymentLocator {
         let id = DeploymentHash::new(NAME).unwrap();
 
-        remove_subgraphs();
+        remove_subgraphs().await;
         block_store::set_chain(vec![], NETWORK_NAME).await;
         create_test_subgraph(&id, SUBGRAPH_GQL).await
     }
@@ -495,7 +521,7 @@ fn version_info() {
             .await
             .unwrap();
 
-        let vi = get_version_info(&store, NAME);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.synced);
         assert_eq!(false, vi.failed);
@@ -521,7 +547,7 @@ fn subgraph_features() {
         const NAME: &str = "subgraph_features";
         let id = DeploymentHash::new(NAME).unwrap();
 
-        remove_subgraphs();
+        remove_subgraphs().await;
         block_store::set_chain(vec![], NETWORK_NAME).await;
         create_test_subgraph_with_features(&id, SUBGRAPH_FEATURES_GQL).await;
@@ -537,7 +563,7 @@ fn subgraph_features() {
             has_bytes_as_ids,
             immutable_entities,
             has_aggregations,
-        } = get_subgraph_features(id.to_string()).unwrap();
+        } = get_subgraph_features(id.to_string()).await.unwrap();
 
         assert_eq!(NAME, subgraph_id.as_str());
         assert_eq!("1.3.0", spec_version);
@@ -562,8 +588,8 @@ fn subgraph_features() {
             vec!["User2".to_string(), "Data".to_string()]
         );
 
-        test_store::remove_subgraph(&id);
-        let features = get_subgraph_features(id.to_string());
+        test_store::remove_subgraph(&id).await;
+        let features = get_subgraph_features(id.to_string()).await;
         // Subgraph was removed, so we expect the entry to be removed from `subgraph_features` table
         assert!(features.is_none());
     })
@@ -576,9 +602,9 @@ fn subgraph_error() {
         let deployment =
            test_store::create_test_subgraph(&subgraph_id, "type Foo @entity { id: ID! }").await;
-        let count = || -> usize {
+        let count = async || -> usize {
             let store = store.subgraph_store();
-            store.error_count(&subgraph_id).unwrap()
+            store.error_count(&subgraph_id).await.unwrap()
         };
 
         let error = SubgraphError {
@@ -589,12 +615,12 @@ fn subgraph_error() {
             deterministic: false,
         };
 
-        assert!(count() == 0);
+        assert!(count().await == 0);
 
         transact_errors(&store, &deployment, BLOCKS[1].clone(), vec![error], false)
             .await
             .unwrap();
-        assert!(count() == 1);
+        assert!(count().await == 1);
 
         let error = SubgraphError {
             subgraph_id: subgraph_id.clone(),
@@ -608,7 +634,7 @@ fn subgraph_error() {
         transact_errors(&store, &deployment, BLOCKS[2].clone(), vec![error], false)
             .await
             .unwrap();
-        assert!(count() == 1);
+        assert!(count().await == 1);
 
         let error2 = SubgraphError {
             subgraph_id: subgraph_id.clone(),
@@ -621,9 +647,9 @@ fn subgraph_error() {
         transact_errors(&store, &deployment, BLOCKS[3].clone(), vec![error2], false)
             .await
             .unwrap();
-        assert!(count() == 2);
+        assert!(count().await == 2);
 
-        test_store::remove_subgraph(&subgraph_id);
+        test_store::remove_subgraph(&subgraph_id).await;
     })
 }
 
@@ -635,9 +661,9 @@ fn subgraph_non_fatal_error() {
         let deployment =
             test_store::create_test_subgraph(&subgraph_id, "type Foo @entity { id: ID! }").await;
 
-        let count = || -> usize {
+        let count = async || -> usize {
             let store = store.subgraph_store();
-            let count = store.error_count(&subgraph_id).unwrap();
+            let count = store.error_count(&subgraph_id).await.unwrap();
             println!("count: {}", count);
             count
         };
@@ -650,14 +676,14 @@ fn subgraph_non_fatal_error() {
             deterministic: true,
         };
 
-        assert!(count() == 0);
+        assert!(count().await == 0);
 
         transact_errors(&store, &deployment, BLOCKS[1].clone(), vec![error], true)
             .await
             .unwrap();
-        assert!(count() == 1);
+        assert!(count().await == 1);
 
-        let info = subgraph_store.status_for_id(deployment.id);
+        let info = subgraph_store.status_for_id(deployment.id).await;
 
         assert!(info.non_fatal_errors.len() == 1);
         assert!(info.health == SubgraphHealth::Unhealthy);
@@ -674,14 +700,14 @@ fn subgraph_non_fatal_error() {
         transact_errors(&store, &deployment, BLOCKS[2].clone(), vec![error2], false)
             .await
             .unwrap();
-        assert!(count() == 2);
+        assert!(count().await == 2);
 
-        let info = subgraph_store.status_for_id(deployment.id);
+        let info = subgraph_store.status_for_id(deployment.id).await;
 
         assert!(info.non_fatal_errors.len() == 1);
         assert!(info.health == SubgraphHealth::Unhealthy);
 
-        test_store::remove_subgraph(&subgraph_id);
+        test_store::remove_subgraph(&subgraph_id).await;
     })
 }
 
@@ -689,7 +715,7 @@ fn subgraph_non_fatal_error() {
 fn fatal_vs_non_fatal() {
     async fn setup() -> DeploymentLocator {
         let id = DeploymentHash::new("failUnfail").unwrap();
 
-        remove_subgraphs();
+        remove_subgraphs().await;
         create_test_subgraph(&id, SUBGRAPH_GQL).await
     }
 
@@ -749,7 +775,7 @@ fn fail_unfail_deterministic_error() {
     async fn setup() -> DeploymentLocator {
         let id = DeploymentHash::new(NAME).unwrap();
 
-        remove_subgraphs();
+        remove_subgraphs().await;
         create_test_subgraph(&id, SUBGRAPH_GQL).await
     }
 
@@ -777,7 +803,7 @@ fn fail_unfail_deterministic_error() {
         // We don't have any errors and the subgraph is healthy.
         let state = query_store.deployment_state().await.unwrap();
         assert!(!state.has_deterministic_errors(&latest_block(&store, deployment.id).await));
-        let vi = get_version_info(&store, NAME);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);
@@ -795,7 +821,7 @@ fn fail_unfail_deterministic_error() {
         // Still no fatal errors.
         let state = query_store.deployment_state().await.unwrap();
         assert!(!state.has_deterministic_errors(&latest_block(&store, deployment.id).await));
-        let vi = get_version_info(&store, NAME);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
@@ -820,7 +846,7 @@ fn fail_unfail_deterministic_error() {
         // Now we have a fatal error because the subgraph failed.
         let state = query_store.deployment_state().await.unwrap();
         assert!(state.has_deterministic_errors(&latest_block(&store, deployment.id).await));
-        let vi = get_version_info(&store, NAME);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(true, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
@@ -835,12 +861,12 @@ fn fail_unfail_deterministic_error() {
         assert_eq!(outcome, UnfailOutcome::Unfailed);
         let state = query_store.deployment_state().await.unwrap();
         assert!(!state.has_deterministic_errors(&latest_block(&store, deployment.id).await));
-        let vi = get_version_info(&store, NAME);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);
 
-        test_store::remove_subgraphs();
+        test_store::remove_subgraphs().await;
     })
 }
 
@@ -850,16 +876,16 @@ fn fail_unfail_deterministic_error_noop() {
     async fn setup() -> DeploymentLocator {
         let id = DeploymentHash::new(NAME).unwrap();
 
-        remove_subgraphs();
+        remove_subgraphs().await;
         create_test_subgraph(&id, SUBGRAPH_GQL).await
     }
 
     run_test_sequentially(|store| async move {
         let deployment = setup().await;
 
-        let count = || -> usize {
+        let count = async || -> usize {
             let store = store.subgraph_store();
-            store.error_count(&deployment.hash).unwrap()
+            store.error_count(&deployment.hash).await.unwrap()
         };
 
         // Process the first block.
@@ -873,8 +899,8 @@ fn fail_unfail_deterministic_error_noop() {
             .unwrap();
 
         // We don't have any errors and the subgraph is healthy.
-        assert_eq!(count(), 0);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 0);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);
@@ -890,8 +916,8 @@ fn fail_unfail_deterministic_error_noop() {
             .unwrap();
 
         // Still no fatal errors.
-        assert_eq!(count(), 0);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 0);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
@@ -910,8 +936,8 @@ fn fail_unfail_deterministic_error_noop() {
 
         // Nothing to unfail, state continues the same.
         assert_eq!(outcome, UnfailOutcome::Noop);
-        assert_eq!(count(), 0);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 0);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
@@ -928,8 +954,8 @@ fn fail_unfail_deterministic_error_noop() {
         writable.fail_subgraph(error).await.unwrap();
 
         // Now we have a fatal error because the subgraph failed.
-        assert_eq!(count(), 1);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 1);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(true, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
@@ -943,8 +969,8 @@ fn fail_unfail_deterministic_error_noop() {
         // State continues the same, nothing happened.
         // Neither the block got reverted nor the error deleted.
         assert_eq!(outcome, UnfailOutcome::Noop);
-        assert_eq!(count(), 1);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 1);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(true, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
@@ -970,13 +996,13 @@ fn fail_unfail_deterministic_error_noop() {
         // State continues the same.
         // Neither the block got reverted nor the error deleted.
         assert_eq!(outcome, UnfailOutcome::Noop);
-        assert_eq!(count(), 2);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 2);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(true, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
 
-        test_store::remove_subgraphs();
+        test_store::remove_subgraphs().await;
     })
 }
 
@@ -986,16 +1012,16 @@ fn fail_unfail_non_deterministic_error() {
     async fn setup() -> DeploymentLocator {
         let id = DeploymentHash::new(NAME).unwrap();
 
-        remove_subgraphs();
+        remove_subgraphs().await;
         create_test_subgraph(&id, SUBGRAPH_GQL).await
     }
 
     run_test_sequentially(|store| async move {
         let deployment = setup().await;
 
-        let count = || -> usize {
+        let count = async || -> usize {
             let store = store.subgraph_store();
-            store.error_count(&deployment.hash).unwrap()
+            store.error_count(&deployment.hash).await.unwrap()
         };
 
         // Process the first block.
@@ -1009,8 +1035,8 @@ fn fail_unfail_non_deterministic_error() {
             .unwrap();
 
         // We don't have any errors.
-        assert_eq!(count(), 0);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 0);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);
@@ -1033,8 +1059,8 @@ fn fail_unfail_non_deterministic_error() {
         writable.fail_subgraph(error).await.unwrap();
 
         // Now we have a fatal error because the subgraph failed.
-        assert_eq!(count(), 1);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 1);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(true, vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);
@@ -1050,24 +1076,27 @@ fn fail_unfail_non_deterministic_error() {
             .unwrap();
 
         // Subgraph failed but its deployment head pointer advanced.
-        assert_eq!(count(), 1);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 1);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(true, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
 
         // Unfail the subgraph and delete the fatal error.
-        let outcome = writable.unfail_non_deterministic_error(&BLOCKS[1]).unwrap();
+        let outcome = writable
+            .unfail_non_deterministic_error(&BLOCKS[1])
+            .await
+            .unwrap();
 
         // We don't have fatal errors anymore and the subgraph is healthy.
         assert_eq!(outcome, UnfailOutcome::Unfailed);
-        assert_eq!(count(), 0);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 0);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
 
-        test_store::remove_subgraphs();
+        test_store::remove_subgraphs().await;
     })
 }
 
@@ -1077,16 +1106,16 @@ fn fail_unfail_non_deterministic_error_noop() {
     async fn setup() -> DeploymentLocator {
         let id = DeploymentHash::new(NAME).unwrap();
 
-        remove_subgraphs();
+        remove_subgraphs().await;
         create_test_subgraph(&id, SUBGRAPH_GQL).await
     }
 
     run_test_sequentially(|store| async move {
         let deployment = setup().await;
 
-        let count = || -> usize {
+        let count = async || -> usize {
             let store = store.subgraph_store();
-            store.error_count(&deployment.hash).unwrap()
+            store.error_count(&deployment.hash).await.unwrap()
         };
 
         // Process the first block.
@@ -1100,8 +1129,8 @@ fn fail_unfail_non_deterministic_error_noop() {
             .unwrap();
 
         // We don't have any errors and the subgraph is healthy.
-        assert_eq!(count(), 0);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 0);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.failed);
         assert_eq!(Some(0), vi.latest_ethereum_block_number);
@@ -1117,8 +1146,8 @@ fn fail_unfail_non_deterministic_error_noop() {
             .unwrap();
 
         // Still no errors.
-        assert_eq!(count(), 0);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 0);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
@@ -1130,12 +1159,15 @@ fn fail_unfail_non_deterministic_error_noop() {
             .expect("can get writable");
 
         // Running unfail without any errors will do nothing.
-        let outcome = writable.unfail_non_deterministic_error(&BLOCKS[1]).unwrap();
+        let outcome = writable
+            .unfail_non_deterministic_error(&BLOCKS[1])
+            .await
+            .unwrap();
 
         // State continues the same, nothing happened.
         assert_eq!(outcome, UnfailOutcome::Noop);
-        assert_eq!(count(), 0);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 0);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(false, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
@@ -1152,19 +1184,22 @@ fn fail_unfail_non_deterministic_error_noop() {
         writable.fail_subgraph(error).await.unwrap();
 
         // We now have a fatal error because the subgraph failed.
-        assert_eq!(count(), 1);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 1);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(true, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
 
         // Running unfail_non_deterministic_error will be NOOP, the error is deterministic.
-        let outcome = writable.unfail_non_deterministic_error(&BLOCKS[1]).unwrap();
+        let outcome = writable
+            .unfail_non_deterministic_error(&BLOCKS[1])
+            .await
+            .unwrap();
 
         // Nothing happened, state continues the same.
         assert_eq!(outcome, UnfailOutcome::Noop);
-        assert_eq!(count(), 1);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 1);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(true, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
@@ -1181,16 +1216,19 @@ fn fail_unfail_non_deterministic_error_noop() {
         writable.fail_subgraph(error).await.unwrap();
 
         // Since the block range of the block won't match the deployment head, this will be NOOP.
-        let outcome = writable.unfail_non_deterministic_error(&BLOCKS[1]).unwrap();
+        let outcome = writable
+            .unfail_non_deterministic_error(&BLOCKS[1])
+            .await
+            .unwrap();
 
         // State continues the same besides a new error added to the database.
         assert_eq!(outcome, UnfailOutcome::Noop);
-        assert_eq!(count(), 2);
-        let vi = get_version_info(&store, NAME);
+        assert_eq!(count().await, 2);
+        let vi = get_version_info(&store, NAME).await;
         assert_eq!(NAME, vi.deployment_id.as_str());
         assert_eq!(true, vi.failed);
         assert_eq!(Some(1), vi.latest_ethereum_block_number);
 
-        test_store::remove_subgraphs();
+        test_store::remove_subgraphs().await;
     })
 }
diff --git a/store/test-store/tests/postgres/writable.rs b/store/test-store/tests/postgres/writable.rs
index d83ec8cbf48..da828e8784f 100644
--- a/store/test-store/tests/postgres/writable.rs
+++ b/store/test-store/tests/postgres/writable.rs
@@ -5,6 +5,7 @@ use graph::data_source::CausalityRegion;
 use graph::schema::{EntityKey, EntityType, InputSchema};
 use lazy_static::lazy_static;
 use std::collections::{BTreeMap, BTreeSet};
+use std::future::Future;
 use std::marker::PhantomData;
 use std::ops::Range;
 use test_store::*;
@@ -102,16 +103,10 @@ async fn insert_test_data(store: Arc<SubgraphStore>) -> DeploymentLocator {
         NETWORK_NAME.to_string(),
         SubgraphVersionSwitchingMode::Instant,
     )
+    .await
     .unwrap()
 }
 
-/// Removes test data from the database behind the store.
-fn remove_test_data(store: Arc<SubgraphStore>) {
-    store
-        .delete_all_entities_for_test_use_only()
-        .expect("deleting test entities succeeds");
-}
-
 /// Test harness for running database integration tests.
 fn run_test<R, F>(test: F)
 where
@@ -123,12 +118,12 @@ where
         ) -> R
         + Send
         + 'static,
-    R: std::future::Future<Output = ()> + Send + 'static,
+    R: Future<Output = ()> + Send + 'static,
 {
     run_test_sequentially(|store| async move {
         let subgraph_store = store.subgraph_store();
         // Reset state before starting
-        remove_test_data(subgraph_store.clone());
+        remove_subgraphs().await;
 
         // Seed database with test data
         let deployment = insert_test_data(subgraph_store.clone()).await;
@@ -214,17 +209,18 @@ async fn pause_writer(deployment: &DeploymentLocator) {
 ///
 /// `read_count` lets us look up entities in different ways to exercise
 /// different methods in `WritableStore`
-fn get_with_pending<F>(batch: bool, read_count: F)
+fn get_with_pending<F, R>(batch: bool, read_count: F)
 where
-    F: Send + Fn(&dyn WritableStore) -> i32 + Sync + 'static,
+    F: Send + Fn(Arc<dyn WritableStore>) -> R + Sync + 'static,
+    R: Future<Output = i32> + Send + 'static,
 {
     run_test(move |store, writable, _, deployment| async move {
         let subgraph_store = store.subgraph_store();
 
-        let read_count = || read_count(writable.as_ref());
+        let read_count = || read_count(writable.cheap_clone());
 
         if !batch {
-            writable.deployment_synced(block_pointer(0)).unwrap();
+            writable.deployment_synced(block_pointer(0)).await.unwrap();
         }
 
         for count in 1..4 {
@@ -236,10 +232,10 @@ where
         for count in 4..7 {
             insert_count(&subgraph_store, &deployment, count, count, false).await;
         }
-        assert_eq!(6, read_count());
+        assert_eq!(6, read_count().await);
         writable.flush().await.unwrap();
-        assert_eq!(6, read_count());
+        assert_eq!(6, read_count().await);
 
         // Test reading back with pending writes and a pending revert
         for count in 7..10 {
@@ -250,28 +246,34 @@ where
             .await
             .unwrap();
 
-        assert_eq!(2, read_count());
+        assert_eq!(2, read_count().await);
         writable.flush().await.unwrap();
-        assert_eq!(2, read_count());
+        assert_eq!(2, read_count().await);
     })
 }
 
 /// Get the count using `WritableStore::get_many`
-fn count_get_many(writable: &dyn WritableStore) -> i32 {
+async fn count_get_many(writable: Arc<dyn WritableStore>) -> i32 {
     let key = count_key("1");
     let keys = BTreeSet::from_iter(vec![key.clone()]);
-    let counter = writable.get_many(keys).unwrap().get(&key).unwrap().clone();
+    let counter = writable
+        .get_many(keys)
+        .await
+        .unwrap()
+        .get(&key)
+        .unwrap()
+        .clone();
     counter.get("count").unwrap().as_int().unwrap()
 }
 
 /// Get the count using `WritableStore::get`
-fn count_get(writable: &dyn WritableStore) -> i32 {
-    let counter = writable.get(&count_key("1")).unwrap().unwrap();
+async fn count_get(writable: Arc<dyn WritableStore>) -> i32 {
+    let counter = writable.get(&count_key("1")).await.unwrap().unwrap();
     counter.get("count").unwrap().as_int().unwrap()
 }
 
-fn count_get_derived(writable: &dyn WritableStore) -> i32 {
+async fn count_get_derived(writable: Arc<dyn WritableStore>) -> i32 {
     let key = count_key("1");
     let query = DerivedEntityQuery {
         entity_type: key.entity_type.clone(),
@@ -279,7 +281,7 @@ fn count_get_derived(writable: &dyn WritableStore) -> i32 {
         value: key.entity_id.clone(),
         causality_region: CausalityRegion::ONCHAIN,
     };
-    let map = writable.get_derived(&query).unwrap();
+    let map = writable.get_derived(&query).await.unwrap();
     let counter = map.get(&key).unwrap();
     counter.get("count").unwrap().as_int().unwrap()
 }
@@ -318,7 +320,7 @@ fn get_derived_nobatch() {
 fn restart() {
     run_test(|store, writable, _, deployment| async move {
         let subgraph_store = store.subgraph_store();
-        let schema = subgraph_store.input_schema(&deployment.hash).unwrap();
+        let schema = subgraph_store.input_schema(&deployment.hash).await.unwrap();
 
         // Cause an error by leaving out the non-nullable `count` attribute
         let entity_ops = vec![EntityOperation::Set {
@@ -378,18 +380,19 @@ fn read_range_test() {
            r#"(7, [EntitySourceOperation { entity_op: Delete, entity_type: EntityType(Counter), entity: Entity { count: Int(12), id: String("1"), vid: Int8(6) }, vid: 6 }])"#,
         ];
         let subgraph_store = store.subgraph_store();
-        writable.deployment_synced(block_pointer(0)).unwrap();
+        writable.deployment_synced(block_pointer(0)).await.unwrap();
 
         for count in 1..=5 {
             insert_count(&subgraph_store, &deployment, count, 2 * count, false).await;
         }
         writable.flush().await.unwrap();
-        writable.deployment_synced(block_pointer(0)).unwrap();
+        writable.deployment_synced(block_pointer(0)).await.unwrap();
 
         let br: Range<BlockNumber> = 0..18;
         let entity_types = vec![COUNTER_TYPE.clone(), COUNTER2_TYPE.clone()];
         let e: BTreeMap<BlockNumber, Vec<EntitySourceOperation>> = sourceable
             .get_range(entity_types.clone(), CausalityRegion::ONCHAIN, br.clone())
+            .await
             .unwrap();
         assert_eq!(e.len(), 5);
         for en in &e {
@@ -401,9 +404,10 @@ fn read_range_test() {
             insert_count(&subgraph_store, &deployment, count, 2 * count, false).await;
         }
         writable.flush().await.unwrap();
-        writable.deployment_synced(block_pointer(0)).unwrap();
+        writable.deployment_synced(block_pointer(0)).await.unwrap();
         let e: BTreeMap<BlockNumber, Vec<EntitySourceOperation>> = sourceable
             .get_range(entity_types, CausalityRegion::ONCHAIN, br)
+            .await
             .unwrap();
         assert_eq!(e.len(), 7);
         for en in &e {
@@ -418,17 +422,18 @@ fn read_immutable_only_range_test() {
     run_test(|store, writable, sourceable, deployment| async move {
         let subgraph_store = store.subgraph_store();
-        writable.deployment_synced(block_pointer(0)).unwrap();
+        writable.deployment_synced(block_pointer(0)).await.unwrap();
 
         for count in 1..=4 {
             insert_count(&subgraph_store, &deployment, count, 2 * count, true).await;
         }
         writable.flush().await.unwrap();
-        writable.deployment_synced(block_pointer(0)).unwrap();
+        writable.deployment_synced(block_pointer(0)).await.unwrap();
 
         let br: Range<BlockNumber> = 0..18;
         let entity_types = vec![COUNTER2_TYPE.clone()];
         let e: BTreeMap<BlockNumber, Vec<EntitySourceOperation>> = sourceable
             .get_range(entity_types.clone(), CausalityRegion::ONCHAIN, br.clone())
+            .await
             .unwrap();
         assert_eq!(e.len(), 4);
     })
@@ -444,7 +449,7 @@ fn read_range_pool_created_test() {
         // Rest of the test remains the same
         let subgraph_store = store.subgraph_store();
-        writable.deployment_synced(block_pointer(0)).unwrap();
+        writable.deployment_synced(block_pointer(0)).await.unwrap();
 
         let pool_created_type = TEST_SUBGRAPH_SCHEMA.entity_type("PoolCreated").unwrap();
         let entity_types = vec![pool_created_type.clone()];
@@ -490,11 +495,12 @@ fn read_range_pool_created_test() {
                 .unwrap();
         }
         writable.flush().await.unwrap();
-        writable.deployment_synced(block_pointer(0)).unwrap();
+        writable.deployment_synced(block_pointer(0)).await.unwrap();
 
         let br: Range<BlockNumber> = 0..18;
         let e: BTreeMap<BlockNumber, Vec<EntitySourceOperation>> = sourceable
             .get_range(entity_types.clone(), CausalityRegion::ONCHAIN, br.clone())
+            .await
             .unwrap();
         assert_eq!(e.len(), 2);
         for en in &e {
diff --git a/tests/Cargo.toml b/tests/Cargo.toml
index 3d6a3771a93..1f937d4a1e3 100644
--- a/tests/Cargo.toml
+++ b/tests/Cargo.toml
@@ -7,6 +7,7 @@ edition.workspace = true
 anyhow = "1.0"
 assert-json-diff = "2.0.2"
 async-stream = "0.3.6"
+async-trait = { workspace = true }
 graph = { path = "../graph" }
 graph-chain-ethereum = { path = "../chain/ethereum" }
 graph-chain-substreams= {path = "../chain/substreams"}
diff --git a/tests/src/fixture/mod.rs b/tests/src/fixture/mod.rs
index 362cef37f44..3554624d477 100644
--- a/tests/src/fixture/mod.rs
+++ b/tests/src/fixture/mod.rs
@@ -9,6 +9,7 @@ use std::time::{Duration, Instant};
 
 use anyhow::Error;
 use async_stream::stream;
+use async_trait::async_trait;
 use graph::blockchain::block_stream::{
     BlockRefetcher, BlockStream, BlockStreamBuilder, BlockStreamError, BlockStreamEvent,
     BlockWithTriggers, FirehoseCursor,
@@ -40,10 +41,10 @@ use graph::ipfs::{IpfsClient, IpfsMetrics};
 use graph::prelude::ethabi::ethereum_types::H256;
 use graph::prelude::serde_json::{self, json};
 use graph::prelude::{
-    async_trait, lazy_static, q, r, ApiVersion, BigInt, BlockNumber, DeploymentHash,
-    GraphQlRunner as _, IpfsResolver, LinkResolver, LoggerFactory, NodeId, QueryError,
-    SubgraphAssignmentProvider, SubgraphCountMetric, SubgraphName, SubgraphRegistrar,
-    SubgraphStore as _, SubgraphVersionSwitchingMode, TriggerProcessor,
+    lazy_static, q, r, ApiVersion, BigInt, BlockNumber, DeploymentHash, GraphQlRunner as _,
+    IpfsResolver, LinkResolver, LoggerFactory, NodeId, QueryError, SubgraphAssignmentProvider,
+    SubgraphCountMetric, SubgraphName, SubgraphRegistrar, SubgraphStore as _,
+    SubgraphVersionSwitchingMode, TriggerProcessor,
 };
 use graph::schema::InputSchema;
 use graph_chain_ethereum::chain::RuntimeAdapterBuilder;
@@ -357,18 +358,28 @@ impl TestContext {
         query_res.indexing_status_for_current_version
     }
 
-    pub fn rewind(&self, block_ptr_to: BlockPtr) {
+    pub async fn rewind(&self, block_ptr_to: BlockPtr) {
         self.store
             .rewind(self.deployment.hash.clone(), block_ptr_to)
+            .await
             .unwrap()
     }
 }
 
 impl Drop for TestContext {
     fn drop(&mut self) {
-        if let Err(e) = cleanup(&self.store, &self.subgraph_name, &self.deployment.hash) {
-            crit!(self.logger, "error cleaning up test subgraph"; "error" => e.to_string());
-        }
+        // Using drop to clean up the subgraph after the test is too clever
+        // by half and should really be done with an explicit method,
+        // something like `TestContext::cleanup(self)`.
+        let store = self.store.cheap_clone();
+        let subgraph_name = self.subgraph_name.clone();
+        let deployment_hash = self.deployment.hash.clone();
+        let logger = self.logger.cheap_clone();
+        graph::spawn(async move {
+            if let Err(e) = cleanup(&store, &subgraph_name, &deployment_hash).await {
+                crit!(logger, "error cleaning up test subgraph"; "error" => e.to_string());
+            }
+        });
     }
 }
 
@@ -423,7 +434,7 @@ pub async fn stores(test_name: &str, store_config_path: &str) -> Stores {
         .into();
     let chain_head_listener = store_builder.chain_head_update_listener();
     let network_identifiers: Vec<ChainName> = vec![network_name.clone()].into_iter().collect();
-    let network_store = store_builder.network_store(network_identifiers);
+    let network_store = store_builder.network_store(network_identifiers).await;
     let ident = ChainIdentifier {
         net_version: "".into(),
         genesis_block_hash: test_ptr(0).hash,
@@ -431,6 +442,7 @@ pub async fn stores(test_name: &str, store_config_path: &str) -> Stores {
     let chain_store = network_store
         .block_store()
         .create_chain_store(&network_name, ident)
+        .await
         .unwrap_or_else(|_| panic!("No chain store for {}", &network_name));
 
     Stores {
@@ -499,7 +511,9 @@ pub async fn setup_inner(
     // Make sure we're starting from a clean state.
     let subgraph_store = stores.network_store.subgraph_store();
-    cleanup(&subgraph_store, &test_info.subgraph_name, &test_info.hash).unwrap();
+    cleanup(&subgraph_store, &test_info.subgraph_name, &test_info.hash)
+        .await
+        .unwrap();
 
     let mut blockchain_map = BlockchainMap::new();
     blockchain_map.insert(stores.network_name.clone(), chain.chain());
@@ -631,15 +645,15 @@ pub async fn setup_inner(
     }
 }
 
-pub fn cleanup(
+pub async fn cleanup(
     subgraph_store: &SubgraphStore,
     name: &SubgraphName,
     hash: &DeploymentHash,
 ) -> Result<(), Error> {
-    let locators = subgraph_store.locators(hash)?;
-    subgraph_store.remove_subgraph(name.clone())?;
+    let locators = subgraph_store.locators(hash).await?;
+    subgraph_store.remove_subgraph(name.clone()).await?;
     for locator in locators {
-        subgraph_store.remove_deployment(locator.id.into())?;
+        subgraph_store.remove_deployment(locator.id.into()).await?;
     }
     Ok(())
 }
@@ -692,7 +706,7 @@ pub async fn wait_for_sync(
             }
         };
         info!(logger, "TEST: sync status: {:?}", block_ptr);
-        let status = store.status_for_id(deployment.id);
+        let status = store.status_for_id(deployment.id).await;
 
         if let Some(fatal_error) = status.fatal_error {
             return Err(fatal_error);
diff --git a/tests/tests/file_link_resolver.rs b/tests/tests/file_link_resolver.rs
index 1b12aef64c4..0213c209457 100644
--- a/tests/tests/file_link_resolver.rs
+++ b/tests/tests/file_link_resolver.rs
@@ -8,7 +8,7 @@ use graph_tests::{
     recipe::RunnerTestRecipe,
 };
 
-#[tokio::test]
+#[graph::test]
 async fn file_link_resolver() -> anyhow::Result<()> {
     std::env::set_var("GRAPH_NODE_DISABLE_DEPLOYMENT_HASH_VALIDATION", "true");
     let RunnerTestRecipe { stores, test_info } = RunnerTestRecipe::new_with_file_link_resolver(
diff --git a/tests/tests/gnd_tests.rs b/tests/tests/gnd_tests.rs
index aa823a7324d..3e3a2e0f448 100644
--- a/tests/tests/gnd_tests.rs
+++ b/tests/tests/gnd_tests.rs
@@ -13,7 +13,7 @@ use integration_tests::{
 };
 
 /// The main test entrypoint.
-#[tokio::test]
+#[graph::test]
 async fn gnd_tests() -> anyhow::Result<()> {
     set_dev_mode(true);
 
diff --git a/tests/tests/integration_tests.rs b/tests/tests/integration_tests.rs
index 3bfbe95ff8f..db459972bc3 100644
--- a/tests/tests/integration_tests.rs
+++ b/tests/tests/integration_tests.rs
@@ -1307,7 +1307,7 @@ async fn wait_for_blockchain_block(block_number: i32) -> bool {
 }
 
 /// The main test entrypoint.
-#[tokio::test]
+#[graph::test]
 async fn integration_tests() -> anyhow::Result<()> {
     let test_name_to_run = std::env::var("TEST_CASE").ok();
 
diff --git a/tests/tests/runner_tests.rs b/tests/tests/runner_tests.rs
index cd2c059e2dc..83d9625ae8a 100644
--- a/tests/tests/runner_tests.rs
+++ b/tests/tests/runner_tests.rs
@@ -55,7 +55,7 @@ fn assert_eq_ignore_backtrace(err: &SubgraphError, expected: &SubgraphError) {
     }
 }
 
-#[tokio::test]
+#[graph::test]
 async fn data_source_revert() -> anyhow::Result<()> {
     *TEST_WITH_NO_REORG.lock().unwrap() = true;
 
@@ -174,7 +174,7 @@ async fn data_source_long_revert() -> anyhow::Result<()> {
     Ok(())
 }
 
-#[tokio::test]
+#[graph::test]
 async fn typename() -> anyhow::Result<()> {
     let RunnerTestRecipe { stores, test_info } =
         RunnerTestRecipe::new("typename", "typename").await;
@@ -202,7 +202,7 @@ async fn typename() -> anyhow::Result<()> {
     Ok(())
 }
 
-#[tokio::test]
+#[graph::test]
 async fn api_version_0_0_7() {
     let RunnerTestRecipe { stores, test_info } = RunnerTestRecipe::new_with_custom_cmd(
         "api_version_0_0_7",
@@ -244,7 +244,7 @@ async fn api_version_0_0_7() {
     );
 }
 
-#[tokio::test]
+#[graph::test]
 async fn api_version_0_0_8() {
     let RunnerTestRecipe { stores, test_info } = RunnerTestRecipe::new_with_custom_cmd(
         "api_version_0_0_8",
@@ -278,7 +278,7 @@ async fn api_version_0_0_8() {
     assert_eq_ignore_backtrace(&err, &expected_err);
 }
 
-#[tokio::test]
+#[graph::test]
 async fn derived_loaders() {
     let RunnerTestRecipe { stores, test_info } =
         RunnerTestRecipe::new("derived_loaders", "derived-loaders").await;
@@ -438,7 +438,7 @@ async fn derived_loaders() {
 // changed the way TriggerFilters were built
 // A bug was introduced in the PR which resulted in filters for substreams not being included
 // This test tests that the TriggerFilter is built correctly for substreams
-#[tokio::test]
+#[graph::test]
 async fn substreams_trigger_filter_construction() -> anyhow::Result<()> {
     let RunnerTestRecipe { stores, test_info } =
         RunnerTestRecipe::new("substreams", "substreams").await;
@@ -465,7 +465,7 @@ async fn substreams_trigger_filter_construction() -> anyhow::Result<()> {
     Ok(())
 }
 
-#[tokio::test]
+#[graph::test]
 async fn end_block() -> anyhow::Result<()> {
     let RunnerTestRecipe { stores, test_info } =
         RunnerTestRecipe::new("end_block", "end-block").await;
@@ -542,7 +542,7 @@ async fn end_block() -> anyhow::Result<()> {
 
     // Simulate a chain reorg and ensure the filter rebuilds accurately post-reorg.
     {
-        ctx.rewind(test_ptr(6));
+        ctx.rewind(test_ptr(6)).await;
 
         let mut blocks = blocks[0..8].to_vec().clone();
 
@@ -589,7 +589,7 @@ async fn end_block() -> anyhow::Result<()> {
     Ok(())
 }
 
-#[tokio::test]
+#[graph::test]
 async fn file_data_sources() {
     let RunnerTestRecipe { stores, test_info } =
         RunnerTestRecipe::new("file-data-sourcess", "file-data-sources").await;
@@ -727,7 +727,7 @@ async fn file_data_sources() {
     // Should not allow creating conflicting entity. ie: Entity created in offchain handler cannot be created in onchain handler
     {
-        ctx.rewind(test_ptr(4));
+        ctx.rewind(test_ptr(4)).await;
 
         let mut blocks = blocks.clone();
         blocks.retain(|block| block.block.number() <= 4);
@@ -752,7 +752,7 @@ async fn file_data_sources() {
 
     // Should not allow accessing entities created in offchain handlers in onchain handlers
     {
-        ctx.rewind(test_ptr(4));
+        ctx.rewind(test_ptr(4)).await;
 
         let mut blocks = blocks.clone();
         blocks.retain(|block| block.block.number() <= 4);
@@ -785,7 +785,7 @@ async fn file_data_sources() {
 
     // Prevent access to entities created by offchain handlers when using derived loaders in onchain handlers.
     {
-        ctx.rewind(test_ptr(4));
+        ctx.rewind(test_ptr(4)).await;
 
         let mut blocks = blocks.clone();
         blocks.retain(|block| block.block.number() <= 4);
@@ -828,7 +828,7 @@ async fn file_data_sources() {
 
     // Should not allow creating entity that is not declared in the manifest for the offchain datasource
     {
-        ctx.rewind(test_ptr(4));
+        ctx.rewind(test_ptr(4)).await;
 
         let mut blocks = blocks.clone();
         blocks.retain(|block| block.block.number() <= 4);
@@ -847,7 +847,7 @@ async fn file_data_sources() {
     }
 }
 
-#[tokio::test]
+#[graph::test]
 async fn block_handlers() {
     let RunnerTestRecipe { stores, test_info } =
         RunnerTestRecipe::new("block_handlers", "block-handlers").await;
@@ -971,7 +971,7 @@ async fn block_handlers() {
     );
 }
 
-#[tokio::test]
+#[graph::test]
 async fn template_static_filters_false_positives() {
     let RunnerTestRecipe { stores, test_info } = RunnerTestRecipe::new(
         "template_static_filters_false_positives",
@@ -1011,7 +1011,7 @@ async fn template_static_filters_false_positives() {
     );
 }
 
-#[tokio::test]
+#[graph::test]
 async fn parse_data_source_context() {
     let RunnerTestRecipe { stores, test_info } =
         RunnerTestRecipe::new("parse_data_source_context", "data-sources").await;
@@ -1039,7 +1039,7 @@ async fn parse_data_source_context() {
     );
 }
 
-#[tokio::test]
+#[graph::test]
 async fn retry_create_ds() {
     let RunnerTestRecipe { stores, test_info } =
         RunnerTestRecipe::new("retry_create_ds", "data-source-revert2").await;
@@ -1096,7 +1096,7 @@ async fn retry_create_ds() {
     assert_eq!(runner.context().hosts_len(), 2);
 }
 
-#[tokio::test]
+#[graph::test]
 async fn fatal_error() -> anyhow::Result<()> {
     let RunnerTestRecipe { stores, test_info } =
         RunnerTestRecipe::new("fatal_error", "fatal-error").await;
@@ -1153,7 +1153,7 @@ async fn fatal_error() -> anyhow::Result<()> {
     assert!(poi2 != poi100);
 
     // Test that rewind unfails the subgraph.
-    ctx.rewind(test_ptr(1));
+    ctx.rewind(test_ptr(1)).await;
     let status = ctx.indexing_status().await;
     assert!(status.health == SubgraphHealth::Healthy);
     assert!(status.fatal_error.is_none());
@@ -1161,7 +1161,7 @@ async fn fatal_error() -> anyhow::Result<()> {
     Ok(())
 }
 
-#[tokio::test]
+#[graph::test]
 async fn arweave_file_data_sources() {
     let RunnerTestRecipe { stores, test_info } =
         RunnerTestRecipe::new("arweave_file_data_sources", "arweave-file-data-sources").await;