From 9f5da8a2543a59488dd164aeed0efb8fe250d1bc Mon Sep 17 00:00:00 2001
From: Daniel Harvey
Date: Fri, 20 Dec 2024 17:08:46 +0000
Subject: [PATCH] Remove unused constructors and packages, add cargo machete job to CI

---
 .github/workflows/cargo-machete.yaml          |  28 +++
 architecture.md                               | 173 ------------------
 crates/cli/Cargo.toml                         |   1 -
 crates/configuration/Cargo.toml               |   7 -
 crates/connectors/ndc-bigquery/Cargo.toml     |   5 -
 .../ndc-bigquery/src/error/convert.rs         |   9 -
 .../ndc-bigquery/src/error/record.rs          |   8 +-
 crates/connectors/ndc-bigquery/src/state.rs   |   2 -
 crates/query-engine/execution/Cargo.toml      |   5 -
 crates/query-engine/execution/src/error.rs    |  31 ----
 crates/query-engine/execution/src/metrics.rs  | 112 ++----------
 crates/query-engine/metadata/Cargo.toml       |   2 -
 crates/query-engine/sql/Cargo.toml            |   2 -
 crates/query-engine/translation/Cargo.toml    |   3 +-
 crates/tests/tests-common/Cargo.toml          |  11 --
 15 files changed, 41 insertions(+), 358 deletions(-)
 create mode 100644 .github/workflows/cargo-machete.yaml
 delete mode 100644 architecture.md

diff --git a/.github/workflows/cargo-machete.yaml b/.github/workflows/cargo-machete.yaml
new file mode 100644
index 00000000..c05dfa5e
--- /dev/null
+++ b/.github/workflows/cargo-machete.yaml
@@ -0,0 +1,28 @@
+name: find unused dependencies
+
+on:
+  push:
+
+jobs:
+  cargo-machete:
+    name: find unused dependencies with cargo machete
+    runs-on: ubuntu-latest
+    env:
+      CARGO_NET_GIT_FETCH_WITH_CLI: "true"
+      RUSTFLAGS: "-D warnings" # fail on warnings
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: install tools
+        run: |
+          rustup show
+          cargo install cargo-machete
+
+      - uses: Swatinem/rust-cache@v2
+        with:
+          shared-key: "build" # share the cache across jobs
+          save-if: false
+
+      - name: find unused dependencies
+        run: |
+          cargo machete --with-metadata
diff --git a/architecture.md b/architecture.md
deleted file mode 100644
index 2aa9d5e9..00000000
--- a/architecture.md
+++ /dev/null
@@ -1,173 +0,0 @@
-# General architecture
-
-## Query Engine
-
-The query engine's job is to take a `QueryRequest`, which contains information about the query a user would like to run,
-translate it to PostgreSQL SQL, execute it against the database, and return the results as a `QueryResponse`.
-
-One place in particular that uses the Query Engine is the `/query` endpoint (defined in the `ndc-hub` repository).
-
-The `/query` endpoint receives a `QueryRequest` and calls the `translate` function from the Query Engine
-with it and with the information about the tables tracked in the metadata to receive an `ExecutionPlan`.
-It then calls the `execute` function from the Query Engine with the same `ExecutionPlan`
-(which then runs it against postgres) and gets back a `QueryResponse` which it can then return to the caller.
-
-API:
-
-```rs
-pub fn translate(
-    metadata: &metadata::Metadata,
-    query_request: models::QueryRequest,
-) -> Result
-```
-
-```rs
-pub async fn execute(
-    pool: sqlx::PgPool,
-    plan: translation::ExecutionPlan,
-) -> Result
-```
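For orientation, the two entry points above compose roughly as follows. This is a minimal sketch only: the handler name, the shared error type, and the elided `Result` parameters are assumptions made for illustration, not code taken from the connector.

```rs
// Hypothetical glue code illustrating the translate -> execute flow described above.
// The names, the shared `Error` type, and the error conversions are assumed.
async fn handle_query(
    metadata: &metadata::Metadata,
    pool: sqlx::PgPool,
    query_request: models::QueryRequest,
) -> Result<models::QueryResponse, Error> {
    // Plan the query using the tables tracked in the metadata...
    let plan = translate(metadata, query_request)?;
    // ...then run the plan against the database and hand the response back to the caller.
    execute(pool, plan).await
}
```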
-### Translation
-
-The translation step is essentially side-effect free - we use information from the request, as well as the information
-about the metadata to translate the query request into steps to run against the database.
-
-This process is currently found in the [translation/query/](/crates/query-engine/translation/src/translation/query/) directory
-and is split into several modules roughly mimicking the query parts as they are specified in the spec. The API
-is the following function in the [mod.rs](/crates/query-engine/translation/src/translation/query/mod.rs) file:
-
-```rs
-pub fn translate(
-    metadata: &metadata::Metadata,
-    query_request: models::QueryRequest,
-) -> Result
-```
-
-The `translate` function returns an `ExecutionPlan`.
-
-```rs
-pub struct ExecutionPlan {
-    pub root_field: String,
-    /// Run before the query. Should be a sql::ast in the future.
-    pub pre: Vec,
-    /// The query.
-    pub query: sql::ast::Select,
-    /// Run after the query. Should be a sql::ast in the future.
-    pub post: Vec,
-}
-```
-
-Right now we don't expect `pre` and `post` to be populated, but it could be used for things like Stored Procedures.
-
-### SQL AST
-
-We maintain a SQL AST represented as Rust data types in [sql/ast.rs](/crates/query-engine/sql/src/sql/ast.rs).
-We implement our own representation because we want more control over this core component of our application,
-and we want to implement exactly what we want and not more or less. Other external libraries such as `sqlparser`
-do not have the same goals as us, and we'll have to make compromises that will affect our codebase's complexity
-and development velocity.
-
-We have a few guidelines for the SQL AST:
-
-#### The SQL AST should mimic PostgreSQL directly
-
-The SQL AST should look like a **subset** of PostgreSQL SQL, and not contain any high-level abstractions, or try to abstract
-multiple SQL ASTs. We should implement exactly what we need, and be precise about it.
-
-Should we need a higher level of abstraction, an additional IR should be constructed that will sit before the SQL AST.
-
-#### Implement what you need and not more
-
-The SQL AST should contain structures we actively use, and not contain structures we don't use.
-
-One such example is window functions - we don't need to include them in the AST currently because we don't have features
-that use them from GraphQL.
-
-#### Use sql::helpers to build the AST
-
-Sometimes we'd like a shorthand to build specific repeating patterns,
-such as `SELECT coalesce(json_agg(row_to_json()), '[]') AS FROM as `.
-The [sql/helpers.rs](/crates/query-engine/sql/src/sql/helpers.rs) module can come in handy to help
-codify certain SQL AST generation patterns. If you end up with a long pattern repeated in multiple places,
-it might be a good candidate to codify it as a `helpers` function.
-
-### SQL string
-
-The SQL string is a stringified representation of the SQL AST. It can be found in [sql/string.rs](/crates/query-engine/sql/src/sql/string.rs).
-
-We separate the SQL into AST and string representations so we can write transformations and optimizations on the SQL AST.
-
-The SQL string representation should be generated from the SQL AST by pretty printing the result.
-The result of converting ([sql/convert.rs](/crates/query-engine/sql/src/sql/convert.rs)) a SQL AST to a string should produce
-a query string that can be run against postgres as a parameterized query, as well as the parameters that are supplied by the user.
-
-Please use the API provided by the `SQL` type. It provides functions for constructing SQL strings in an easy way, such as appending syntax (like keywords and punctuation),
-identifiers, and params. Don't use `append_syntax` for things that are not syntax.
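To make the "parameterized query plus parameters" contract above concrete, here is a purely illustrative sketch; the literal SQL and the variable names are invented for this example and do not come from the codebase.

```rs
// Illustrative only: conversion is described as producing a placeholder-style SQL
// string plus the user-supplied parameters, kept separate for safe binding.
fn main() {
    let statement = r#"SELECT "Name" FROM "Artist" WHERE "ArtistId" = $1"#;
    let parameters: Vec<i64> = vec![42];
    println!("{statement} -- with {} parameter(s)", parameters.len());
}
```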
-### Query Execution
-
-The query execution receives a pool and a plan and executes it against postgres. It then returns the results from the query part
-back to the caller of the function.
-The code can be found in [execution.rs](/crates/query-engine/execution/src/execution.rs)
-
-```rs
-pub async fn execute(
-    pool: sqlx::PgPool,
-    plan: translation::ExecutionPlan,
-) -> Result
-```
-
-## Patterns and guiding principles
-
-Here are a few ideas we want to maintain and why:
-
-### Optimize for inspectability
-
-One of the most important tools we have to figure out how a program behaves is to run it and examine
-the way data changes throughout the program. To do so we want to make sure the flow of the program
-is easy to follow, and we can examine the relevant data at any given point. Here are a few suggestions (a short sketch follows this list):
-
-1. Prefer functions over abstract trait methods - makes jump to definition easier.
-2. Annotate data types with `Debug` so that they can be traced, and prefer data shapes that can be `Debug`ged over those that can't.
-3. Prefer to encode information in a way that is inspectable at runtime.
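A short sketch of suggestion 2 above, using invented types: deriving `Debug` keeps intermediate values printable while following a request through the pipeline.

```rs
// Invented example type: deriving Debug makes the value inspectable at any step.
#[derive(Debug)]
struct PlannedStep {
    description: String,
}

fn main() {
    let step = PlannedStep {
        description: "select all albums".to_string(),
    };
    // The value can be printed wherever it flows, e.g. with println! or tracing::debug!.
    println!("{step:?}");
}
```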
-### Prefer flexibility over rigidness until knowing better
-
-Abstractions should come up from usage pain points rather than theoretical ideas.
-Often we don't know exactly what a good solution looks like in terms of code, and it is better to do a simple thing first and even
-duplicate code when in doubt.
-Abstracting or extracting duplicate code to a single place too early may lead to rigidity in a codebase before we understand the trade-offs,
-so if there's no real necessity (that arises from usage) it might not be worth "fixing".
-
-A possible rule of thumb is that creating an abstraction to build a new thing might raise an alarm bell,
-and building an abstraction in the middle of building yet another thing using the same pattern might be more appropriate.
-Though still, You Might Not Need It.
-
-### Simplify testing and avoid infrastructure code
-
-At this point in time we've settled on a fairly sweet spot for testing, where infrastructure code is very limited and contained in one place.
-Since testing can very easily become complex and "another project to maintain", we want to deliberately put limits on how wildly it can grow,
-and contain all infrastructure code to a single module - `tests/common/mod.rs` (in each relevant crate), without infra code in tests.
-
-We do this so that we will notice when it grows too large and requires some explicit and calculated attention.
-
-We would also like, in general, to keep infrastructure code to the required minimum, and not add things we'll need to maintain before using them.
-
-### Consider the generated SQL
-
-When working on a feature or fixing a bug, consider the generated SQL first.
-What does it currently look like? What would you like it to look like?
-
-Compose a simplified yet runnable example and run it against the database (`just repl-postgres` can be helpful here),
-find a query that returns the results you expect, examine edge cases, and then consider the Rust implementation of this idea.
-
-### General coding style ideas
-
-- Use meaningful names (not shorthands). No need to skimp on vowels.
-- Comment code with intention and reasons when those are not trivial. Comment modules, functions and types with a summary of what they are supposed to do.
-  Often writing summary comments helps us understand our code better and where it can be improved.
-- Avoid using dummy values. Prefer throwing an error when in doubt.
-- Prefer to avoid generics, traits and macros if possible. The majority of functions and types have a single use case, and the ones that have multiple
-  use-cases probably don't have more than a handful of them. Duplicating code is often cheaper than the wrong abstraction, and we can always refactor.
-  Simple functions can go a long way.
diff --git a/crates/cli/Cargo.toml b/crates/cli/Cargo.toml
index d652979b..a24a3228 100644
--- a/crates/cli/Cargo.toml
+++ b/crates/cli/Cargo.toml
@@ -13,7 +13,6 @@ ndc-bigquery-configuration = { path = "../configuration" }
 anyhow = { workspace = true }
 clap = { workspace = true, features = ["derive", "env"] }
 serde = { workspace = true, features = ["derive"] }
-serde_json = { workspace = true }
 serde_yaml = { workspace = true }
 thiserror = { workspace = true }
 tokio = { workspace = true, features = ["full"] }
diff --git a/crates/configuration/Cargo.toml b/crates/configuration/Cargo.toml
index 22169b0e..f319495b 100644
--- a/crates/configuration/Cargo.toml
+++ b/crates/configuration/Cargo.toml
@@ -10,20 +10,13 @@ workspace = true
 
 [dependencies]
 ndc-models = { workspace = true }
 query-engine-metadata = { path = "../query-engine/metadata" }
-query-engine-sql = { path = "../query-engine/sql" }
 
 anyhow = { workspace = true }
-# We only use clap for the derive.
-clap = { workspace = true, features = ["derive", "env"] }
-prometheus = {workspace = true }
 schemars = { workspace = true, features = ["smol_str", "preserve_order"] }
 serde = { workspace = true }
 serde_json = { workspace = true, features = ["raw_value"] }
-smol_str = { workspace = true }
-sqlx = { workspace = true, features = ["json", "postgres", "runtime-tokio-rustls"] }
 thiserror = { workspace = true }
 tokio = { workspace = true, features = ["full"] }
-tracing = { workspace = true }
 yup-oauth2 = { workspace = true }
 gcp-bigquery-client = { workspace = true }
diff --git a/crates/connectors/ndc-bigquery/Cargo.toml b/crates/connectors/ndc-bigquery/Cargo.toml
index 8f970b30..aeaf580b 100644
--- a/crates/connectors/ndc-bigquery/Cargo.toml
+++ b/crates/connectors/ndc-bigquery/Cargo.toml
@@ -24,14 +24,9 @@ ndc-bigquery-configuration = { path = "../../configuration" }
 ndc-sdk = { workspace = true }
 
 async-trait = { workspace = true }
-enum-iterator = { workspace = true }
 gcp-bigquery-client = { workspace = true }
 prometheus = { workspace = true }
-schemars = { workspace = true, features = ["smol_str", "preserve_order"] }
-serde = { workspace = true }
-serde_derive = { workspace = true }
 serde_json = { workspace = true, features = ["raw_value"] }
-sqlx = { workspace = true, features = [ "json", "postgres", "runtime-tokio-rustls" ] }
 tokio = { workspace = true, features = ["full"] }
 tracing = { workspace = true }
 thiserror = { workspace = true }
diff --git a/crates/connectors/ndc-bigquery/src/error/convert.rs b/crates/connectors/ndc-bigquery/src/error/convert.rs
index 223f7fba..44321c01 100644
--- a/crates/connectors/ndc-bigquery/src/error/convert.rs
+++ b/crates/connectors/ndc-bigquery/src/error/convert.rs
@@ -13,16 +13,7 @@ pub fn execution_error_to_response(error: query_engine_execution::error::Error)
             QueryError::NotSupported(_) => {
                 connector::QueryError::new_unsupported_operation(&query_error.to_string()).into()
             }
-            QueryError::DBError(_) => {
-                connector::QueryError::new_unprocessable_content(&query_error.to_string()).into()
-            }
-            QueryError::DBConstraintError(_) | QueryError::MutationConstraintFailed => {
-                connector::MutationError::new_constraint_not_met(&query_error.to_string()).into()
-            }
         },
-        Error::DB(_) => {
-            ErrorResponse::new_internal_with_details(serde_json::Value::String(error.to_string()))
-        }
     }
 }
diff --git a/crates/connectors/ndc-bigquery/src/error/record.rs b/crates/connectors/ndc-bigquery/src/error/record.rs
index 82294d6f..1f337e6f 100644
--- a/crates/connectors/ndc-bigquery/src/error/record.rs
+++ b/crates/connectors/ndc-bigquery/src/error/record.rs
@@ -8,19 +8,13 @@ pub fn execution_error(error: &query_engine_execution::error::Error, metrics: &m
     tracing::error!("{}", error);
     match error {
         Error::Query(err) => match &err {
-            QueryError::VariableNotFound(_)
-            | QueryError::DBError(_)
-            | QueryError::MutationConstraintFailed
-            | QueryError::DBConstraintError(_) => {
+            QueryError::VariableNotFound(_) => {
                 metrics.error_metrics.record_invalid_request();
             }
             QueryError::NotSupported(_) => {
                 metrics.error_metrics.record_unsupported_feature();
             }
         },
-        Error::DB(_) => {
-            metrics.error_metrics.record_database_error();
-        }
     }
 }
diff --git a/crates/connectors/ndc-bigquery/src/state.rs b/crates/connectors/ndc-bigquery/src/state.rs
index 5830b7b4..5f4446d5 100644
--- a/crates/connectors/ndc-bigquery/src/state.rs
+++ b/crates/connectors/ndc-bigquery/src/state.rs
@@ -53,8 +53,6 @@ pub async fn create_state(
 /// State initialization error.
 #[derive(Debug, Error)]
 pub enum InitializationError {
-    #[error("unable to initialize connection pool: {0}")]
-    UnableToCreatePool(sqlx::Error),
     #[error("error initializing metrics: {0}")]
     MetricsError(prometheus::Error),
 }
diff --git a/crates/query-engine/execution/Cargo.toml b/crates/query-engine/execution/Cargo.toml
index 2d825a91..31f7c94d 100644
--- a/crates/query-engine/execution/Cargo.toml
+++ b/crates/query-engine/execution/Cargo.toml
@@ -8,15 +8,10 @@ license.workspace = true
 workspace = true
 
 [dependencies]
-ndc-models = { workspace = true }
-
 query-engine-sql = { path = "../sql" }
 
 gcp-bigquery-client = { workspace = true }
 prometheus = { workspace = true }
 serde_json = { workspace = true }
-sqlformat = { workspace = true }
-sqlx = { workspace = true, features = [ "json", "postgres", "runtime-tokio-rustls", "uuid" ] }
 thiserror = { workspace = true }
-tracing = { workspace = true }
 bytes = { workspace = true }
diff --git a/crates/query-engine/execution/src/error.rs b/crates/query-engine/execution/src/error.rs
index a0c5e519..75f1f39c 100644
--- a/crates/query-engine/execution/src/error.rs
+++ b/crates/query-engine/execution/src/error.rs
@@ -3,8 +3,6 @@
 pub enum Error {
     #[error("{0}")]
     Query(QueryError),
-    #[error("{0}")]
-    DB(sqlx::Error),
 }
 
 /// Query planning error.
@@ -14,33 +12,4 @@ pub enum QueryError {
     VariableNotFound(String),
     #[error("{0} are not supported.")]
     NotSupported(String),
-    #[error("{0}")]
-    DBError(sqlx::Error),
-    #[error("{0}")]
-    DBConstraintError(sqlx::Error),
-    #[error("Mutation constraint failed.")]
-    MutationConstraintFailed,
-}
-
-impl From for Error {
-    fn from(err: sqlx::Error) -> Error {
-        match err
-            .as_database_error()
-            .and_then(|e| e.try_downcast_ref())
-            .map(|e: &sqlx::postgres::PgDatabaseError| e.code())
-        {
-            None => Error::DB(err),
-            Some(code) => {
-                // We want to map data and constraint exceptions to query errors
-                // https://www.postgresql.org/docs/current/errcodes-appendix.html
-                if code.starts_with("22") {
-                    Error::Query(QueryError::DBError(err))
-                } else if code.starts_with("23") {
-                    Error::Query(QueryError::DBConstraintError(err))
-                } else {
-                    Error::DB(err)
-                }
-            }
-        }
-    }
 }
diff --git a/crates/query-engine/execution/src/metrics.rs b/crates/query-engine/execution/src/metrics.rs
index 0e388f39..8df01350 100644
--- a/crates/query-engine/execution/src/metrics.rs
+++ b/crates/query-engine/execution/src/metrics.rs
@@ -1,8 +1,6 @@
 //! Metrics setup and update for our connector.
 
-use std::time::Duration;
-
-use prometheus::{Gauge, Histogram, HistogramTimer, IntCounter, IntGauge, Registry};
+use prometheus::{Histogram, HistogramTimer, IntCounter, Registry};
 
 /// The collection of all metrics exposed through the `/metrics` endpoint.
 #[derive(Debug, Clone)]
@@ -11,11 +9,6 @@ pub struct Metrics {
     explain_total: IntCounter,
     query_plan_time: Histogram,
     query_execution_time: Histogram,
-    pool_max_connections: IntGauge,
-    pool_min_connections: IntGauge,
-    pool_acquire_timeout: Gauge,
-    pool_max_lifetime: Gauge,
-    pool_idle_timeout: Gauge,
     pub error_metrics: ErrorMetrics,
 }
@@ -24,58 +17,28 @@ impl Metrics {
     pub fn initialize(metrics_registry: &mut Registry) -> Result {
         let query_total = add_int_counter_metric(
             metrics_registry,
-            "postgres_ndc_query_total",
+            "bigquery_ndc_query_total",
             "Total successful queries.",
         )?;
 
         let explain_total = add_int_counter_metric(
             metrics_registry,
-            "postgres_ndc_explain_total",
+            "bigquery_ndc_explain_total",
             "Total successful explains.",
         )?;
 
         let query_plan_time = add_histogram_metric(
             metrics_registry,
-            "postgres_ndc_query_plan_time",
+            "bigquery_ndc_query_plan_time",
             "Time taken to plan a query for execution, in seconds.",
         )?;
 
         let query_execution_time = add_histogram_metric(
             metrics_registry,
-            "postgres_ndc_query_execution_time",
+            "bigquery_ndc_query_execution_time",
             "Time taken to execute an already-planned query, in seconds.",
         )?;
 
-        let pool_max_connections = add_int_gauge_metric(
-            metrics_registry,
-            "postgres_ndc_pool_max_connections",
-            "The maximum number of connections that this pool should maintain.",
-        )?;
-
-        let pool_min_connections = add_int_gauge_metric(
-            metrics_registry,
-            "postgres_ndc_pool_min_connections",
-            "The minimum number of connections that this pool should maintain.",
-        )?;
-
-        let pool_acquire_timeout = add_gauge_metric(
-            metrics_registry,
-            "postgres_ndc_pool_acquire_timeout",
-            "Get the maximum amount of time to spend waiting for a connection, in seconds.",
-        )?;
-
-        let pool_idle_timeout = add_gauge_metric(
-            metrics_registry,
-            "postgres_ndc_pool_idle_timeout",
-            "Get the maximum idle duration for individual connections, in seconds.",
-        )?;
-
-        let pool_max_lifetime = add_gauge_metric(
-            metrics_registry,
-            "postgres_ndc_pool_max_lifetime",
-            "Get the maximum lifetime of individual connections, in seconds.",
-        )?;
-
         let error_metrics = ErrorMetrics::initialize(metrics_registry)?;
 
         Ok(Self {
@@ -83,11 +46,6 @@ impl Metrics {
             explain_total,
             query_plan_time,
             query_execution_time,
-            pool_max_connections,
-            pool_min_connections,
-            pool_acquire_timeout,
-            pool_max_lifetime,
-            pool_idle_timeout,
             error_metrics,
         })
     }
@@ -107,34 +65,6 @@ impl Metrics {
     pub fn time_query_execution(&self) -> Timer {
         Timer(self.query_execution_time.start_timer())
     }
-
-    // Set the metrics populated from the pool options.
-    //
-    // This only needs to be called once, as the options don't change.
-    pub fn set_pool_options_metrics(&self, pool_options: &sqlx::pool::PoolOptions) {
-        let max_connections: i64 = pool_options.get_max_connections().into();
-        self.pool_max_connections.set(max_connections);
-
-        let min_connections: i64 = pool_options.get_min_connections().into();
-        self.pool_min_connections.set(min_connections);
-
-        let acquire_timeout: f64 = pool_options.get_acquire_timeout().as_secs_f64();
-        self.pool_acquire_timeout.set(acquire_timeout);
-
-        // if nothing is set, return 0
-        let idle_timeout: f64 = pool_options
-            .get_idle_timeout()
-            .unwrap_or(Duration::ZERO)
-            .as_secs_f64();
-        self.pool_idle_timeout.set(idle_timeout);
-
-        // if nothing is set, return 0
-        let max_lifetime: f64 = pool_options
-            .get_max_lifetime()
-            .unwrap_or(Duration::ZERO)
-            .as_secs_f64();
-        self.pool_max_lifetime.set(max_lifetime);
-    }
 }
 
 /// Create a new int counter metric and register it with the provided Prometheus Registry
 fn add_int_counter_metric(
     metrics_registry: &mut Registry,
     metric_name: &str,
     metric_description: &str,
 ) -> Result {
     let int_counter = IntCounter::with_opts(prometheus::Opts::new(metric_name, metric_description))?;
     register_collector(metrics_registry, int_counter)
 }
-/// Create a new int gauge metric and register it with the provided Prometheus Registry
-fn add_int_gauge_metric(
-    metrics_registry: &mut Registry,
-    metric_name: &str,
-    metric_description: &str,
-) -> Result {
-    let int_gauge = IntGauge::with_opts(prometheus::Opts::new(metric_name, metric_description))?;
-    register_collector(metrics_registry, int_gauge)
-}
-
-/// Create a new gauge metric and register it with the provided Prometheus Registry
-fn add_gauge_metric(
-    metrics_registry: &mut Registry,
-    metric_name: &str,
-    metric_description: &str,
-) -> Result {
-    let gauge = Gauge::with_opts(prometheus::Opts::new(metric_name, metric_description))?;
-    register_collector(metrics_registry, gauge)
-}
-
 /// Create a new histogram metric using the default buckets, and register it with the provided
 /// Prometheus Registry.
 fn add_histogram_metric(
@@ -249,37 +159,37 @@ impl ErrorMetrics {
     ) -> Result {
         let invalid_request_total = add_int_counter_metric(
             metrics_registry,
-            "ndc_postgres_error_invalid_request_total_count",
+            "ndc_bigquery_error_invalid_request_total_count",
             "Total number of invalid requests encountered.",
         )?;
 
         let unsupported_capability_total = add_int_counter_metric(
             metrics_registry,
-            "ndc_postgres_error_unsupported_capability_total_count",
+            "ndc_bigquery_error_unsupported_capability_total_count",
             "Total number of invalid requests with unsupported capabilities encountered.",
         )?;
 
         let unsupported_feature_total = add_int_counter_metric(
             metrics_registry,
-            "ndc_postgres_error_unsupported_capabilities_total_count",
+            "ndc_bigquery_error_unsupported_capabilities_total_count",
             "Total number of invalid requests with unsupported capabilities encountered.",
         )?;
 
         let connector_error_total = add_int_counter_metric(
             metrics_registry,
-            "ndc_postgres_error_connector_error_total_count",
+            "ndc_bigquery_error_connector_error_total_count",
             "Total number of requests failed due to an internal connector error.",
         )?;
 
         let database_error_total = add_int_counter_metric(
             metrics_registry,
-            "ndc_postgres_error_database_error_total_count",
+            "ndc_bigquery_error_database_error_total_count",
             "Total number of requests failed due to a database error.",
         )?;
 
         let connection_acquisition_error_total = add_int_counter_metric(
             metrics_registry,
-            "ndc_postgres_error_connection_acquisition_error_total_count",
+            "ndc_bigquery_error_connection_acquisition_error_total_count",
             "Total number of failures to acquire a database connection.",
         )?;
diff --git a/crates/query-engine/metadata/Cargo.toml b/crates/query-engine/metadata/Cargo.toml
index da4519b2..6cf6cf29 100644
--- a/crates/query-engine/metadata/Cargo.toml
+++ b/crates/query-engine/metadata/Cargo.toml
@@ -11,7 +11,5 @@ workspace = true
 ndc-models = { workspace = true }
 
 smol_str = { workspace = true }
-enum-iterator = { workspace = true }
 schemars = { workspace = true, features = ["smol_str"] }
 serde = { workspace = true, features = ["derive"] }
-serde_json = { workspace = true }
diff --git a/crates/query-engine/sql/Cargo.toml b/crates/query-engine/sql/Cargo.toml
index 7abc9e84..e539c8c9 100644
--- a/crates/query-engine/sql/Cargo.toml
+++ b/crates/query-engine/sql/Cargo.toml
@@ -10,6 +10,4 @@ workspace = true
 [dependencies]
 ndc-models = { workspace = true }
 
-schemars = { workspace = true, features = ["smol_str", "preserve_order"] }
-serde = { workspace = true }
 serde_json = { workspace = true }
diff --git a/crates/query-engine/translation/Cargo.toml b/crates/query-engine/translation/Cargo.toml
index 8f143a75..5c5e5e32 100644
--- a/crates/query-engine/translation/Cargo.toml
+++ b/crates/query-engine/translation/Cargo.toml
@@ -17,10 +17,9 @@ multimap = { workspace = true }
 ref-cast = { workspace = true }
 serde_json = { workspace = true }
 thiserror = { workspace = true }
-tracing = { workspace = true }
 anyhow = { workspace = true }
 tokio = { workspace = true }
 
 [dev-dependencies]
 insta = { workspace = true, features = ["json"] }
-sqlformat = { workspace = true }
\ No newline at end of file
+sqlformat = { workspace = true }
diff --git a/crates/tests/tests-common/Cargo.toml b/crates/tests/tests-common/Cargo.toml
index 7f478fe9..2fb34cb6 100644
--- a/crates/tests/tests-common/Cargo.toml
+++ b/crates/tests/tests-common/Cargo.toml
@@ -12,22 +12,11 @@ name = "tests_common"
 path = "src/lib.rs"
 
 [dependencies]
-ndc-bigquery = { path = "../../connectors/ndc-bigquery" }
-
 ndc-sdk = { workspace = true }
-ndc-test = { workspace = true }
-anyhow = { workspace = true }
 
 axum = { workspace = true }
 axum-test-helper = { workspace = true }
-env_logger = { workspace = true }
-hyper = { workspace = true, features = ["tcp"] }
-reqwest = { workspace = true }
 serde = { workspace = true }
 serde_json = { workspace = true, features = ["raw_value"] }
 serde_derive = { workspace = true }
-sqlx = { workspace = true, features = [ "json", "postgres", "runtime-tokio-rustls" ] }
-tokio = { workspace = true, features = ["full"] }
-tokio-postgres = { workspace = true }
 tracing = { workspace = true }
-uuid = { workspace = true, features = [ "v4", "fast-rng", "macro-diagnostics" ] }