feat: sdk should export its dependency (#3135)

tychoish authored Aug 13, 2024
1 parent 464a3f9 commit d2c43c7

Showing 9 changed files with 55 additions and 44 deletions.
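In short: the bindings previously imported datafusion, arrow_util, and terminal_util directly; this commit routes those imports through a new glaredb::ext module so downstream crates only need to depend on glaredb itself. A minimal before/after sketch in Rust, using the RecordBatch re-export that appears in this diff (the downstream crate itself is hypothetical):

// Before: a downstream crate needed its own datafusion dependency,
// pinned to a compatible version, just to name arrow types.
use datafusion::arrow::record_batch::RecordBatch;

// After: the same type through the SDK's re-export; no direct
// datafusion entry in the downstream Cargo.toml.
use glaredb::ext::RecordBatch;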
6 changes: 2 additions & 4 deletions Cargo.lock

Some generated files are not rendered by default.

4 changes: 0 additions & 4 deletions bindings/nodejs/Cargo.toml
@@ -10,14 +10,10 @@ crate-type = ["cdylib"]
 workspace = true
 
 [dependencies]
-arrow_util = { path = "../../crates/arrow_util" }
 glaredb = { path = "../../crates/glaredb" }
-terminal_util = { path = "../../crates/terminal_util" }
 futures = { workspace = true }
-datafusion = { workspace = true }
 thiserror = { workspace = true }
 url = { workspace = true }
-async-trait = { workspace = true }
 lzma-sys = { version = "*", features = ["static"] } # Prevent dynamic linking of lzma, which comes from datafusion
 napi = { version = "2.16.8", default-features = false, features = ["full"] }
 napi-derive = "2.16.10"
2 changes: 1 addition & 1 deletion bindings/nodejs/index.d.ts
@@ -11,7 +11,7 @@ export interface ConnectOptions {
   storageOptions?: Record<string, string>
 }
 /** Connect to a GlareDB database. */
-export function connect(dataDirOrCloudUrl?: string | undefined | null, options?: ConnectOptions | undefined | null): Promise<Connection>
+export declare function connect(dataDirOrCloudUrl?: string | undefined | null, options?: ConnectOptions | undefined | null): Promise<Connection>
 /** A connected session to a GlareDB database. */
 export class Connection {
   /**
16 changes: 10 additions & 6 deletions bindings/nodejs/src/execution.rs
@@ -1,9 +1,9 @@
 use std::sync::{Arc, Mutex};
 
-use arrow_util::pretty;
-use datafusion::arrow::ipc::writer::FileWriter;
 use futures::stream::StreamExt;
-use glaredb::{DatabaseError, RecordStream, SendableRecordBatchStream};
+use glaredb::ext::datafusion::arrow::ipc::writer::FileWriter;
+use glaredb::ext::SendableRecordBatchStream;
+use glaredb::{DatabaseError, RecordStream};
 
 use crate::error::JsDatabaseError;
 
@@ -96,9 +96,13 @@ async fn print_record_batches(stream: SendableRecordBatchStream) -> Result<(), JsDatabaseError> {
     let mut stream: RecordStream = stream.into();
     let batches = stream.to_vec().await?;
 
-    let disp =
-        pretty::pretty_format_batches(&schema, &batches, Some(terminal_util::term_width()), None)
-            .map_err(DatabaseError::from)?;
+    let disp = glaredb::ext::tools::pretty_format_batches(
+        &schema,
+        &batches,
+        Some(glaredb::ext::tools::term_width()),
+        None,
+    )
+    .map_err(DatabaseError::from)?;
 
     println!("{}", disp);
     Ok(())
6 changes: 2 additions & 4 deletions bindings/python/Cargo.toml
@@ -17,14 +17,12 @@ telemetry = { path = "../../crates/telemetry" }
 arrow_util = { path = "../../crates/arrow_util" }
 terminal_util = { path = "../../crates/terminal_util" }
 glaredb = { path = "../../crates/glaredb" }
-datafusion = { workspace = true, features = ["pyarrow"] }
 tokio = { workspace = true }
 thiserror = { workspace = true }
 url = { workspace = true }
 futures = { workspace = true }
 async-trait = { workspace = true }
+datafusion = { workspace = true, features = ["pyarrow"] } # override workspace features
+lzma-sys = { version = "*", features = ["static"] } # prevent dynamic linking of lzma, which comes from datafusion
 pyo3 = { version = "0.20.3", features = ["abi3-py37", "extension-module"] }
 once_cell = "1.19.0"
-
-# Prevent dynamic linking of lzma, which comes from datafusion
-lzma-sys = { version = "*", features = ["static"] }
7 changes: 3 additions & 4 deletions bindings/python/src/environment.rs
@@ -1,9 +1,8 @@
 use std::sync::Arc;
 
-use datafusion::arrow::array::RecordBatch;
-use datafusion::arrow::pyarrow::PyArrowType;
-use datafusion::datasource::{MemTable, TableProvider};
-use glaredb::EnvironmentReader;
+use glaredb::ext::datafusion::arrow::pyarrow::PyArrowType;
+use glaredb::ext::datafusion::datasource::{MemTable, TableProvider};
+use glaredb::ext::{EnvironmentReader, RecordBatch};
 use pyo3::prelude::*;
 use pyo3::types::{IntoPyDict, PyTuple, PyType};
 
30 changes: 15 additions & 15 deletions bindings/python/src/execution.rs
@@ -2,21 +2,21 @@ use std::any::Any;
 use std::fmt::Debug;
 use std::sync::{Arc, Mutex};
 
-use arrow_util::pretty;
 use async_trait::async_trait;
-use datafusion::arrow::datatypes::{Schema, SchemaRef};
-use datafusion::arrow::pyarrow::ToPyArrow;
-use datafusion::datasource::TableProvider;
-use datafusion::error::DataFusionError;
-use datafusion::execution::context::SessionState;
-use datafusion::execution::TaskContext;
-use datafusion::logical_expr::{TableProviderFilterPushDown, TableType};
-use datafusion::physical_plan::stream::RecordBatchStreamAdapter;
-use datafusion::physical_plan::streaming::{PartitionStream, StreamingTableExec};
-use datafusion::physical_plan::ExecutionPlan;
-use datafusion::prelude::Expr;
 use futures::StreamExt;
-use glaredb::{DatabaseError, Operation, RecordBatch, SendableRecordBatchStream};
+use glaredb::ext::datafusion::arrow::datatypes::{Schema, SchemaRef};
+use glaredb::ext::datafusion::arrow::pyarrow::ToPyArrow;
+use glaredb::ext::datafusion::datasource::TableProvider;
+use glaredb::ext::datafusion::error::DataFusionError;
+use glaredb::ext::datafusion::execution::context::SessionState;
+use glaredb::ext::datafusion::execution::TaskContext;
+use glaredb::ext::datafusion::logical_expr::{TableProviderFilterPushDown, TableType};
+use glaredb::ext::datafusion::physical_plan::stream::RecordBatchStreamAdapter;
+use glaredb::ext::datafusion::physical_plan::streaming::{PartitionStream, StreamingTableExec};
+use glaredb::ext::datafusion::physical_plan::ExecutionPlan;
+use glaredb::ext::datafusion::prelude::Expr;
+use glaredb::ext::{RecordBatch, SendableRecordBatchStream};
+use glaredb::{DatabaseError, Operation};
 use pyo3::exceptions::PyRuntimeError;
 use pyo3::prelude::*;
 use pyo3::types::PyTuple;

@@ -101,10 +101,10 @@ impl PyExecutionOutput {
     pub fn show(&mut self, py: Python) -> PyResult<()> {
         let (schema, batches) = self.resolve_operation(py)?;
 
-        let disp = pretty::pretty_format_batches(
+        let disp = glaredb::ext::tools::pretty_format_batches(
             &schema,
             &batches,
-            Some(terminal_util::term_width()),
+            Some(glaredb::ext::tools::term_width()),
             None,
         )
         .map_err(|e| PyRuntimeError::new_err(e.to_string()))?;
2 changes: 2 additions & 0 deletions crates/glaredb/Cargo.toml
@@ -14,6 +14,8 @@ test = false
 [dependencies]
 sqlexec = { path = "../sqlexec" }
 metastore = { path = "../metastore" }
+arrow_util = { path = "../arrow_util" }
+terminal_util = { path = "../terminal_util" }
 url = { workspace = true }
 datafusion = { workspace = true }
 futures = { workspace = true }
26 changes: 20 additions & 6 deletions crates/glaredb/src/lib.rs
@@ -19,27 +19,41 @@ use std::task::{Context, Poll};
 use datafusion::arrow::array::{StringArray, UInt64Array};
 use datafusion::arrow::datatypes::{DataType, Field, Schema};
 use datafusion::arrow::error::ArrowError;
-// public re-export so downstream users of this package don't have to
-// directly depend on DF (and our version no-less) to use our interfaces.
-pub use datafusion::arrow::record_batch::RecordBatch;
 use datafusion::error::DataFusionError;
 use datafusion::logical_expr::LogicalPlan;
 use datafusion::physical_plan::stream::RecordBatchStreamAdapter;
-pub use datafusion::physical_plan::SendableRecordBatchStream;
-pub use datafusion::scalar::ScalarValue;
 use derive_builder::Builder;
 use futures::lock::Mutex;
 use futures::stream::{self, Stream, StreamExt};
 use futures::TryStreamExt;
 use metastore::errors::MetastoreError;
 use sqlexec::engine::{Engine, EngineStorage, TrackedSession};
-pub use sqlexec::environment::EnvironmentReader;
 use sqlexec::errors::ExecError;
 use sqlexec::remote::client::RemoteClientType;
 use sqlexec::session::ExecutionResult;
 use sqlexec::OperationInfo;
 use url::Url;
 
+// public re-export so downstream users of this package don't have to
+// directly depend on DF (and our version no-less) to use our interfaces.
+pub mod ext {
+    pub use datafusion;
+    pub use datafusion::arrow;
+    pub use datafusion::arrow::record_batch::RecordBatch;
+    pub use datafusion::physical_plan::SendableRecordBatchStream;
+    pub use datafusion::scalar::ScalarValue;
+    pub use sqlexec::environment::EnvironmentReader;
+
+    // public exports for some quasi-internal tools used by external and
+    // downstream dependencies to reduce friction/dependencies.
+    pub mod tools {
+        pub use arrow_util::pretty::pretty_format_batches;
+        pub use terminal_util::term_width;
+    }
+}
+
+use crate::ext::{EnvironmentReader, RecordBatch, ScalarValue, SendableRecordBatchStream};
+
 /// ConnectOptions are the set of options to configure a GlareDB
 /// instance, and are an analogue to the commandline arguments to
 /// produce a "running database". The ConnectOptionsBuilder provides a
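For reference, a sketch of how a binding crate can use the new ext module end to end. The glaredb::ext paths and the four-argument call shape come straight from the diffs above; the helper function, its parameter types, and the error handling are illustrative assumptions, not part of this commit.

use std::sync::Arc;

// Everything arrives through glaredb's re-exports; the binding crate no
// longer lists datafusion, arrow_util, or terminal_util in its Cargo.toml.
use glaredb::ext::datafusion::arrow::datatypes::Schema;
use glaredb::ext::tools::{pretty_format_batches, term_width};
use glaredb::ext::RecordBatch;

// Hypothetical helper mirroring the call shape in
// bindings/nodejs/src/execution.rs and bindings/python/src/execution.rs;
// exact parameter and error types are assumed here.
fn show_batches(schema: &Arc<Schema>, batches: &[RecordBatch]) -> Result<(), String> {
    let disp = pretty_format_batches(schema, batches, Some(term_width()), None)
        .map_err(|e| e.to_string())?;
    println!("{}", disp);
    Ok(())
}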
