Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Consistent sync/async handling, allow more functions to be async for wasm. #1936

Merged
merged 12 commits into the base branch from the contributor's branch
Jul 2, 2024
67 changes: 47 additions & 20 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 4 additions & 3 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,6 @@ readme = "README.md"
license = "MIT OR Apache-2.0"

[workspace.dependencies]
async-trait = "0.1.80"
bytemuck = "1.16.1"
candle-core = { version = "0.5.1" }
clap = { version = "4.5.7", features = ["derive"] }
Expand Down Expand Up @@ -83,14 +82,16 @@ tracing-subscriber = "0.3.18"
web-time = "1.1.0"
zip = "2.1.3"

# Async handling
pollster = "0.3"
async-channel = "2.3"

# Terminal UI
ratatui = "0.26.3"
crossterm = "0.27.0"

# WGPU stuff
futures-intrusive = "0.5.0"
text_placeholder = "0.5.0"
pollster = "0.3.0"
wgpu = "0.20.1"

# Benchmarks and Burnbench
Expand Down
20 changes: 7 additions & 13 deletions crates/burn-autodiff/src/ops/bool_tensor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use crate::{checkpoint::strategy::CheckpointStrategy, tensor::AutodiffTensor, Au
use burn_tensor::{
backend::Backend,
ops::{BoolTensor, BoolTensorOps, IntTensor},
Device, Reader, Shape, TensorData,
Device, Shape, TensorData,
};

impl<B: Backend, C: CheckpointStrategy> BoolTensorOps<Self> for Autodiff<B, C> {
Expand All @@ -15,12 +15,8 @@ impl<B: Backend, C: CheckpointStrategy> BoolTensorOps<Self> for Autodiff<B, C> {
B::bool_shape(tensor)
}

fn bool_to_data<const D: usize>(tensor: &BoolTensor<B, D>) -> Reader<TensorData> {
B::bool_to_data(tensor)
}

fn bool_into_data<const D: usize>(tensor: BoolTensor<B, D>) -> Reader<TensorData> {
B::bool_into_data(tensor)
/// Reads the tensor's contents into a `TensorData`.
///
/// Bool tensors carry no gradient information in the autodiff backend, so this
/// delegates directly to the inner backend `B` and awaits its read — the
/// `async` signature lets wasm targets avoid blocking on the device read.
async fn bool_into_data<const D: usize>(tensor: BoolTensor<B, D>) -> TensorData {
B::bool_into_data(tensor).await
}

fn bool_into_int<const D: usize>(tensor: BoolTensor<B, D>) -> IntTensor<B, D> {
Expand Down Expand Up @@ -121,14 +117,12 @@ impl<B: Backend, C: CheckpointStrategy> BoolTensorOps<Self> for Autodiff<B, C> {
B::bool_flip(tensor, axes)
}

#[cfg(any(feature = "wasm-sync", not(target_family = "wasm")))]
fn bool_argwhere<const D: usize>(tensor: BoolTensor<B, D>) -> IntTensor<B, 2> {
B::bool_argwhere(tensor)
/// Returns the indices of `true` elements as a rank-2 int tensor.
///
/// Pure delegation to the inner backend `B`; no autodiff bookkeeping is needed
/// for a bool input. Async so the backend's device read can be awaited on wasm.
async fn bool_argwhere<const D: usize>(tensor: BoolTensor<B, D>) -> IntTensor<B, 2> {
B::bool_argwhere(tensor).await
}

#[cfg(any(feature = "wasm-sync", not(target_family = "wasm")))]
fn bool_nonzero<const D: usize>(tensor: BoolTensor<B, D>) -> Vec<IntTensor<B, 1>> {
B::bool_nonzero(tensor)
/// Returns the nonzero (`true`) element indices as a list of rank-1 int
/// tensors — presumably one per dimension of the input; confirm against the
/// `BoolTensorOps` trait docs. Delegates to the inner backend `B`.
async fn bool_nonzero<const D: usize>(tensor: BoolTensor<B, D>) -> Vec<IntTensor<B, 1>> {
B::bool_nonzero(tensor).await
}

fn bool_expand<const D: usize, const D2: usize>(
Expand Down
13 changes: 3 additions & 10 deletions crates/burn-autodiff/src/ops/int_tensor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ use crate::{checkpoint::strategy::CheckpointStrategy, tensor::AutodiffTensor, Au
use burn_tensor::{
backend::Backend,
ops::{BoolTensor, IntTensor, IntTensorOps},
Device, Distribution, Reader, Shape, TensorData,
Device, Distribution, Shape, TensorData,
};

impl<B: Backend, C: CheckpointStrategy> IntTensorOps<Self> for Autodiff<B, C> {
Expand All @@ -15,12 +15,8 @@ impl<B: Backend, C: CheckpointStrategy> IntTensorOps<Self> for Autodiff<B, C> {
B::int_shape(tensor)
}

fn int_to_data<const D: usize>(tensor: &IntTensor<B, D>) -> Reader<TensorData> {
B::int_to_data(tensor)
}

fn int_into_data<const D: usize>(tensor: IntTensor<B, D>) -> Reader<TensorData> {
B::int_into_data(tensor)
/// Reads the int tensor's contents into a `TensorData`.
///
/// Int tensors are not tracked by autodiff, so this is a straight delegation
/// to the inner backend `B`, awaiting its (possibly asynchronous) read.
async fn int_into_data<const D: usize>(tensor: IntTensor<B, D>) -> TensorData {
B::int_into_data(tensor).await
}

fn int_to_device<const D: usize>(
Expand Down Expand Up @@ -380,7 +376,6 @@ impl<B: Backend, C: CheckpointStrategy> IntTensorOps<Self> for Autodiff<B, C> {
B::int_expand(tensor, shape)
}

#[cfg(any(feature = "wasm-sync", not(target_family = "wasm")))]
fn int_sort<const D: usize>(
tensor: IntTensor<Self, D>,
dim: usize,
Expand All @@ -389,7 +384,6 @@ impl<B: Backend, C: CheckpointStrategy> IntTensorOps<Self> for Autodiff<B, C> {
B::int_sort(tensor, dim, descending)
}

#[cfg(any(feature = "wasm-sync", not(target_family = "wasm")))]
fn int_sort_with_indices<const D: usize>(
tensor: IntTensor<Self, D>,
dim: usize,
Expand All @@ -398,7 +392,6 @@ impl<B: Backend, C: CheckpointStrategy> IntTensorOps<Self> for Autodiff<B, C> {
B::int_sort_with_indices(tensor, dim, descending)
}

#[cfg(any(feature = "wasm-sync", not(target_family = "wasm")))]
fn int_argsort<const D: usize>(
tensor: IntTensor<Self, D>,
dim: usize,
Expand Down
13 changes: 3 additions & 10 deletions crates/burn-autodiff/src/ops/tensor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ use crate::{
use burn_tensor::{
backend::Backend,
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, IntTensor},
Device, ElementConversion, Reader, Shape, Tensor, TensorData,
Device, ElementConversion, Shape, Tensor, TensorData,
};

use super::maxmin::MaxMinDim;
Expand Down Expand Up @@ -50,12 +50,8 @@ impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>
B::float_shape(&tensor.primitive)
}

fn float_to_data<const D: usize>(tensor: &FloatTensor<Self, D>) -> Reader<TensorData> {
B::float_to_data(&tensor.primitive)
}

fn float_into_data<const D: usize>(tensor: FloatTensor<Self, D>) -> Reader<TensorData> {
B::float_into_data(tensor.primitive)
/// Reads the float tensor's contents into a `TensorData`.
///
/// Unwraps the autodiff wrapper (`tensor.primitive`) — the gradient graph is
/// discarded here; only the raw values are read from the inner backend `B`.
async fn float_into_data<const D: usize>(tensor: FloatTensor<Self, D>) -> TensorData {
B::float_into_data(tensor.primitive).await
}

fn float_device<const D: usize>(tensor: &FloatTensor<Self, D>) -> Device<Self> {
Expand Down Expand Up @@ -2364,7 +2360,6 @@ impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>
}
}

#[cfg(any(feature = "wasm-sync", not(target_family = "wasm")))]
fn float_sort<const D: usize>(
tensor: FloatTensor<Self, D>,
dim: usize,
Expand All @@ -2387,7 +2382,6 @@ impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>
}
}

#[cfg(any(feature = "wasm-sync", not(target_family = "wasm")))]
fn float_sort_with_indices<const D: usize>(
tensor: FloatTensor<Self, D>,
dim: usize,
Expand Down Expand Up @@ -2416,7 +2410,6 @@ impl<B: Backend, C: CheckpointStrategy> FloatTensorOps<Self> for Autodiff<B, C>
}
}

#[cfg(any(feature = "wasm-sync", not(target_family = "wasm")))]
fn float_argsort<const D: usize>(
tensor: FloatTensor<Self, D>,
dim: usize,
Expand Down
2 changes: 1 addition & 1 deletion crates/burn-candle/src/ops/base.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use std::marker::PhantomData;

use burn_tensor::{backend::Backend, Reader, Shape, TensorData};
use burn_tensor::{backend::Backend, Shape, TensorData};

use crate::{
element::{CandleElement, FloatCandleElement, IntCandleElement},
Expand Down
8 changes: 3 additions & 5 deletions crates/burn-candle/src/ops/bool_tensor.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use burn_tensor::{
ops::{BoolTensor, BoolTensorOps, FloatTensor, IntTensor},
Device, Reader, Shape, TensorData,
Device, Shape, TensorData,
};

use crate::{
Expand All @@ -19,12 +19,10 @@ impl<F: FloatCandleElement, I: IntCandleElement> BoolTensorOps<Self> for Candle<
super::base::shape(tensor)
}

fn bool_into_data<const D: usize>(tensor: BoolTensor<Self, D>) -> Reader<TensorData> {
async fn bool_into_data<const D: usize>(tensor: BoolTensor<Self, D>) -> TensorData {
let x: Vec<u8> = tensor.tensor.flatten_all().unwrap().to_vec1().unwrap();
let y = x.iter().map(|b| !matches!(b, 0)).collect();
let data = TensorData::new(y, tensor.shape());

Reader::Concrete(data)
TensorData::new(y, tensor.shape())
}

fn bool_from_data<const D: usize>(
Expand Down
6 changes: 3 additions & 3 deletions crates/burn-candle/src/ops/int_tensor.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use burn_tensor::{
ops::{BoolTensor, FloatTensor, IntElem, IntTensor, IntTensorOps},
Bool, Device, Distribution, ElementConversion, Reader, Shape, TensorData,
Bool, Device, Distribution, ElementConversion, Shape, TensorData,
};

use crate::{
Expand All @@ -19,8 +19,8 @@ impl<F: FloatCandleElement, I: IntCandleElement> IntTensorOps<Self> for Candle<F
super::base::shape(tensor)
}

fn int_into_data<const D: usize>(tensor: IntTensor<Self, D>) -> Reader<TensorData> {
Reader::Concrete(super::base::into_data(tensor))
/// Reads the int tensor's contents into a `TensorData`.
///
/// Note: the body contains no `.await` — the candle read in
/// `super::base::into_data` is synchronous, so this future completes
/// immediately; the `async` signature only matches the backend trait.
async fn int_into_data<const D: usize>(tensor: IntTensor<Self, D>) -> TensorData {
super::base::into_data(tensor)
}

fn int_from_data<const D: usize>(
Expand Down
6 changes: 3 additions & 3 deletions crates/burn-candle/src/ops/tensor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@ use std::borrow::Borrow;

use burn_tensor::{
ops::{BoolTensor, FloatElem, FloatTensor, FloatTensorOps, FullPrecisionBackend, IntTensor},
Device, Distribution, ElementConversion, Reader, Shape, TensorData,
Device, Distribution, ElementConversion, Shape, TensorData,
};
use candle_core::{backend::BackendStorage, shape, Tensor};

Expand Down Expand Up @@ -59,8 +59,8 @@ impl<F: FloatCandleElement, I: IntCandleElement> FloatTensorOps<Self> for Candle
super::base::shape(tensor)
}

fn float_into_data<const D: usize>(tensor: CandleTensor<F, D>) -> Reader<TensorData> {
Reader::Concrete(super::base::into_data(tensor))
/// Reads the float tensor's contents into a `TensorData`.
///
/// As with the int variant, there is no `.await` in the body: candle's read is
/// synchronous and the `async` signature exists to satisfy the backend trait.
async fn float_into_data<const D: usize>(tensor: CandleTensor<F, D>) -> TensorData {
super::base::into_data(tensor)
}

fn float_device<const D: usize>(tensor: &CandleTensor<F, D>) -> Device<Self> {
Expand Down
Loading
Loading