Bump ndarray from 0.15.3 to 0.16 #1492

Merged · 1 commit · Sep 16, 2024
4 changes: 2 additions & 2 deletions Cargo.toml
@@ -151,8 +151,8 @@ log = "0.4.14"
 maplit = "1.0.2"
 memmap2 = "0.9"
 metal = { version = "0.27.0", features = ["mps"] }
-ndarray = "0.15.3"
-ndarray-npy = { version = "0.8.0", features = [ "compressed_npz" ] }
+ndarray = "0.16"
+ndarray-npy = { version = "0.9.1", features = [ "compressed_npz" ] }
 nom = "7.0.0"
 nu-ansi-term = "0.46"
 num-complex = "0.4.0"
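The dependency bump drives everything below: ndarray 0.16 deprecates `into_shape`, whose result depended on the source's memory layout, in favor of `into_shape_with_order`, and `ndarray-npy` 0.9.1 is the matching release for ndarray 0.16. A minimal sketch of the renamed call, separate from this PR's code:

```rust
use ndarray::{arr1, Order};

fn main() {
    let flat = arr1(&[1.0f32, 2.0, 3.0, 4.0, 5.0, 6.0]);

    // ndarray 0.15: flat.into_shape((2, 3)); 0.16 names the read order,
    // defaulting to row-major (C order):
    let c = flat.clone().into_shape_with_order((2, 3)).unwrap();
    assert_eq!(c[[0, 2]], 3.0);

    // An explicit order rides along as a (shape, Order) tuple:
    let f = flat.into_shape_with_order(((2, 3), Order::ColumnMajor)).unwrap();
    assert_eq!(f[[0, 2]], 5.0);
}
```

With the default row-major order the rename is behavior-preserving for c-contiguous sources, which covers almost every call site in this diff; the LSTM harness below is the one place that needs an explicit order.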
6 changes: 3 additions & 3 deletions core/src/ops/array/gather_nd.rs
@@ -35,19 +35,19 @@ impl GatherNd {
         let remaining = indices.shape().iter().skip(batch_dims).rev().skip(1).product();
         let indices_shape_op = tvec!(batch_size, remaining, n);
         let reshaped_indices: ArrayViewD<i32> =
-            indices.view().into_shape(&*indices_shape_op).unwrap();
+            indices.view().into_shape_with_order(&*indices_shape_op).unwrap();

         let mut data_shape_op: TVec<usize> =
             data.shape().iter().skip(batch_dims).copied().collect();
         data_shape_op.insert(0, batch_size);
         let reshaped_data =
-            data.to_array_view_unchecked::<T>().into_shape(&*data_shape_op).unwrap();
+            data.to_array_view_unchecked::<T>().into_shape_with_order(&*data_shape_op).unwrap();

         let mut output_shape_op: TVec<usize> =
             data.shape().iter().skip(n + batch_dims).copied().collect();
         output_shape_op.insert(0, batch_size * remaining);
         let mut output =
-            output.to_array_view_mut_unchecked::<T>().into_shape(&*output_shape_op).unwrap();
+            output.to_array_view_mut_unchecked::<T>().into_shape_with_order(&*output_shape_op).unwrap();

         for b in 0..batch_size {
             let mut i = reshaped_data.view();
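All three call sites above reshape array views rather than owned arrays. `into_shape_with_order` on a view is still zero-copy, so it only succeeds when the view is contiguous in the requested order; 0.16's `to_shape` is the fallback that copies when it must. A small sketch, independent of GatherNd's code:

```rust
use ndarray::arr2;

fn main() {
    let a = arr2(&[[1, 2], [3, 4]]);

    // A c-contiguous view reshapes zero-copy under the default row-major order:
    let v = a.view().into_shape_with_order(4).unwrap();
    assert_eq!(v.to_vec(), vec![1, 2, 3, 4]);

    // A transposed view is not c-contiguous; to_shape still works by
    // copying into a CowArray, reading in logical row-major order:
    let c = a.t().to_shape(4).unwrap();
    assert_eq!(c.to_vec(), vec![1, 3, 2, 4]);
}
```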
2 changes: 1 addition & 1 deletion core/src/ops/einsum/eval.rs
@@ -131,7 +131,7 @@ pub fn eval_q(expr: &AxesMapping, qp: DatumType, inputs: TVec<TValue>) -> TractR
     } else {
         let mut bias_shape = tvec!(1; output.ndim());
         bias_shape[expr.axis((InOut::In(2), 0))?.outputs[0][0]] = bias.len();
-        let bias = bias.to_array_view::<i32>()?.into_shape(&*bias_shape)?;
+        let bias = bias.to_array_view::<i32>()?.into_shape_with_order(&*bias_shape)?;
         output = output + bias;
     }
43 changes: 11 additions & 32 deletions data/src/tensor/litteral.rs
@@ -3,27 +3,24 @@ use crate::datum::Datum
 use ndarray::*;
 use std::sync::Arc;

-pub fn arr4<A, V, U, T>(xs: &[V]) -> Array4<A>
+pub fn arr4<A, const N: usize, const M: usize, const T: usize>(xs: &[[[[A;T];M];N]]) -> Array4<A>
 where
-    V: FixedInitializer<Elem = U> + Clone,
-    U: FixedInitializer<Elem = T> + Clone,
-    T: FixedInitializer<Elem = A> + Clone,
     A: Clone,
 {
     use ndarray::*;
     let xs = xs.to_vec();
-    let dim = Ix4(xs.len(), V::len(), U::len(), T::len());
+    let dim = Ix4(xs.len(), N, M, T);
     let len = xs.len();
     let cap = xs.capacity();
-    let expand_len = len * V::len() * U::len() * T::len();
+    let expand_len = len * N * M * T;
     let ptr = Box::into_raw(xs.into_boxed_slice());
     unsafe {
         let v = if ::std::mem::size_of::<A>() == 0 {
             Vec::from_raw_parts(ptr as *mut A, expand_len, expand_len)
-        } else if V::len() == 0 || U::len() == 0 || T::len() == 0 {
+        } else if N == 0 || M == 0 || T == 0 {
             Vec::new()
         } else {
-            let expand_cap = cap * V::len() * U::len() * T::len();
+            let expand_cap = cap * N * M * T;
             Vec::from_raw_parts(ptr as *mut A, expand_len, expand_cap)
         };
         ArrayBase::from_shape_vec_unchecked(dim, v)

@@ -38,26 +35,17 @@ pub fn tensor1<A: Datum>(xs: &[A]) -> Tensor {
     Tensor::from(arr1(xs))
 }

-pub fn tensor2<A: Datum, T>(xs: &[T]) -> Tensor
-where
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn tensor2<A: Datum, const N: usize>(xs: &[[A;N]]) -> Tensor
 {
     Tensor::from(arr2(xs))
 }

-pub fn tensor3<A: Datum, T, U>(xs: &[U]) -> Tensor
-where
-    U: FixedInitializer<Elem = T> + Clone,
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn tensor3<A: Datum, const N: usize, const M: usize>(xs: &[[[A;M];N]]) -> Tensor
 {
     Tensor::from(arr3(xs))
 }

-pub fn tensor4<A: Datum, T, U, V>(xs: &[V]) -> Tensor
-where
-    V: FixedInitializer<Elem = U> + Clone,
-    U: FixedInitializer<Elem = T> + Clone,
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn tensor4<A: Datum, const N: usize, const M: usize, const T: usize>(xs: &[[[[A;T];M];N]]) -> Tensor
 {
     Tensor::from(arr4(xs))
 }

@@ -70,26 +58,17 @@ pub fn rctensor1<A: Datum>(xs: &[A]) -> Arc<Tensor> {
     Arc::new(Tensor::from(arr1(xs)))
 }

-pub fn rctensor2<A: Datum, T>(xs: &[T]) -> Arc<Tensor>
-where
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn rctensor2<A: Datum, const N: usize>(xs: &[[A;N]]) -> Arc<Tensor>
 {
     Arc::new(Tensor::from(arr2(xs)))
 }

-pub fn rctensor3<A: Datum, T, U>(xs: &[U]) -> Arc<Tensor>
-where
-    U: FixedInitializer<Elem = T> + Clone,
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn rctensor3<A: Datum, const N: usize, const M: usize>(xs: &[[[A;M];N]]) -> Arc<Tensor>
 {
     Arc::new(Tensor::from(arr3(xs)))
 }

-pub fn rctensor4<A: Datum, T, U, V>(xs: &[V]) -> Arc<Tensor>
-where
-    V: FixedInitializer<Elem = U> + Clone,
-    U: FixedInitializer<Elem = T> + Clone,
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn rctensor4<A: Datum, const N: usize, const M: usize, const T: usize>(xs: &[[[[A;T];M];N]]) -> Arc<Tensor>
 {
     Arc::new(Tensor::from(arr4(xs)))
 }
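The litteral.rs change tracks ndarray 0.16's removal of the `FixedInitializer` trait: `arr2`/`arr3`/`arr4` now take nested fixed-size arrays, with the dimensions carried as const generics inferred at the call site, so tract's literal helpers follow the same pattern. A minimal sketch (`tensor2_like` is a hypothetical stand-in, not tract's actual `tensor2`):

```rust
use ndarray::{arr2, Array2};

// Hypothetical stand-in for tract's tensor2: the row width N is a const
// generic inferred from the literal, so call sites look unchanged.
fn tensor2_like<A: Clone, const N: usize>(xs: &[[A; N]]) -> Array2<A> {
    arr2(xs)
}

fn main() {
    let t = tensor2_like(&[[1, 2, 3], [4, 5, 6]]);
    assert_eq!(t.shape(), &[2, 3]);
}
```

Because N, M, and T are inferred from the nested array literal, existing call sites compile unchanged; only the helper signatures move.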
2 changes: 1 addition & 1 deletion harness/core-proptest-pulse/src/delay_plus_downsample.rs
@@ -15,7 +15,7 @@ struct DelayPlusDownsampleProblem {
 }

 fn t(n: usize) -> ArrayD<f32> {
-    arr1(&(0..n).map(|x| x as f32).collect_vec()).into_shape(vec![1, n, 1]).unwrap()
+    arr1(&(0..n).map(|x| x as f32).collect_vec()).into_shape_with_order(vec![1, n, 1]).unwrap()
 }

 impl Arbitrary for DelayPlusDownsampleProblem {
2 changes: 1 addition & 1 deletion harness/core-proptest-pulse/src/delay_plus_pool.rs
@@ -60,7 +60,7 @@ impl DelayPlusPoolProblem {
         );
         let pool = model.wire_node("pool", MaxPool::new(pool_spec, None), &crop).unwrap();
         model.set_output_outlets(&pool).unwrap();
-        let input = arr1(&self.input).into_shape((1, self.input.len(), 1)).unwrap().into_dyn();
+        let input = arr1(&self.input).into_shape_with_order((1, self.input.len(), 1)).unwrap().into_dyn();
         proptest_regular_against_pulse(model, self.pulse as _, input, 1)
     }
 }
9 changes: 5 additions & 4 deletions harness/lstm-proptest-onnx-vs-tf/src/lib.rs
@@ -4,6 +4,7 @@ use proptest::prelude::*;

 use tract_hir::internal::*;
 use tract_ndarray::prelude::*;
+use tract_ndarray::Order;
 use tract_onnx::prelude::*;
 use tract_onnx::tract_hir;

@@ -39,9 +40,9 @@ impl LstmProblem {
                 .slice_axis_mut(Axis(0), (s * icfo..s * (icfo + 1)).into())
                 .assign(&self.b_icfo.slice_axis(Axis(0), (s * iofc..s * (iofc + 1)).into()));
         }
-        let w_iofc = w_iofc.t().into_shape((1, 4 * s, s))?.to_owned();
-        let r_iofc = r_iofc.t().into_shape((1, 4 * s, s))?.to_owned();
-        let b_iofc = b_iofc.into_shape((1, 8 * s))?;
+        let w_iofc = w_iofc.t().into_shape_with_order(((1, 4 * s, s), Order::ColumnMajor))?.to_owned();
+        let r_iofc = r_iofc.t().into_shape_with_order(((1, 4 * s, s), Order::ColumnMajor))?.to_owned();
+        let b_iofc = b_iofc.into_shape_with_order(((1, 8 * s), Order::ColumnMajor))?;

         let x = model.add_source("x", self.x.datum_type().fact(self.x.shape()).into())?;
         let op = tract_onnx::ops::rec::common::CommonRec {

@@ -190,7 +191,7 @@ impl LstmProblem {
         let plan = SimplePlan::new(model)?;
         let mut state = SimpleState::new(plan)?;
         let y = state.run(tvec!(self.x.clone()))?.remove(0).into_tensor().into_array::<f32>()?;
-        let y = y.into_shape((self.length, self.batch_size, self.cell_size)).unwrap();
+        let y = y.into_shape_with_order((self.length, self.batch_size, self.cell_size)).unwrap();
         Ok(y.into_tvalue())
     }
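This is the one non-mechanical hunk: `w_iofc.t()` and `r_iofc.t()` are transposed, f-contiguous views, so the row-major default won't do. Under 0.15, `into_shape` walked such a view in its memory order (column-major); passing `Order::ColumnMajor` is how 0.16 states that behavior explicitly. A minimal sketch of the distinction, not taken from this harness:

```rust
use ndarray::{arr2, Order};

fn main() {
    let w = arr2(&[[1, 2], [3, 4]]);
    let wt = w.t(); // f-contiguous transposed view

    // The row-major default refuses a zero-copy reshape of an f-contiguous view:
    assert!(wt.into_shape_with_order(4).is_err());

    // Column-major order walks the data as 0.15's into_shape did, zero-copy:
    let flat = w.t().into_shape_with_order((4, Order::ColumnMajor)).unwrap();
    assert_eq!(flat.to_vec(), vec![1, 2, 3, 4]);
}
```

For `b_iofc` the target shape is (1, 8 * s), which has a unit axis and reads the same in either order, so the order choice there is inconsequential.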
2 changes: 1 addition & 1 deletion harness/tf-moz-deepspeech/src/lib.rs
@@ -39,7 +39,7 @@ fn parse_tensor<T: Datum + FromStr>(s: &str) -> TractResult<Tensor> {
     let shape = &shape[1..shape.len() - 1];
     let shape: Vec<usize> = shape.split(',').map(|s| s.parse().unwrap()).collect();
     Ok(tract_ndarray::Array1::from(tokens.filter_map(|s| s.parse::<T>().ok()).collect::<Vec<_>>())
-        .into_shape(shape)?
+        .into_shape_with_order(shape)?
        .into())
 }
2 changes: 1 addition & 1 deletion linalg/src/frame/mmm/pack.rs
@@ -555,7 +555,7 @@ mod test {
                 self.mn_range.clone(),
             )
         };
-        output.into_array::<u32>().unwrap().into_shape((panels, panel_len)).unwrap()
+        output.into_array::<u32>().unwrap().into_shape_with_order((panels, panel_len)).unwrap()
     }

     fn reference(&self) -> Array2<u32> {
2 changes: 1 addition & 1 deletion linalg/tests/virtual_im2col.rs
@@ -231,7 +231,7 @@ impl EagerIm2colSpec {
             [kh, kw, ci, h, w],
             |(kh, kw, ci, h, w)| *input.at([ci, h + kh, w + kw]).unwrap(),
         )
-        .into_shape([k, n])
+        .into_shape_with_order([k, n])
         .unwrap();
         Box::new(EagerIm2col { im2col: im2col.into_tensor(), packer: self.packer.clone(), k })
     }
8 changes: 4 additions & 4 deletions onnx/src/ops/nn/batch_norm.rs
@@ -23,10 +23,10 @@ impl BatchNorm {
         T: Datum + tract_num_traits::Float,
         f32: AsPrimitive<T>,
     {
-        let scale = scale.to_array_view::<T>()?.into_shape((c_dim,))?;
-        let beta = beta.to_array_view::<T>()?.into_shape((c_dim,))?;
-        let mean = mean.to_array_view::<T>()?.into_shape((c_dim,))?;
-        let var = var.to_array_view::<T>()?.into_shape((c_dim,))?;
+        let scale = scale.to_array_view::<T>()?.into_shape_with_order((c_dim,))?;
+        let beta = beta.to_array_view::<T>()?.into_shape_with_order((c_dim,))?;
+        let mean = mean.to_array_view::<T>()?.into_shape_with_order((c_dim,))?;
+        let var = var.to_array_view::<T>()?.into_shape_with_order((c_dim,))?;

         let denominator = var.mapv(|x| (x + self.epsilon.as_()).sqrt());
2 changes: 1 addition & 1 deletion tensorflow/src/conform/tf.rs
@@ -106,7 +106,7 @@ impl From<Tensor> for TensorHolder {

 fn tensor_to_array<T: ::tensorflow::TensorType>(tensor: &tf::Tensor<T>) -> TractResult<ArrayD<T>> {
     let shape: Vec<usize> = tensor.dims().iter().map(|d| *d as _).collect();
-    Ok(Array::from(tensor.into_iter().cloned().collect::<Vec<_>>()).into_shape(shape)?)
+    Ok(Array::from(tensor.into_iter().cloned().collect::<Vec<_>>()).into_shape_with_order(shape)?)
 }

 impl Tensorflow {
2 changes: 1 addition & 1 deletion tensorflow/src/ops/nn/conv2d.rs
@@ -24,7 +24,7 @@ mod tests {

     fn mk(sizes: &[usize]) -> Tensor {
         Array::range(1f32, sizes.iter().product::<usize>() as f32 + 1.0, 1.0)
-            .into_shape(sizes)
+            .into_shape_with_order(sizes)
             .unwrap()
             .into()
     }
2 changes: 1 addition & 1 deletion tensorflow/src/ops/nn/s2b/mod.rs
@@ -82,7 +82,7 @@ fn batch_to_space<T: Copy + Datum + Zero>(
     let batches = data.shape()[0] / block_size;
     unflatten_blocked_shape.push(batches);
     unflatten_blocked_shape.extend(&data.shape()[1..]);
-    let data = data.into_shape(&*unflatten_blocked_shape)?;
+    let data = data.into_shape_with_order(&*unflatten_blocked_shape)?;
     let mut permuted_axes = vec![block_shape.len()];
     let mut padded_shape = vec![batches];
     for i in 0..block_shape.shape()[0] {
4 changes: 2 additions & 2 deletions tensorflow/tests/ops_nn_conv2d.rs
@@ -54,11 +54,11 @@ fn img_and_ker() -> BoxedStrategy<(Tensor, Tensor, (usize, usize))> {
         .prop_map(|(img_shape, ker_shape, img, ker, strides)| {
             (
                 tract_ndarray::Array::from(img.into_iter().map(|i| i as f32).collect::<Vec<_>>())
-                    .into_shape(img_shape)
+                    .into_shape_with_order(img_shape)
                     .unwrap()
                     .into(),
                 tract_ndarray::Array::from(ker.into_iter().map(|i| i as f32).collect::<Vec<_>>())
-                    .into_shape(ker_shape)
+                    .into_shape_with_order(ker_shape)
                     .unwrap()
                     .into(),
                 strides,
4 changes: 2 additions & 2 deletions tensorflow/tests/ops_nn_dwconv2d.rs
@@ -52,10 +52,10 @@ fn img_and_ker() -> BoxedStrategy<(Array4<f32>, Array4<f32>, usize)> {
         .prop_map(|(img_shape, ker_shape, img, ker, stride)| {
             (
                 Array::from(img.into_iter().map(|i| i as f32).collect::<Vec<_>>())
-                    .into_shape(img_shape)
+                    .into_shape_with_order(img_shape)
                     .unwrap(),
                 Array::from(ker.into_iter().map(|i| i as f32).collect::<Vec<_>>())
-                    .into_shape(ker_shape)
+                    .into_shape_with_order(ker_shape)
                     .unwrap(),
                 stride,
             )
2 changes: 1 addition & 1 deletion tensorflow/tests/ops_nn_pools.rs
@@ -32,7 +32,7 @@ fn img_and_pool() -> BoxedStrategy<(Array4<f32>, (usize, usize), String, usize)>
             )
         })
         .prop_map(|(img_shape, k, img, padding, stride)| {
-            (Array::from(img).into_shape(img_shape).unwrap(), k, padding, stride)
+            (Array::from(img).into_shape_with_order(img_shape).unwrap(), k, padding, stride)
         })
         .boxed()
 }
2 changes: 1 addition & 1 deletion test-rt/suite-unit/src/conv_f32.rs
@@ -154,7 +154,7 @@ impl ConvProblem {
         if let Some(bias) = &self.bias {
             let mut shape = vec![1; out.ndim()];
             shape[shape_out.c_axis()] = bias.len();
-            out += &bias.clone().into_shape(shape).unwrap();
+            out += &bias.clone().into_shape_with_order(shape).unwrap();
         }
         out
     }
2 changes: 1 addition & 1 deletion test-rt/suite-unit/src/conv_q.rs
@@ -189,7 +189,7 @@ impl QConvProblem {
         if let Some(bias) = &self.bias {
             let mut shape = vec![1; temp.ndim()];
             shape[shape_out.c_axis()] = bias.len();
-            temp += &bias.clone().into_shape(shape).unwrap();
+            temp += &bias.clone().into_shape_with_order(shape).unwrap();
         }
         let cdt = self.output_dt();
         temp.axis_iter_mut(Axis(shape_out.c_axis())).zip(k_scale).for_each(
4 changes: 2 additions & 2 deletions test-rt/suite-unit/src/deconv.rs
@@ -204,7 +204,7 @@ impl DeconvProblem {
         if let Some(b) = &self.bias {
             let mut bias_shape = tvec!(1; output_shape.rank());
             bias_shape[output_shape.c_axis()] = co;
-            let b = b.clone().into_shape(&*bias_shape)?;
+            let b = b.clone().into_shape_with_order(&*bias_shape)?;
             output += &b;
         }
         let co_per_group = co / self.group;

@@ -599,7 +599,7 @@ pub fn suite() -> TractResult<TestSuite> {
             padding: PaddingSpec::Valid,
             input: arr4(&[[[[0.0, 0.0, 0.0, 1.0]]]]).into_dyn(),
             kernel: arr1(&[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0])
                 .into_shape_with_order(vec![2, 2, 1, 2, 1])
                 .unwrap()
                 .into_dyn(),
            bias: None,