diff --git a/Cargo.toml b/Cargo.toml
index 717add728a..43e9c68595 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -151,8 +151,8 @@ log = "0.4.14"
 maplit = "1.0.2"
 memmap2 = "0.9"
 metal = { version = "0.27.0", features = ["mps"] }
-ndarray = "0.15.3"
-ndarray-npy = { version = "0.8.0", features = [ "compressed_npz" ] }
+ndarray = "0.16"
+ndarray-npy = { version = "0.9.1", features = [ "compressed_npz" ] }
 nom = "7.0.0"
 nu-ansi-term = "0.46"
 num-complex = "0.4.0"
diff --git a/core/src/ops/array/gather_nd.rs b/core/src/ops/array/gather_nd.rs
index c370110f14..f4eeb3b8ec 100644
--- a/core/src/ops/array/gather_nd.rs
+++ b/core/src/ops/array/gather_nd.rs
@@ -35,19 +35,19 @@ impl GatherNd {
         let remaining = indices.shape().iter().skip(batch_dims).rev().skip(1).product();
         let indices_shape_op = tvec!(batch_size, remaining, n);
         let reshaped_indices: ArrayViewD<i32> =
-            indices.view().into_shape(&*indices_shape_op).unwrap();
+            indices.view().into_shape_with_order(&*indices_shape_op).unwrap();
 
         let mut data_shape_op: TVec<usize> =
             data.shape().iter().skip(batch_dims).copied().collect();
         data_shape_op.insert(0, batch_size);
         let reshaped_data =
-            data.to_array_view_unchecked::<T>().into_shape(&*data_shape_op).unwrap();
+            data.to_array_view_unchecked::<T>().into_shape_with_order(&*data_shape_op).unwrap();
 
         let mut output_shape_op: TVec<usize> =
             data.shape().iter().skip(n + batch_dims).copied().collect();
         output_shape_op.insert(0, batch_size * remaining);
         let mut output =
-            output.to_array_view_mut_unchecked::<T>().into_shape(&*output_shape_op).unwrap();
+            output.to_array_view_mut_unchecked::<T>().into_shape_with_order(&*output_shape_op).unwrap();
 
         for b in 0..batch_size {
             let mut i = reshaped_data.view();
diff --git a/core/src/ops/einsum/eval.rs b/core/src/ops/einsum/eval.rs
index 044c7070c2..5e81adb7e8 100644
--- a/core/src/ops/einsum/eval.rs
+++ b/core/src/ops/einsum/eval.rs
@@ -131,7 +131,7 @@ pub fn eval_q(expr: &AxesMapping, qp: DatumType, inputs: TVec<TValue>) -> TractR
     } else {
         let mut bias_shape = tvec!(1; output.ndim());
         bias_shape[expr.axis((InOut::In(2), 0))?.outputs[0][0]] = bias.len();
-        let bias = bias.to_array_view::<i32>()?.into_shape(&*bias_shape)?;
+        let bias = bias.to_array_view::<i32>()?.into_shape_with_order(&*bias_shape)?;
         output = output + bias;
     }
 
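Note: ndarray 0.16 removes `into_shape`, whose result depended on the source
array's memory layout, and replaces it with `into_shape_with_order`. Passing a
bare shape, as in all the call sites above, selects row-major (C) order, which
matches what the old `into_shape` did on standard-layout data. A minimal
sketch of the new API, not part of the patch, assuming ndarray 0.16:

    use ndarray::arr2;

    fn main() {
        // Row-major input: the bare-shape form of `into_shape_with_order`
        // is a drop-in replacement for the old `into_shape` calls above.
        let a = arr2(&[[1, 2, 3], [4, 5, 6]]);
        let flat = a.into_shape_with_order(6).unwrap();
        assert_eq!(flat.to_vec(), vec![1, 2, 3, 4, 5, 6]);
    }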
diff --git a/data/src/tensor/litteral.rs b/data/src/tensor/litteral.rs
index d2e4c5cb9f..d8e4c5bd02 100644
--- a/data/src/tensor/litteral.rs
+++ b/data/src/tensor/litteral.rs
@@ -3,27 +3,24 @@ use crate::datum::Datum;
 use ndarray::*;
 use std::sync::Arc;
 
-pub fn arr4<A, V, U, T>(xs: &[V]) -> Array4<A>
+pub fn arr4<A, const N: usize, const M: usize, const T: usize>(xs: &[[[[A;T];M];N]]) -> Array4<A>
 where
-    V: FixedInitializer<Elem = U> + Clone,
-    U: FixedInitializer<Elem = T> + Clone,
-    T: FixedInitializer<Elem = A> + Clone,
     A: Clone,
 {
     use ndarray::*;
     let xs = xs.to_vec();
-    let dim = Ix4(xs.len(), V::len(), U::len(), T::len());
+    let dim = Ix4(xs.len(), N, M, T);
     let len = xs.len();
     let cap = xs.capacity();
-    let expand_len = len * V::len() * U::len() * T::len();
+    let expand_len = len * N * M * T;
     let ptr = Box::into_raw(xs.into_boxed_slice());
     unsafe {
         let v = if ::std::mem::size_of::<A>() == 0 {
             Vec::from_raw_parts(ptr as *mut A, expand_len, expand_len)
-        } else if V::len() == 0 || U::len() == 0 || T::len() == 0 {
+        } else if N == 0 || M == 0 || T == 0 {
             Vec::new()
         } else {
-            let expand_cap = cap * V::len() * U::len() * T::len();
+            let expand_cap = cap * N * M * T;
             Vec::from_raw_parts(ptr as *mut A, expand_len, expand_cap)
         };
         ArrayBase::from_shape_vec_unchecked(dim, v)
@@ -38,26 +35,17 @@ pub fn tensor1<A: Datum>(xs: &[A]) -> Tensor {
     Tensor::from(arr1(xs))
 }
 
-pub fn tensor2<T, A: Datum>(xs: &[T]) -> Tensor
-where
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn tensor2<A: Datum, const N: usize>(xs: &[[A;N]]) -> Tensor
 {
     Tensor::from(arr2(xs))
 }
 
-pub fn tensor3<U, T, A: Datum>(xs: &[U]) -> Tensor
-where
-    U: FixedInitializer<Elem = T> + Clone,
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn tensor3<A: Datum, const N: usize, const M: usize>(xs: &[[[A;M];N]]) -> Tensor
 {
     Tensor::from(arr3(xs))
 }
 
-pub fn tensor4<V, U, T, A: Datum>(xs: &[V]) -> Tensor
-where
-    V: FixedInitializer<Elem = U> + Clone,
-    U: FixedInitializer<Elem = T> + Clone,
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn tensor4<A: Datum, const N: usize, const M: usize, const T: usize>(xs: &[[[[A;T];M];N]]) -> Tensor
 {
     Tensor::from(arr4(xs))
 }
@@ -70,26 +58,17 @@ pub fn rctensor1<A: Datum>(xs: &[A]) -> Arc<Tensor> {
     Arc::new(Tensor::from(arr1(xs)))
 }
 
-pub fn rctensor2<T, A: Datum>(xs: &[T]) -> Arc<Tensor>
-where
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn rctensor2<A: Datum, const N: usize>(xs: &[[A;N]]) -> Arc<Tensor>
 {
     Arc::new(Tensor::from(arr2(xs)))
 }
 
-pub fn rctensor3<U, T, A: Datum>(xs: &[U]) -> Arc<Tensor>
-where
-    U: FixedInitializer<Elem = T> + Clone,
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn rctensor3<A: Datum, const N: usize, const M: usize>(xs: &[[[A;M];N]]) -> Arc<Tensor>
 {
     Arc::new(Tensor::from(arr3(xs)))
 }
 
-pub fn rctensor4<V, U, T, A: Datum>(xs: &[V]) -> Arc<Tensor>
-where
-    V: FixedInitializer<Elem = U> + Clone,
-    U: FixedInitializer<Elem = T> + Clone,
-    T: FixedInitializer<Elem = A> + Clone,
+pub fn rctensor4<A: Datum, const N: usize, const M: usize, const T: usize>(xs: &[[[[A;T];M];N]]) -> Arc<Tensor>
 {
     Arc::new(Tensor::from(arr4(xs)))
 }
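Note: ndarray 0.16 drops the `FixedInitializer` trait, so the tensor literal
helpers above switch to const generics: the array dimensions are now inferred
as const parameters from the nested-array argument, and call sites stay
unchanged. A minimal sketch, not part of the patch; the prelude import path is
an assumption:

    use tract_data::prelude::*; // assumed re-export of tensor2/tensor4

    fn demo() {
        // N = 2 is inferred for tensor2's `&[[A; N]]` argument:
        let t2 = tensor2(&[[1f32, 2.0], [3.0, 4.0]]);
        assert_eq!(t2.shape(), &[2, 2]);
        // N = 1, M = 1, T = 4 are inferred for tensor4's nested arrays:
        let t4 = tensor4(&[[[[1f32, 2.0, 3.0, 4.0]]]]);
        assert_eq!(t4.shape(), &[1, 1, 1, 4]);
    }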
diff --git a/harness/core-proptest-pulse/src/delay_plus_downsample.rs b/harness/core-proptest-pulse/src/delay_plus_downsample.rs
index 62e12b6786..699e8fbbd0 100644
--- a/harness/core-proptest-pulse/src/delay_plus_downsample.rs
+++ b/harness/core-proptest-pulse/src/delay_plus_downsample.rs
@@ -15,7 +15,7 @@ struct DelayPlusDownsampleProblem {
 }
 
 fn t(n: usize) -> ArrayD<f32> {
-    arr1(&(0..n).map(|x| x as f32).collect_vec()).into_shape(vec![1, n, 1]).unwrap()
+    arr1(&(0..n).map(|x| x as f32).collect_vec()).into_shape_with_order(vec![1, n, 1]).unwrap()
 }
 
 impl Arbitrary for DelayPlusDownsampleProblem {
diff --git a/harness/core-proptest-pulse/src/delay_plus_pool.rs b/harness/core-proptest-pulse/src/delay_plus_pool.rs
index f65d02bc7a..c25f7cbf45 100644
--- a/harness/core-proptest-pulse/src/delay_plus_pool.rs
+++ b/harness/core-proptest-pulse/src/delay_plus_pool.rs
@@ -60,7 +60,7 @@ impl DelayPlusPoolProblem {
         );
         let pool = model.wire_node("pool", MaxPool::new(pool_spec, None), &crop).unwrap();
         model.set_output_outlets(&pool).unwrap();
-        let input = arr1(&self.input).into_shape((1, self.input.len(), 1)).unwrap().into_dyn();
+        let input = arr1(&self.input).into_shape_with_order((1, self.input.len(), 1)).unwrap().into_dyn();
         proptest_regular_against_pulse(model, self.pulse as _, input, 1)
     }
 }
diff --git a/harness/lstm-proptest-onnx-vs-tf/src/lib.rs b/harness/lstm-proptest-onnx-vs-tf/src/lib.rs
index 580880e20e..56dc82a2bf 100644
--- a/harness/lstm-proptest-onnx-vs-tf/src/lib.rs
+++ b/harness/lstm-proptest-onnx-vs-tf/src/lib.rs
@@ -4,6 +4,7 @@ use proptest::prelude::*;
 use tract_hir::internal::*;
 use tract_ndarray::prelude::*;
+use tract_ndarray::Order;
 use tract_onnx::prelude::*;
 use tract_onnx::tract_hir;
 
@@ -39,9 +40,11 @@ impl LstmProblem {
                 .slice_axis_mut(Axis(0), (s * icfo..s * (icfo + 1)).into())
                 .assign(&self.b_icfo.slice_axis(Axis(0), (s * iofc..s * (iofc + 1)).into()));
         }
-        let w_iofc = w_iofc.t().into_shape((1, 4 * s, s))?.to_owned();
-        let r_iofc = r_iofc.t().into_shape((1, 4 * s, s))?.to_owned();
-        let b_iofc = b_iofc.into_shape((1, 8 * s))?;
+        // let (shape, order) = w_iofc.t().into_shape_and_order();
+
+        let w_iofc = w_iofc.t().into_shape_with_order(((1, 4 * s, s), Order::ColumnMajor))?.to_owned();
+        let r_iofc = r_iofc.t().into_shape_with_order(((1, 4 * s, s), Order::ColumnMajor))?.to_owned();
+        let b_iofc = b_iofc.into_shape_with_order(((1, 8 * s), Order::ColumnMajor))?;
 
         let x = model.add_source("x", self.x.datum_type().fact(self.x.shape()).into())?;
         let op = tract_onnx::ops::rec::common::CommonRec {
@@ -190,7 +193,7 @@ impl LstmProblem {
         let plan = SimplePlan::new(model)?;
         let mut state = SimpleState::new(plan)?;
         let y = state.run(tvec!(self.x.clone()))?.remove(0).into_tensor().into_array::<f32>()?;
-        let y = y.into_shape((self.length, self.batch_size, self.cell_size)).unwrap();
+        let y = y.into_shape_with_order((self.length, self.batch_size, self.cell_size)).unwrap();
         Ok(y.into_tvalue())
     }
 
diff --git a/harness/tf-moz-deepspeech/src/lib.rs b/harness/tf-moz-deepspeech/src/lib.rs
index 63183f4e05..c83f971e1b 100644
--- a/harness/tf-moz-deepspeech/src/lib.rs
+++ b/harness/tf-moz-deepspeech/src/lib.rs
@@ -39,7 +39,7 @@ fn parse_tensor<T: Datum + FromStr>(s: &str) -> TractResult<Tensor> {
     let shape = &shape[1..shape.len() - 1];
     let shape: Vec<usize> = shape.split(',').map(|s| s.parse().unwrap()).collect();
     Ok(tract_ndarray::Array1::from(tokens.filter_map(|s| s.parse::<T>().ok()).collect::<Vec<_>>())
-        .into_shape(shape)?
+        .into_shape_with_order(shape)?
         .into())
 }
 
diff --git a/linalg/src/frame/mmm/pack.rs b/linalg/src/frame/mmm/pack.rs
index 941a044541..c4d315415a 100644
--- a/linalg/src/frame/mmm/pack.rs
+++ b/linalg/src/frame/mmm/pack.rs
@@ -555,7 +555,7 @@ mod test {
                 self.mn_range.clone(),
             )
         };
-        output.into_array::<T>().unwrap().into_shape((panels, panel_len)).unwrap()
+        output.into_array::<T>().unwrap().into_shape_with_order((panels, panel_len)).unwrap()
     }
 
     fn reference(&self) -> Array2<T> {
diff --git a/linalg/tests/virtual_im2col.rs b/linalg/tests/virtual_im2col.rs
index 2107995eeb..91c27acf9d 100644
--- a/linalg/tests/virtual_im2col.rs
+++ b/linalg/tests/virtual_im2col.rs
@@ -231,7 +231,7 @@ impl EagerIm2colSpec {
             [kh, kw, ci, h, w],
             |(kh, kw, ci, h, w)| *input.at([ci, h + kh, w + kw]).unwrap(),
         )
-        .into_shape([k, n])
+        .into_shape_with_order([k, n])
         .unwrap();
         Box::new(EagerIm2col { im2col: im2col.into_tensor(), packer: self.packer.clone(), k })
     }
diff --git a/onnx/src/ops/nn/batch_norm.rs b/onnx/src/ops/nn/batch_norm.rs
index a561081e5a..f6440dd033 100644
--- a/onnx/src/ops/nn/batch_norm.rs
+++ b/onnx/src/ops/nn/batch_norm.rs
@@ -23,10 +23,10 @@ impl BatchNorm {
         T: Datum + tract_num_traits::Float,
         f32: AsPrimitive<T>,
     {
-        let scale = scale.to_array_view::<T>()?.into_shape((c_dim,))?;
-        let beta = beta.to_array_view::<T>()?.into_shape((c_dim,))?;
-        let mean = mean.to_array_view::<T>()?.into_shape((c_dim,))?;
-        let var = var.to_array_view::<T>()?.into_shape((c_dim,))?;
+        let scale = scale.to_array_view::<T>()?.into_shape_with_order((c_dim,))?;
+        let beta = beta.to_array_view::<T>()?.into_shape_with_order((c_dim,))?;
+        let mean = mean.to_array_view::<T>()?.into_shape_with_order((c_dim,))?;
+        let var = var.to_array_view::<T>()?.into_shape_with_order((c_dim,))?;
 
         let denominator = var.mapv(|x| (x + self.epsilon.as_()).sqrt());
diff --git a/tensorflow/src/conform/tf.rs b/tensorflow/src/conform/tf.rs
index d23d0047ba..ef64f686fd 100644
--- a/tensorflow/src/conform/tf.rs
+++ b/tensorflow/src/conform/tf.rs
@@ -106,7 +106,7 @@ impl From<Tensor> for TensorHolder {
 
 fn tensor_to_array<T: tf::TensorType + Clone>(tensor: &tf::Tensor<T>) -> TractResult<ArrayD<T>> {
     let shape: Vec<usize> = tensor.dims().iter().map(|d| *d as _).collect();
-    Ok(Array::from(tensor.into_iter().cloned().collect::<Vec<_>>()).into_shape(shape)?)
+    Ok(Array::from(tensor.into_iter().cloned().collect::<Vec<_>>()).into_shape_with_order(shape)?)
 }
 
 impl Tensorflow {
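Note: the lstm harness above is the one spot where the old `into_shape` was
flattening a transposed (hence column-major) array in memory order; ndarray
0.16 requires that order to be spelled out, hence `Order::ColumnMajor`. A
minimal sketch of why, not part of the patch, assuming ndarray 0.16:

    use ndarray::{arr2, Order};

    fn main() {
        let w = arr2(&[[1, 2], [3, 4], [5, 6]]); // 3x2, row-major storage
        let wt = w.t(); // 2x3 transposed view: column-major layout
        // 0.15's `wt.into_shape(6)` read the elements in memory order;
        // 0.16 makes the traversal explicit (RowMajor would be rejected
        // here because the view is not c-contiguous):
        let flat = wt.into_shape_with_order((6, Order::ColumnMajor)).unwrap();
        assert_eq!(flat.to_vec(), vec![1, 2, 3, 4, 5, 6]);
    }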
diff --git a/tensorflow/src/ops/nn/conv2d.rs b/tensorflow/src/ops/nn/conv2d.rs
index 807b1ee35b..fa33962b09 100644
--- a/tensorflow/src/ops/nn/conv2d.rs
+++ b/tensorflow/src/ops/nn/conv2d.rs
@@ -24,7 +24,7 @@ mod tests {
 
     fn mk(sizes: &[usize]) -> Tensor {
         Array::range(1f32, sizes.iter().product::<usize>() as f32 + 1.0, 1.0)
-            .into_shape(sizes)
+            .into_shape_with_order(sizes)
             .unwrap()
             .into()
     }
diff --git a/tensorflow/src/ops/nn/s2b/mod.rs b/tensorflow/src/ops/nn/s2b/mod.rs
index dad0a06076..3fb7eb7f43 100644
--- a/tensorflow/src/ops/nn/s2b/mod.rs
+++ b/tensorflow/src/ops/nn/s2b/mod.rs
@@ -82,7 +82,7 @@ fn batch_to_space<T: Datum>(
     let batches = data.shape()[0] / block_size;
     unflatten_blocked_shape.push(batches);
     unflatten_blocked_shape.extend(&data.shape()[1..]);
-    let data = data.into_shape(&*unflatten_blocked_shape)?;
+    let data = data.into_shape_with_order(&*unflatten_blocked_shape)?;
     let mut permuted_axes = vec![block_shape.len()];
     let mut padded_shape = vec![batches];
     for i in 0..block_shape.shape()[0] {
diff --git a/tensorflow/tests/ops_nn_conv2d.rs b/tensorflow/tests/ops_nn_conv2d.rs
index be3fa8b05b..503753c117 100644
--- a/tensorflow/tests/ops_nn_conv2d.rs
+++ b/tensorflow/tests/ops_nn_conv2d.rs
@@ -54,11 +54,11 @@ fn img_and_ker() -> BoxedStrategy<(Tensor, Tensor, (usize, usize))> {
         .prop_map(|(img_shape, ker_shape, img, ker, strides)| {
             (
                 tract_ndarray::Array::from(img.into_iter().map(|i| i as f32).collect::<Vec<_>>())
-                    .into_shape(img_shape)
+                    .into_shape_with_order(img_shape)
                     .unwrap()
                     .into(),
                 tract_ndarray::Array::from(ker.into_iter().map(|i| i as f32).collect::<Vec<_>>())
-                    .into_shape(ker_shape)
+                    .into_shape_with_order(ker_shape)
                     .unwrap()
                     .into(),
                 strides,
diff --git a/tensorflow/tests/ops_nn_dwconv2d.rs b/tensorflow/tests/ops_nn_dwconv2d.rs
index 17413c4018..e63dc1a6c7 100644
--- a/tensorflow/tests/ops_nn_dwconv2d.rs
+++ b/tensorflow/tests/ops_nn_dwconv2d.rs
@@ -52,10 +52,10 @@ fn img_and_ker() -> BoxedStrategy<(Array4<f32>, Array4<f32>, usize)> {
         .prop_map(|(img_shape, ker_shape, img, ker, stride)| {
             (
                 Array::from(img.into_iter().map(|i| i as f32).collect::<Vec<_>>())
-                    .into_shape(img_shape)
+                    .into_shape_with_order(img_shape)
                     .unwrap(),
                 Array::from(ker.into_iter().map(|i| i as f32).collect::<Vec<_>>())
-                    .into_shape(ker_shape)
+                    .into_shape_with_order(ker_shape)
                     .unwrap(),
                 stride,
             )
diff --git a/tensorflow/tests/ops_nn_pools.rs b/tensorflow/tests/ops_nn_pools.rs
index 0e4c535675..779994902a 100644
--- a/tensorflow/tests/ops_nn_pools.rs
+++ b/tensorflow/tests/ops_nn_pools.rs
@@ -32,7 +32,7 @@ fn img_and_pool() -> BoxedStrategy<(Array4<f32>, (usize, usize), String, usize)>
             )
         })
         .prop_map(|(img_shape, k, img, padding, stride)| {
-            (Array::from(img).into_shape(img_shape).unwrap(), k, padding, stride)
+            (Array::from(img).into_shape_with_order(img_shape).unwrap(), k, padding, stride)
         })
         .boxed()
 }
diff --git a/test-rt/suite-unit/src/conv_f32.rs b/test-rt/suite-unit/src/conv_f32.rs
index bc948cee46..0ab23f493f 100644
--- a/test-rt/suite-unit/src/conv_f32.rs
+++ b/test-rt/suite-unit/src/conv_f32.rs
@@ -154,7 +154,7 @@ impl ConvProblem {
         if let Some(bias) = &self.bias {
             let mut shape = vec![1; out.ndim()];
             shape[shape_out.c_axis()] = bias.len();
-            out += &bias.clone().into_shape(shape).unwrap();
+            out += &bias.clone().into_shape_with_order(shape).unwrap();
         }
         out
     }
diff --git a/test-rt/suite-unit/src/conv_q.rs b/test-rt/suite-unit/src/conv_q.rs
index c7f0d4215e..8c83acebfa 100644
--- a/test-rt/suite-unit/src/conv_q.rs
+++ b/test-rt/suite-unit/src/conv_q.rs
@@ -189,7 +189,7 @@ impl QConvProblem {
         if let Some(bias) = &self.bias {
             let mut shape = vec![1; temp.ndim()];
             shape[shape_out.c_axis()] = bias.len();
-            temp += &bias.clone().into_shape(shape).unwrap();
+            temp += &bias.clone().into_shape_with_order(shape).unwrap();
         }
         let cdt = self.output_dt();
         temp.axis_iter_mut(Axis(shape_out.c_axis())).zip(k_scale).for_each(
diff --git a/test-rt/suite-unit/src/deconv.rs b/test-rt/suite-unit/src/deconv.rs
index c3860219cb..369ecadf30 100644
--- a/test-rt/suite-unit/src/deconv.rs
+++ b/test-rt/suite-unit/src/deconv.rs
@@ -204,7 +204,7 @@ impl DeconvProblem {
         if let Some(b) = &self.bias {
             let mut bias_shape = tvec!(1; output_shape.rank());
             bias_shape[output_shape.c_axis()] = co;
-            let b = b.clone().into_shape(&*bias_shape)?;
+            let b = b.clone().into_shape_with_order(&*bias_shape)?;
             output += &b;
         }
         let co_per_group = co / self.group;
@@ -599,7 +599,7 @@ pub fn suite() -> TractResult<TestSuite> {
                 padding: PaddingSpec::Valid,
                 input: arr4(&[[[[0.0, 0.0, 0.0, 1.0]]]]).into_dyn(),
                 kernel: arr1(&[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0])
-                    .into_shape(vec![2, 2, 1, 2, 1])
+                    .into_shape_with_order(vec![2, 2, 1, 2, 1])
                     .unwrap()
                     .into_dyn(),
                 bias: None,