From 46eaab8c0e2494d4c0fbf2b34ea4ebbb2b90728b Mon Sep 17 00:00:00 2001
From: Joe McCain III
Date: Tue, 2 Apr 2024 13:54:50 -0500
Subject: [PATCH] update

Signed-off-by: Joe McCain III
---
 acme/Cargo.toml                     |   1 +
 acme/benches/tensor.rs              |  20 +++-
 core/src/ops/binary/kinds.rs        |   2 +
 graphs/src/ops/arithmetic.rs        |  84 +++++++-------
 graphs/src/ops/mod.rs               |  36 +++++-
 tensor/src/actions/iter/iterator.rs |   7 ++
 tensor/src/actions/iter/mod.rs      |   3 +-
 tensor/src/actions/iter/strides.rs  |  41 ++-----
 tensor/src/impls/create.rs          |  12 +-
 tensor/src/impls/grad.rs            |  28 ++++-
 tensor/src/impls/iter.rs            |  22 ++++
 tensor/src/impls/linalg.rs          |  14 +--
 tensor/src/impls/ops/binary.rs      | 167 ++++++++++++++++++++--------
 tensor/src/impls/ops/unary.rs       |  14 +--
 tensor/src/impls/reshape.rs         |   8 +-
 tensor/src/lib.rs                   |   1 +
 tensor/src/shape/shape.rs           | 133 +++++++++++-----------
 tensor/src/tensor.rs                | 120 ++++++++------------
 tensor/src/utils.rs                 |   4 +-
 tensor/tests/iter.rs                |  22 ++++
 tensor/tests/tensor.rs              |  29 ++++-
 21 files changed, 472 insertions(+), 296 deletions(-)
 create mode 100644 tensor/src/impls/iter.rs
 create mode 100644 tensor/tests/iter.rs

diff --git a/acme/Cargo.toml b/acme/Cargo.toml
index 0e1b20f4..1c40aeff 100644
--- a/acme/Cargo.toml
+++ b/acme/Cargo.toml
@@ -100,6 +100,7 @@ acme-tensor = { optional = true, path = "../tensor", version = "0.3.0" }
 
 [dev-dependencies]
 approx = "0.5"
+lazy_static = "1"
 num = "0.4"
 rand = "0.8"
 
diff --git a/acme/benches/tensor.rs b/acme/benches/tensor.rs
index ae189667..fb084100 100644
--- a/acme/benches/tensor.rs
+++ b/acme/benches/tensor.rs
@@ -6,15 +6,29 @@
 extern crate acme;
 extern crate test;
 
-use acme::prelude::{IntoShape, Tensor};
+use acme::prelude::{IntoShape, Shape, Tensor};
+use lazy_static::lazy_static;
 use test::Bencher;
 
+lazy_static! {
+    static ref SHAPE_3D: Shape = SHAPE_3D_PATTERN.into_shape();
+}
+
+const SHAPE_3D_PATTERN: (usize, usize, usize) = (100, 10, 1);
+
 #[bench]
-fn tensor_iter(b: &mut Bencher) {
-    let shape = (20, 20, 20).into_shape();
+fn bench_iter(b: &mut Bencher) {
+    let shape = SHAPE_3D.clone();
     let n = shape.size();
     let tensor = Tensor::linspace(0f64, n as f64, n);
 
     b.iter(|| tensor.strided().take(n))
 }
+
+#[bench]
+fn bench_iter_rev(b: &mut Bencher) {
+    let shape = SHAPE_3D.clone();
+    let n = shape.size();
+    let tensor = Tensor::linspace(0f64, n as f64, n);
+
+    b.iter(|| tensor.strided().rev().take(n))
+}
diff --git a/core/src/ops/binary/kinds.rs b/core/src/ops/binary/kinds.rs
index 2bd277b2..7ae562e6 100644
--- a/core/src/ops/binary/kinds.rs
+++ b/core/src/ops/binary/kinds.rs
@@ -56,3 +56,5 @@ impl BinaryOp {
         }
     }
 }
+
+
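
Reviewer note: the two benchmarks now share one lazily initialized 100 x 10 x 1 shape, so the forward and reverse runs iterate the same 1000 elements. A minimal sketch of the shape/size relationship they rely on, assuming only the IntoShape conversion used above:

    use acme::prelude::IntoShape;

    fn main() {
        // A shape's size is the product of its dimensions.
        let shape = (100, 10, 1).into_shape();
        assert_eq!(shape.size(), 1000);
    }
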
diff --git a/graphs/src/ops/arithmetic.rs b/graphs/src/ops/arithmetic.rs
index 4dffc381..b66ee0bd 100644
--- a/graphs/src/ops/arithmetic.rs
+++ b/graphs/src/ops/arithmetic.rs
@@ -2,7 +2,7 @@
     Appellation: arithmetic
     Contrib: FL03
 */
-use super::BinaryOperation;
+use super::{BinaryOperation, Operator};
 use num::traits::NumOps;
 #[cfg(feature = "serde")]
 use serde::{Deserialize, Serialize};
@@ -24,6 +24,12 @@ macro_rules! operator {
                 stringify!($op).to_lowercase()
             }
         }
+
+        impl Operator for $op {
+            fn name(&self) -> String {
+                self.name()
+            }
+        }
     };
     ($($op:ident),*) => {
         $(
@@ -34,10 +40,7 @@ macro_rules! operator {
 }
 
 macro_rules! operators {
-    (class $group:ident; {$($op:ident: $variant:ident),*}) => {
-        $(
-            operator!($op);
-        )*
+    ($group:ident; {$($variant:ident: $op:ident => $method:ident),*}) => {
         #[derive(
             Clone,
             Copy,
@@ -65,11 +68,35 @@ macro_rules! operators {
             $variant($op),
         )*
     }
+
+        impl $group {
+            $(
+                pub fn $method() -> Self {
+                    Self::$variant($op::new())
+                }
+            )*
+
+            pub fn name(&self) -> String {
+                match self {
+                    $(
+                        $group::$variant(op) => op.name(),
+                    )*
+                }
+            }
+        }
     };
 }
 
 macro_rules! impl_binary_op {
+    ($(($op:ident, $bound:ident, $operator:tt)),*) => {
+        $(
+            impl_binary_op!($op, $bound, $operator);
+        )*
+
+    };
     ($op:ident, $bound:ident, $operator:tt) => {
+        operator!($op);
+
         impl<A, B, C> BinaryOperation<A, B> for $op
         where
             A: core::ops::$bound<B, Output = C>,
@@ -82,6 +109,8 @@ macro_rules! impl_binary_op {
         }
     };
     (expr $op:ident, $bound:ident, $exp:expr) => {
+        operator!($op);
+
         impl<A, B, C> BinaryOperation<A, B> for $op
         where
             A: core::ops::$bound<B, Output = C>,
@@ -95,45 +124,21 @@ macro_rules! impl_binary_op {
     };
 }
 
-// operator!(Addition, Division, Multiplication, Subtraction);
-operators!(class Arithmetic; {Addition: Add, Division: Div, Multiplication: Mul, Remainder: Rem, Subtraction: Sub});
-
-impl_binary_op!(Addition, Add, +);
+operators!(Arithmetic; {Add: Addition => add, Div: Division => div, Mul: Multiplication => mul, Rem: Remainder => rem, Sub: Subtraction => sub});
 
-impl_binary_op!(Division, Div, /);
+impl_binary_op!((Addition, Add, +), (Division, Div, /), (Multiplication, Mul, *), (Remainder, Rem, %), (Subtraction, Sub, -));
 
-impl_binary_op!(Multiplication, Mul, *);
-
-impl_binary_op!(Remainder, Rem, %);
-
-impl_binary_op!(Subtraction, Sub, -);
 
 impl Arithmetic {
     pub fn new(op: Arithmetic) -> Self {
         op
     }
 
-    pub fn add() -> Self {
-        Self::Add(Addition::new())
-    }
-
-    pub fn div() -> Self {
-        Self::Div(Division::new())
-    }
-
-    pub fn mul() -> Self {
-        Self::Mul(Multiplication::new())
-    }
-
-    pub fn sub() -> Self {
-        Self::Sub(Subtraction::new())
-    }
-
-    pub fn op<A, B, C>(&self) -> Box<dyn BinaryOperation<A, B, Output = C>>
+    pub fn into_op<A, B, C>(self) -> Box<dyn BinaryOperation<A, B, Output = C>>
     where
         A: NumOps<B, C>,
     {
-        match self.clone() {
+        match self {
             Arithmetic::Add(op) => Box::new(op),
             Arithmetic::Div(op) => Box::new(op),
             Arithmetic::Mul(op) => Box::new(op),
@@ -142,14 +147,11 @@ impl Arithmetic {
         }
     }
 
-    pub fn name(&self) -> String {
-        match self {
-            Arithmetic::Add(op) => op.name(),
-            Arithmetic::Div(op) => op.name(),
-            Arithmetic::Mul(op) => op.name(),
-            Arithmetic::Rem(op) => op.name(),
-            Arithmetic::Sub(op) => op.name(),
-        }
+    pub fn op<A, B, C>(&self) -> Box<dyn BinaryOperation<A, B, Output = C>>
+    where
+        A: NumOps<B, C>,
+    {
+        self.into_op()
     }
 
     pub fn eval<A, B, C>(&self, lhs: A, rhs: B) -> C
diff --git a/graphs/src/ops/mod.rs b/graphs/src/ops/mod.rs
index ea089ec6..969c3831 100644
--- a/graphs/src/ops/mod.rs
+++ b/graphs/src/ops/mod.rs
@@ -10,14 +10,44 @@ pub use self::{arithmetic::*, kinds::*};
 pub(crate) mod arithmetic;
 pub(crate) mod kinds;
 
-pub trait BinaryOperation<A, B> {
+pub trait BinaryOperation<A, B = A> {
     type Output;
 
     fn eval(&self, lhs: A, rhs: B) -> Self::Output;
 }
 
+impl<S, A, B, C> BinaryOperation<A, B> for S
+where
+    S: Fn(A, B) -> C,
+{
+    type Output = C;
+
+    fn eval(&self, lhs: A, rhs: B) -> Self::Output {
+        self(lhs, rhs)
+    }
+}
+
+impl<A, B, C> BinaryOperation<A, B> for Box<dyn BinaryOperation<A, B, Output = C>> {
+    type Output = C;
+
+    fn eval(&self, lhs: A, rhs: B) -> Self::Output {
+        self.as_ref().eval(lhs, rhs)
+    }
+}
+
 pub trait Operator {
-    type Output;
+    fn boxed(self) -> Box<dyn Operator>
+    where
+        Self: Sized + 'static,
+    {
+        Box::new(self)
+    }
 
-    fn kind(&self) -> String;
+    fn name(&self) -> String;
+}
+
+impl Operator for Box<dyn Operator> {
+    fn name(&self) -> String {
+        self.as_ref().name()
+    }
 }
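
Reviewer note: the new blanket impl makes any Fn(A, B) -> C closure a BinaryOperation, and boxed trait objects forward eval through as_ref. A minimal sketch of both paths; the crate path (acme_graphs) is assumed from the module layout above:

    use acme_graphs::ops::BinaryOperation; // assumed path

    fn main() {
        // Any closure now satisfies BinaryOperation via the blanket impl.
        let add = |lhs: i32, rhs: i32| lhs + rhs;
        assert_eq!(add.eval(2, 3), 5);

        // Boxed operations delegate eval() to the inner object.
        let boxed: Box<dyn BinaryOperation<i32, i32, Output = i32>> = Box::new(add);
        assert_eq!(boxed.eval(2, 3), 5);
    }
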
diff --git a/tensor/src/actions/iter/iterator.rs b/tensor/src/actions/iter/iterator.rs
index 9b2765f7..842e03a6 100644
--- a/tensor/src/actions/iter/iterator.rs
+++ b/tensor/src/actions/iter/iterator.rs
@@ -17,3 +17,10 @@ impl Iter {
         self.order
     }
 }
+
+pub struct BaseIter<'a, T> {
+    iter: &'a Iter,
+    data: &'a [T],
+    index: usize,
+}
+
diff --git a/tensor/src/actions/iter/mod.rs b/tensor/src/actions/iter/mod.rs
index 2447409c..29bc0385 100644
--- a/tensor/src/actions/iter/mod.rs
+++ b/tensor/src/actions/iter/mod.rs
@@ -48,7 +48,7 @@ mod tests {
 
     #[test]
     fn test_strided() {
-        let shape = Shape::from_iter([2, 2]);
+        let shape = Shape::from_iter([2, 2, 2, 2]);
         let n = shape.size();
         let exp = Vec::linspace(0f64, n as f64, n);
         let tensor = Tensor::linspace(0f64, n as f64, n).reshape(shape).unwrap();
@@ -58,7 +58,6 @@ mod tests {
     }
 
     #[test]
-    #[ignore = "not implemented"]
     fn test_strided_rev() {
         let shape = Shape::from_iter([2, 2]);
         let n = shape.size();
diff --git a/tensor/src/actions/iter/strides.rs b/tensor/src/actions/iter/strides.rs
index 1b4ebf4a..0d0ab1e0 100644
--- a/tensor/src/actions/iter/strides.rs
+++ b/tensor/src/actions/iter/strides.rs
@@ -49,8 +49,8 @@ impl<'a, T> From<&'a TensorBase<T>> for StrideIter<'a, T> {
 pub struct Strided<'a> {
     next: Option<usize>,
     position: Vec<usize>,
-    pub(crate) shape: &'a Shape,
-    pub(crate) stride: &'a Stride,
+    shape: &'a Shape,
+    stride: &'a Stride,
 }
 
 impl<'a> Strided<'a> {
@@ -70,8 +70,9 @@ impl<'a> Strided<'a> {
         }
     }
 
-    pub fn index(&self, index: &[usize]) -> usize {
+    pub(crate) fn index(&self, index: impl AsRef<[usize]>) -> usize {
         index
+            .as_ref()
             .iter()
             .zip(self.stride.iter())
             .map(|(i, s)| i * s)
@@ -81,33 +82,15 @@ impl<'a> Strided<'a> {
 
 impl<'a> DoubleEndedIterator for Strided<'a> {
     fn next_back(&mut self) -> Option<Self::Item> {
-
-        let scope = match self.next {
-            None => return None,
-            Some(storage_index) => storage_index,
+        let (pos, _idx) = if let Some(item) = self.next() {
+            item
+        } else {
+            return None;
         };
-        let mut updated = false;
-        let mut next = scope;
-        for ((pos, max_i), stride) in self
-            .position
-            .iter_mut()
-            .zip(self.shape.iter())
-            .zip(self.stride.iter())
-        {
-            let next_i = *pos - 1;
-            if next_i > *max_i {
-                *pos = next_i;
-                updated = true;
-                next -= stride;
-                break;
-            } else {
-                next += *pos * stride;
-                *pos = 0
-            }
-        }
-        self.next = if updated { Some(next) } else { None };
-        println!("{:?}", &self.position);
-        Some((self.position.clone(), scope))
+        let position = self.shape.iter().zip(pos.iter()).map(|(s, p)| s - p).collect();
+        let scope = self.index(&position);
+        println!("{:?}", &position);
+        Some((position, scope))
         // unimplemented!()
     }
 }
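
Reviewer note: next_back now re-derives the logical position by mirroring the forward cursor against the shape, which is what lets test_strided_rev drop its #[ignore] and makes the reverse bench meaningful. A small sketch of the now-working reverse traversal, assuming the prelude re-exports used in the tests:

    use acme::prelude::{IntoShape, Tensor};

    fn main() {
        let shape = (2, 2).into_shape();
        let n = shape.size();
        let tensor = Tensor::linspace(0f64, n as f64, n).reshape(shape).unwrap();
        // Strided is now a DoubleEndedIterator, so .rev() walks from the
        // last logical element back to the first.
        for item in tensor.strided().rev() {
            println!("{:?}", item);
        }
    }
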
diff --git a/tensor/src/impls/create.rs b/tensor/src/impls/create.rs
index a8aa220c..6e9c22b1 100644
--- a/tensor/src/impls/create.rs
+++ b/tensor/src/impls/create.rs
@@ -3,7 +3,7 @@
     Contrib: FL03
 */
 use crate::prelude::IntoShape;
-use crate::tensor::{from_vec, TensorBase};
+use crate::tensor::{from_vec_with_kind, TensorBase};
 use num::traits::real::Real;
 use num::traits::{FromPrimitive, NumAssign, One, Zero};
 
@@ -30,7 +30,7 @@ where
     pub fn fill(shape: impl IntoShape, value: T) -> Self {
         let shape = shape.into_shape();
         let store = vec![value; shape.size()];
-        from_vec(false, shape, store)
+        from_vec_with_kind(false, shape, store)
     }
     /// Create a tensor, filled with some value, from the current shape
     pub fn fill_like(&self, value: T) -> Self {
@@ -53,7 +53,7 @@ where
             store.push(value);
             value += step;
         }
-        from_vec(false, store.len(), store)
+        Self::from_vec(store)
     }
     /// Create an identity matrix of a certain size
     pub fn eye(size: usize) -> Self {
@@ -63,7 +63,7 @@ where
                 store.push(if i == j { T::one() } else { T::zero() });
             }
         }
-        from_vec(false, (size, size), store)
+        Self::from_shape_vec((size, size), store)
     }
     /// Create a tensor with a certain number of elements, evenly spaced
     /// between the provided start and end values
@@ -88,7 +88,7 @@ where
             store.push(value.exp2());
             value += step;
         }
-        from_vec(false, (store.len(),), store)
+        from_vec_with_kind(false, (store.len(),), store)
     }
 
     pub fn geomspace(start: T, end: T, steps: usize) -> Self
@@ -104,7 +104,7 @@ where
             store.push(value.exp());
             value += step;
         }
-        from_vec(false, (store.len(),), store)
+        from_vec_with_kind(false, (store.len(),), store)
     }
 }
diff --git a/tensor/src/impls/grad.rs b/tensor/src/impls/grad.rs
index 1b57c57f..ad4e913a 100644
--- a/tensor/src/impls/grad.rs
+++ b/tensor/src/impls/grad.rs
@@ -114,24 +114,42 @@ where
                 }
                 _ => todo!(),
             },
+            TensorExpr::BinaryScalar(lhs, rhs, kind) => match kind {
+                BinaryOp::Add => {
+                    *entry!(store, lhs) += &grad;
+                }
+                BinaryOp::Div => {
+                    *entry!(store, lhs) += &grad / *rhs;
+                }
+                BinaryOp::Mul => {
+                    *entry!(store, lhs) += &grad * *rhs;
+                }
+                BinaryOp::Pow => {
+                    *entry!(store, lhs) += &grad * *rhs * lhs.pow(*rhs - T::one());
+                }
+                BinaryOp::Sub => {
+                    *entry!(store, lhs) += &grad;
+                }
+                _ => todo!(),
+            },
             TensorExpr::Unary(val, kind) => match kind {
                 UnaryOp::Cos => {
-                    *entry!(store, val) -= &grad * val.clone().sin();
+                    *entry!(store, val) -= &grad * val.sin();
                 }
                 UnaryOp::Cosh => {
-                    *entry!(store, val) += &grad * val.clone().sinh();
+                    *entry!(store, val) += &grad * val.sinh();
                 }
                 UnaryOp::Exp => {
-                    *entry!(store, val) += &grad * val.clone().exp();
+                    *entry!(store, val) += &grad * val.exp();
                 }
                 UnaryOp::Neg => {
                     *entry!(store, val) -= &grad;
                 }
                 UnaryOp::Sin => {
-                    *entry!(store, val) += &grad * val.clone().cos();
+                    *entry!(store, val) += &grad * val.cos();
                 }
                 UnaryOp::Sinh => {
-                    *entry!(store, val) += &grad * val.clone().cosh();
+                    *entry!(store, val) += &grad * val.cosh();
                 }
                 UnaryOp::Sqrt => {
                     *entry!(store, val) +=
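
Reviewer note: the new BinaryScalar arm implements the scalar chain rules; the Pow case in particular is the power rule d/dx x^r = r * x^(r - 1), scaled by the incoming gradient. A plain-f64 hand check of that arm (no tensor machinery involved):

    fn main() {
        // For f(x) = x^r with x = 2 and r = 3: f'(x) = r * x^(r - 1) = 3 * 4 = 12.
        let (x, r) = (2f64, 3f64);
        let upstream = 1f64; // gradient flowing in from downstream
        let grad = upstream * r * x.powf(r - 1.0);
        assert_eq!(grad, 12.0);
    }
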
diff --git a/tensor/src/impls/iter.rs b/tensor/src/impls/iter.rs
new file mode 100644
index 00000000..46f8c69d
--- /dev/null
+++ b/tensor/src/impls/iter.rs
@@ -0,0 +1,22 @@
+/*
+    Appellation: iter
+    Contrib: FL03
+*/
+use crate::prelude::Scalar;
+use crate::tensor::TensorBase;
+
+impl<T> TensorBase<T> where T: Scalar {
+    pub fn sum(&self) -> T {
+        self.data().iter().copied().sum()
+    }
+
+    pub fn product(&self) -> T {
+        self.data().iter().copied().product()
+    }
+}
+
+impl<T> FromIterator<T> for TensorBase<T> {
+    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
+        Self::from_vec(Vec::from_iter(iter))
+    }
+}
diff --git a/tensor/src/impls/linalg.rs b/tensor/src/impls/linalg.rs
index 134cc591..2ec903a3 100644
--- a/tensor/src/impls/linalg.rs
+++ b/tensor/src/impls/linalg.rs
@@ -10,7 +10,7 @@ use crate::tensor::{self, TensorBase};
 use acme::prelude::UnaryOp;
 use num::traits::{Num, Signed};
 
-pub fn inverse<T>(tensor: &TensorBase<T>) -> TensorResult<TensorBase<T>>
+fn inverse_impl<T>(tensor: &TensorBase<T>) -> TensorResult<TensorBase<T>>
 where
     T: Copy + Num + PartialOrd + Signed,
 {
@@ -88,7 +88,7 @@ where
         let rank = *self.rank();
 
         let store = (0..rank).map(|i| self[vec![i; rank]]).collect::<Vec<T>>();
-        tensor::from_vec(false, self.shape().diagonalize(), store)
+        tensor::from_vec_with_kind(false, self.shape().diagonalize(), store)
     }
 
     pub fn det(&self) -> Result<T, TensorError> {
@@ -96,12 +96,12 @@ where
             return Err(ShapeError::InvalidShape.into());
         }
         let shape = self.shape();
-        let n = *shape.first().unwrap();
+        let n = shape.nrows();
         if n == 1 {
             return Ok(T::zero());
         }
         if n == 2 {
             let res = self[vec![0, 0]] * self[vec![1, 1]] - self[vec![0, 1]] * self[vec![1, 0]];
             return Ok(res);
         }
         let mut det = T::zero();
@@ -119,14 +119,14 @@ where
                     k += 1;
                 }
             }
-            let sub_tensor = tensor::from_vec(false, cur_shape.clone(), sub);
+            let sub_tensor = tensor::from_vec_with_kind(false, cur_shape.clone(), sub);
             let sign = if i % 2 == 0 { T::one() } else { -T::one() };
             det = det + sign * self[vec![0, i]] * sub_tensor.det()?;
         }
         Ok(det)
     }
 
     pub fn inv(&self) -> TensorResult<Self> {
-        inverse(self)
+        inverse_impl(self)
     }
 }
 
@@ -144,7 +144,7 @@ where
         for j in 0..other.shape()[1] {
             for k in 0..self.shape()[1] {
                 result[i * other.shape()[1] + j] +=
-                    self.store[i * self.shape()[1] + k] * other.store[k * other.shape()[1] + j];
+                    self.data[i * self.shape()[1] + k] * other.data[k * other.shape()[1] + j];
             }
         }
     }
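
Reviewer note: det() now reads its dimension via Shape::nrows() and recurses through from_vec_with_kind, with the 2 x 2 base case ad - bc left intact. A quick sanity check, assuming f64 satisfies the Num + Signed bounds on the impl:

    use acme::prelude::Tensor;

    fn main() {
        // det [[1, 2], [3, 4]] = 1 * 4 - 2 * 3 = -2
        let a = Tensor::from_shape_vec((2, 2), vec![1f64, 2.0, 3.0, 4.0]);
        assert_eq!(a.det().unwrap(), -2.0);
    }
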
diff --git a/tensor/src/impls/ops/binary.rs b/tensor/src/impls/ops/binary.rs
index 3c2c82c3..a8373521 100644
--- a/tensor/src/impls/ops/binary.rs
+++ b/tensor/src/impls/ops/binary.rs
@@ -2,13 +2,36 @@
     Appellation: arith
     Contrib: FL03
 */
-use crate::prelude::TensorExpr;
+use crate::prelude::{Scalar, TensorExpr};
 use crate::tensor::{from_vec_with_op, TensorBase};
 use acme::ops::binary::BinaryOp;
 use core::ops;
 use num::traits::Pow;
 
-macro_rules! cmp {
+pub(crate) fn broadcast_scalar_op<T, F>(lhs: &TensorBase<T>, rhs: &TensorBase<T>, op: BinaryOp, f: F) -> TensorBase<T>
+where
+    F: Fn(T, T) -> T,
+    T: Copy + Default,
+{
+    let mut lhs = lhs.clone();
+    let mut rhs = rhs.clone();
+    if lhs.is_scalar() {
+        lhs = lhs.broadcast(rhs.shape());
+    }
+    if rhs.is_scalar() {
+        rhs = rhs.broadcast(lhs.shape());
+    }
+    let shape = lhs.shape().clone();
+    let store = lhs.data().iter().zip(rhs.data().iter()).map(|(a, b)| f(*a, *b)).collect();
+    let op = TensorExpr::binary(lhs, rhs, op);
+    from_vec_with_op(false, op, shape, store)
+}
+
+fn check_shapes_or_scalar<T>(lhs: &TensorBase<T>, rhs: &TensorBase<T>)
+where
+    T: Clone + Default,
+{
+    let is_scalar = lhs.is_scalar() || rhs.is_scalar();
+    debug_assert!(
+        is_scalar || lhs.shape() == rhs.shape(),
+        "Shape Mismatch: {:?} != {:?}",
+        lhs.shape(),
+        rhs.shape()
+    );
+}
+
+macro_rules! check {
     (ne: $lhs:expr, $rhs:expr) => {
         if $lhs != $rhs {
             panic!("Shape Mismatch: {:?} != {:?}", $lhs, $rhs);
@@ -16,6 +39,34 @@ macro_rules! check {
     };
 }
 
+impl<T> TensorBase<T> where T: Scalar {
+    pub fn apply_binary(&self, other: &Self, op: BinaryOp) -> Self {
+        check_shapes_or_scalar(self, other);
+        let shape = self.shape();
+        let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a + *b).collect();
+        let op = TensorExpr::binary(self.clone(), other.clone(), op);
+        from_vec_with_op(false, op, shape, store)
+    }
+
+    pub fn apply_binaryf<F>(&self, other: &Self, op: BinaryOp, f: F) -> Self
+    where
+        F: Fn(T, T) -> T,
+    {
+        check_shapes_or_scalar(self, other);
+        let shape = self.shape();
+        let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| f(*a, *b)).collect();
+        let op = TensorExpr::binary(self.clone(), other.clone(), op);
+        from_vec_with_op(false, op, shape, store)
+    }
+}
+
+impl<T> TensorBase<T> where T: Scalar {
+    pub fn pow(&self, exp: T) -> Self {
+        let shape = self.shape();
+        let store = self.data().iter().copied().map(|a| a.pow(exp)).collect();
+        let op = TensorExpr::binary_scalar(self.clone(), exp, BinaryOp::Pow);
+        from_vec_with_op(false, op, shape, store)
+    }
+}
+
 impl<T> Pow<T> for TensorBase<T>
 where
     T: Copy + Pow<T, Output = T>,
@@ -44,10 +95,44 @@ where
     }
 }
 
-macro_rules! impl_arithmetic {
-    (op: $trait:ident, $method:ident, $op:tt) => {
-        impl_scalar_arith!($trait, $method, $op);
+macro_rules! impl_binary_op {
+    ($(($trait:ident, $method:ident, $op:tt)),*) => {
+        $( impl_binary_op!($trait, $method, $op); )*
+    };
+    ($trait:ident, $method:ident, $op:tt) => {
+        impl_binary_op!(scalar: $trait, $method, $op);
+        impl_binary_op!(tensor: $trait, $method, $op);
+    };
+    (scalar: $trait:ident, $method:ident, $op:tt) => {
+        impl<T> ops::$trait<T> for TensorBase<T>
+        where
+            T: Copy + ops::$trait<T, Output = T>,
+        {
+            type Output = Self;
+
+            fn $method(self, other: T) -> Self::Output {
+                let shape = self.shape().clone();
+                let store = self.data().iter().map(|a| *a $op other).collect();
+                let op = TensorExpr::binary_scalar(self, other, BinaryOp::$trait);
+                from_vec_with_op(false, op, shape, store)
+            }
+        }
+
+        impl<'a, T> ops::$trait<T> for &'a TensorBase<T>
+        where
+            T: Copy + ops::$trait<T, Output = T>,
+        {
+            type Output = TensorBase<T>;
+
+            fn $method(self, other: T) -> Self::Output {
+                let shape = self.shape().clone();
+                let store = self.data().iter().map(|a| *a $op other).collect();
+                let op = TensorExpr::binary_scalar(self.clone(), other, BinaryOp::$trait);
+                from_vec_with_op(false, op, shape, store)
+            }
+        }
+    };
+    (tensor: $trait:ident, $method:ident, $op:tt) => {
         impl<T> ops::$trait for TensorBase<T>
         where
             T: Copy + ops::$trait<Output = T>,
@@ -55,7 +140,7 @@ macro_rules! impl_binary_op {
             type Output = Self;
 
             fn $method(self, other: Self) -> Self::Output {
-                cmp!(ne: self.shape(), other.shape());
+                check!(ne: self.shape(), other.shape());
                 let shape = self.shape().clone();
                 let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect();
                 let op = TensorExpr::binary(self, other, BinaryOp::$trait);
@@ -114,42 +199,7 @@ macro_rules! impl_binary_op {
             }
         }
     };
-    ($(($trait:ident, $method:ident, $op:tt)),*) => {
-        $( impl_arithmetic!(op: $trait, $method, $op); )*
-    };
-}
-
-macro_rules! impl_scalar_arith {
-    ($trait:ident, $method:ident, $op:tt) => {
-
-        impl<T> ops::$trait<T> for TensorBase<T>
-        where
-            T: Copy + ops::$trait<T, Output = T>,
-        {
-            type Output = Self;
-
-            fn $method(self, other: T) -> Self::Output {
-                let shape = self.shape().clone();
-                let store = self.data().iter().map(|a| *a $op other).collect();
-                let op = TensorExpr::binary_scalar(self, other, BinaryOp::$trait);
-                from_vec_with_op(false, op, shape, store)
-            }
-        }
-
-        impl<'a, T> ops::$trait<T> for &'a TensorBase<T>
-        where
-            T: Copy + ops::$trait<T, Output = T>,
-        {
-            type Output = TensorBase<T>;
-
-            fn $method(self, other: T) -> Self::Output {
-                let shape = self.shape().clone();
-                let store = self.data().iter().map(|a| *a $op other).collect();
-                let op = TensorExpr::binary_scalar(self.clone(), other, BinaryOp::$trait);
-                from_vec_with_op(false, op, shape, store)
-            }
-        }
-    };
+
 }
 
 macro_rules! impl_assign_op {
     ($trait:ident, $method:ident, $inner:ident, $op:tt) => {
         impl<T> ops::$trait for TensorBase<T>
         where
             T: Copy + ops::$inner<Output = T>,
         {
             fn $method(&mut self, other: Self) {
-                cmp!(ne: self.shape(), other.shape());
+                check!(ne: self.shape(), other.shape());
                 let shape = self.shape().clone();
                 let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect();
                 let op = TensorExpr::binary(self.clone(), other, BinaryOp::$inner);
@@ -173,7 +223,7 @@ macro_rules! impl_assign_op {
             T: Copy + ops::$inner<Output = T>,
         {
             fn $method(&mut self, other: &'a TensorBase<T>) {
-                cmp!(ne: self.shape(), other.shape());
+                check!(ne: self.shape(), other.shape());
                 let shape = self.shape().clone();
                 let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect();
                 let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$inner);
@@ -185,10 +235,37 @@ macro_rules! impl_assign_op {
     };
 }
 
-impl_arithmetic!((Add, add, +), (Div, div, /), (Mul, mul, *), (Rem, rem, %), (Sub, sub, -));
+macro_rules! impl_binary_method {
+    (scalar: $variant:ident, $method:ident, $op:tt) => {
+        pub fn $method(&self, other: T) -> Self {
+            let shape = self.shape();
+            let store = self.data().iter().map(| elem | *elem $op other).collect();
+            let op = TensorExpr::binary_scalar(self.clone(), other.clone(), BinaryOp::$variant);
+            from_vec_with_op(false, op, shape, store)
+        }
+
+    };
+    (tensor: $variant:ident, $method:ident, $op:tt) => {
+        pub fn $method(&self, other: &Self) -> Self {
+            check!(ne: self.shape(), other.shape());
+            let shape = self.shape();
+            let store = self.data().iter().zip(other.data().iter()).map(|(a, b)| *a $op *b).collect();
+            let op = TensorExpr::binary(self.clone(), other.clone(), BinaryOp::$variant);
+            from_vec_with_op(false, op, shape, store)
+        }
+
+    };
+}
+
+impl_binary_op!((Add, add, +), (Div, div, /), (Mul, mul, *), (Rem, rem, %), (Sub, sub, -));
 
 impl_assign_op!(AddAssign, add_assign, Add, +);
 impl_assign_op!(DivAssign, div_assign, Div, /);
 impl_assign_op!(MulAssign, mul_assign, Mul, *);
 impl_assign_op!(RemAssign, rem_assign, Rem, %);
 impl_assign_op!(SubAssign, sub_assign, Sub, -);
+
+impl<T> TensorBase<T> where T: Scalar {
+    impl_binary_method!(tensor: Add, add, +);
+    impl_binary_method!(scalar: Add, add_scalar, +);
+}
\ No newline at end of file
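
Reviewer note: impl_binary_method! is only instantiated for Add so far, giving tensors an inherent add (tensor + tensor) and add_scalar (tensor + scalar). A short usage sketch, assuming the prelude re-exports used in the tests:

    use acme::prelude::{IntoShape, Tensor};

    fn main() {
        let a = Tensor::fill((2, 2).into_shape(), 1f64);
        // Elementwise tensor + tensor via the new inherent method.
        let b = a.add(&a);
        assert_eq!(b.sum(), 8.0);
        // Tensor + scalar via add_scalar.
        let c = a.add_scalar(2f64);
        assert_eq!(c.sum(), 12.0);
    }
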
diff --git a/tensor/src/impls/ops/unary.rs b/tensor/src/impls/ops/unary.rs
index 5216fe64..c70cf363 100644
--- a/tensor/src/impls/ops/unary.rs
+++ b/tensor/src/impls/ops/unary.rs
@@ -15,7 +15,7 @@ where
 
     fn neg(self) -> Self::Output {
         let shape = self.shape().clone();
-        let store = self.data().iter().copied().map(|a| -a).collect();
+        let store = self.data().iter().map(|a| (*a).neg()).collect();
         let op = TensorExpr::unary(self, UnaryOp::Neg);
         from_vec_with_op(false, op, shape, store)
     }
@@ -28,8 +28,8 @@ where
     type Output = TensorBase<T>;
 
     fn neg(self) -> Self::Output {
-        let shape = self.shape().clone();
-        let store = self.data().iter().copied().map(|a| -a).collect();
+        let shape = self.shape();
+        let store = self.data().iter().map(|a| (*a).neg()).collect();
         let op = TensorExpr::unary(self.clone(), UnaryOp::Neg);
         from_vec_with_op(false, op, shape, store)
     }
@@ -43,7 +43,7 @@ where
 
     fn not(self) -> Self::Output {
         let shape = self.shape().clone();
-        let store = self.data().iter().copied().map(|a| !a).collect();
+        let store = self.data().iter().map(|a| (*a).not()).collect();
         let op = TensorExpr::unary(self, UnaryOp::Not);
         from_vec_with_op(false, op, shape, store)
     }
@@ -57,7 +57,7 @@ where
 
     fn not(self) -> Self::Output {
         let shape = self.shape();
-        let store = self.store.iter().copied().map(|a| !a).collect();
+        let store = self.data.iter().copied().map(|a| !a).collect();
         let op = TensorExpr::unary(self.clone(), UnaryOp::Not);
         from_vec_with_op(false, op, shape, store)
     }
@@ -67,7 +67,7 @@ macro_rules! impl_unary_op {
     ($variant:ident, $method:ident) => {
         pub fn $method(&self) -> Self {
             let shape = self.shape();
-            let store = self.store.iter().copied().map(|v| v.$method()).collect();
+            let store = self.data().iter().copied().map(|v| v.$method()).collect();
             let op = TensorExpr::unary(self.clone(), UnaryOp::$variant);
             from_vec_with_op(false, op, shape, store)
         }
@@ -91,7 +91,7 @@ where
     T: Scalar,
 {
     let shape = self.shape();
-    let store = self.store.iter().copied().map(|v| v.abs()).collect();
+    let store = self.data.iter().copied().map(|v| v.abs()).collect();
     let op = TensorExpr::unary(self.clone(), UnaryOp::Abs);
     from_vec_with_op(false, op, shape, store)
 }
diff --git a/tensor/src/impls/reshape.rs b/tensor/src/impls/reshape.rs
index 08835396..86891e01 100644
--- a/tensor/src/impls/reshape.rs
+++ b/tensor/src/impls/reshape.rs
@@ -18,7 +18,7 @@ where
             kind: self.kind(),
             layout,
             op: op.into(),
-            store: self.store.clone(),
+            data: self.data.clone(),
         }
     }
 
@@ -36,7 +36,7 @@ where
         let layout = self.layout().clone().swap_axes(swap, with);
         let shape = self.layout.shape();
 
-        let mut data = self.store.to_vec();
+        let mut data = self.data.to_vec();
 
         for i in 0..shape[swap] {
             for j in 0..shape[with] {
@@ -51,7 +51,7 @@ where
             kind: self.kind.clone(),
             layout,
             op: op.into(),
-            store: data.clone(),
+            data: data.clone(),
         }
     }
     /// Transpose the tensor.
@@ -64,7 +64,7 @@ where
             kind: self.kind(),
             layout,
             op: op.into(),
-            store: self.data().clone(),
+            data: self.data().clone(),
         }
     }
diff --git a/tensor/src/lib.rs b/tensor/src/lib.rs
index a404639c..6c661939 100644
--- a/tensor/src/lib.rs
+++ b/tensor/src/lib.rs
@@ -40,6 +40,7 @@ mod impls {
     }
     mod create;
     mod grad;
+    mod iter;
     mod linalg;
     mod num;
     mod reshape;
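
Reviewer note: the unary impls are a mechanical store -> data rename, but they are worth exercising since Neg and Not are also implemented for borrowed tensors, leaving the original intact. A minimal sketch:

    use acme::prelude::{IntoShape, Tensor};

    fn main() {
        let a = Tensor::fill((2, 2).into_shape(), 1f64);
        // Negating through a reference clones the data; `a` survives.
        let b = -&a;
        assert_eq!(b.sum(), -4.0);
        assert_eq!(a.sum(), 4.0);
    }
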
diff --git a/tensor/src/shape/shape.rs b/tensor/src/shape/shape.rs
index 75a97bf1..66c64e70 100644
--- a/tensor/src/shape/shape.rs
+++ b/tensor/src/shape/shape.rs
@@ -54,11 +54,14 @@ impl Shape {
     pub fn diagonalize(&self) -> Shape {
         Self::new(vec![self.size()])
     }
+    pub fn get_final_position(&self) -> Vec<usize> {
+        self.iter().map(|&dim| dim - 1).collect()
+    }
     /// Inserts a new dimension along the given [Axis].
     pub fn insert(&mut self, index: Axis, dim: usize) {
         self.0.insert(*index, dim)
     }
-
+    /// Inserts a new dimension along the given [Axis].
     pub fn insert_axis(&self, index: Axis) -> Self {
         let mut shape = self.clone();
         shape.insert(index, 1);
@@ -89,7 +92,7 @@ impl Shape {
     /// The number of columns in the shape.
     pub fn ncols(&self) -> usize {
         if self.len() >= 2 {
-            self.0[1]
+            self[1]
         } else if self.len() == 1 {
             1
         } else {
@@ -99,12 +102,12 @@ impl Shape {
     /// The number of rows in the shape.
     pub fn nrows(&self) -> usize {
         if self.len() >= 1 {
-            *self.0.first().unwrap()
+            self[0]
         } else {
             0
         }
     }
-
+    /// Removes and returns the last dimension of the shape.
     pub fn pop(&mut self) -> Option<usize> {
         self.0.pop()
     }
@@ -247,66 +250,6 @@ impl SwapAxes for Shape {
     }
 }
 
-impl From<()> for Shape {
-    fn from(_: ()) -> Self {
-        Self::default()
-    }
-}
-
-impl From<usize> for Shape {
-    fn from(dim: usize) -> Self {
-        Self(vec![dim])
-    }
-}
-
-impl From<Vec<usize>> for Shape {
-    fn from(shape: Vec<usize>) -> Self {
-        Self(shape)
-    }
-}
-
-impl From<&[usize]> for Shape {
-    fn from(shape: &[usize]) -> Self {
-        Self(shape.to_vec())
-    }
-}
-
-impl From<(usize,)> for Shape {
-    fn from(shape: (usize,)) -> Self {
-        Self(vec![shape.0])
-    }
-}
-
-impl From<(usize, usize)> for Shape {
-    fn from(shape: (usize, usize)) -> Self {
-        Self(vec![shape.0, shape.1])
-    }
-}
-
-impl From<(usize, usize, usize)> for Shape {
-    fn from(shape: (usize, usize, usize)) -> Self {
-        Self(vec![shape.0, shape.1, shape.2])
-    }
-}
-
-impl From<(usize, usize, usize, usize)> for Shape {
-    fn from(shape: (usize, usize, usize, usize)) -> Self {
-        Self(vec![shape.0, shape.1, shape.2, shape.3])
-    }
-}
-
-impl From<(usize, usize, usize, usize, usize)> for Shape {
-    fn from(shape: (usize, usize, usize, usize, usize)) -> Self {
-        Self(vec![shape.0, shape.1, shape.2, shape.3, shape.4])
-    }
-}
-
-impl From<(usize, usize, usize, usize, usize, usize)> for Shape {
-    fn from(shape: (usize, usize, usize, usize, usize, usize)) -> Self {
-        Self(vec![shape.0, shape.1, shape.2, shape.3, shape.4, shape.5])
-    }
-}
-
 impl FromIterator<usize> for Shape {
     fn from_iter<I: IntoIterator<Item = usize>>(iter: I) -> Self {
         Self(Vec::from_iter(iter))
     }
 }
@@ -411,6 +354,68 @@ unsafe impl Send for Shape {}
 
 unsafe impl Sync for Shape {}
 
+
+impl From<()> for Shape {
+    fn from(_: ()) -> Self {
+        Self::default()
+    }
+}
+
+impl From<usize> for Shape {
+    fn from(dim: usize) -> Self {
+        Self(vec![dim])
+    }
+}
+
+impl From<Vec<usize>> for Shape {
+    fn from(shape: Vec<usize>) -> Self {
+        Self(shape)
+    }
+}
+
+impl From<&[usize]> for Shape {
+    fn from(shape: &[usize]) -> Self {
+        Self(shape.to_vec())
+    }
+}
+
+impl From<(usize,)> for Shape {
+    fn from(shape: (usize,)) -> Self {
+        Self(vec![shape.0])
+    }
+}
+
+impl From<(usize, usize)> for Shape {
+    fn from(shape: (usize, usize)) -> Self {
+        Self(vec![shape.0, shape.1])
+    }
+}
+
+impl From<(usize, usize, usize)> for Shape {
+    fn from(shape: (usize, usize, usize)) -> Self {
+        Self(vec![shape.0, shape.1, shape.2])
+    }
+}
+
+impl From<(usize, usize, usize, usize)> for Shape {
+    fn from(shape: (usize, usize, usize, usize)) -> Self {
+        Self(vec![shape.0, shape.1, shape.2, shape.3])
+    }
+}
+
+impl From<(usize, usize, usize, usize, usize)> for Shape {
+    fn from(shape: (usize, usize, usize, usize, usize)) -> Self {
+        Self(vec![shape.0, shape.1, shape.2, shape.3, shape.4])
+    }
+}
+
+impl From<(usize, usize, usize, usize, usize, usize)> for Shape {
+    fn from(shape: (usize, usize, usize, usize, usize, usize)) -> Self {
+        Self(vec![shape.0, shape.1, shape.2, shape.3, shape.4, shape.5])
+    }
+}
+
+
 // macro_rules! tuple_vec {
 //     ($($n:tt),*) => {
 //         vec![$($n,)*]
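
Reviewer note: the From conversions were relocated below the unsafe Send/Sync impls but are unchanged; tuples of one through six usize dimensions still convert directly, and nrows/ncols now index through self[0] and self[1]. A quick sketch:

    use acme::prelude::Shape;

    fn main() {
        let shape = Shape::from((2, 3));
        assert_eq!(shape.nrows(), 2);
        assert_eq!(shape.ncols(), 3);
    }
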
diff --git a/tensor/src/tensor.rs b/tensor/src/tensor.rs
index 1f0b06ce..e7d57112 100644
--- a/tensor/src/tensor.rs
+++ b/tensor/src/tensor.rs
@@ -9,7 +9,6 @@
 use crate::ops::{BackpropOp, TensorExpr};
 use crate::prelude::{TensorId, TensorKind};
 use crate::shape::{IntoShape, Rank, Shape, Stride};
 
-use acme::prelude::BinaryOp;
 #[cfg(not(feature = "std"))]
 use alloc::vec::{self, Vec};
 use core::iter::Map;
@@ -18,59 +17,60 @@ use core::slice::Iter as SliceIter;
 #[cfg(feature = "std")]
 use std::vec;
 
-pub(crate) fn new<T>(
+pub(crate) fn create_with<T>(
     kind: impl Into<TensorKind>,
     op: impl Into<BackpropOp<T>>,
     shape: impl IntoShape,
-    store: Vec<T>,
+    data: Vec<T>,
 ) -> TensorBase<T> {
     TensorBase {
         id: TensorId::new(),
+        data,
         kind: kind.into(),
         layout: Layout::contiguous(shape),
         op: op.into(),
-        store,
     }
 }
 
-pub(crate) fn from_vec<T>(
+pub(crate) fn from_vec_with_kind<T>(
     kind: impl Into<TensorKind>,
     shape: impl IntoShape,
-    store: Vec<T>,
+    data: Vec<T>,
 ) -> TensorBase<T> {
-    new(kind, BackpropOp::none(), shape, store)
+    create_with(kind, BackpropOp::none(), shape, data)
 }
 
 pub(crate) fn from_vec_with_op<T>(
     kind: impl Into<TensorKind>,
     op: TensorExpr<T>,
     shape: impl IntoShape,
-    store: Vec<T>,
+    data: Vec<T>,
 ) -> TensorBase<T> {
-    new(kind.into(), BackpropOp::new(op), shape, store)
+    create_with(kind.into(), BackpropOp::new(op), shape, data)
 }
 
 #[derive(Clone, Debug, Hash, Ord, PartialOrd)]
 pub struct TensorBase<T> {
     pub(crate) id: TensorId,
+    pub(crate) data: Vec<T>,
     pub(crate) kind: TensorKind,
     pub(crate) layout: Layout,
     pub(crate) op: BackpropOp<T>,
-    pub(crate) store: Vec<T>,
 }
 
 impl<T> TensorBase<T> {
     pub fn new(kind: TensorKind, shape: impl IntoShape) -> Self {
         let shape = shape.into_shape();
-        let store = Vec::with_capacity(shape.size());
+        let data = Vec::with_capacity(shape.size());
         Self {
             id: TensorId::new(),
+            data,
             kind,
             layout: Layout::contiguous(shape),
             op: BackpropOp::none(),
-            store,
         }
     }
+    /// Create a new tensor from an iterator.
     pub fn from_iter<I>(iter: I) -> Self
     where
         I: IntoIterator<Item = T>,
@@ -81,12 +81,13 @@ impl<T> TensorBase<T> {
     pub fn from_scalar(value: T) -> Self {
         Self {
             id: TensorId::new(),
+            data: vec![value],
             kind: TensorKind::default(),
             layout: Layout::contiguous(()),
             op: None.into(),
-            store: vec![value],
         }
     }
+    /// Create a new tensor from an iterator, with a particular shape.
     pub fn from_shape_iter<I>(shape: impl IntoShape, iter: I) -> Self
     where
         I: IntoIterator<Item = T>,
@@ -94,33 +95,33 @@ impl<T> TensorBase<T> {
         Self::from_shape_vec(shape, Vec::from_iter(iter))
     }
     /// Create a new tensor from a [Vec], with a specified [shape](Shape).
-    pub fn from_shape_vec(shape: impl IntoShape, store: Vec<T>) -> Self {
+    pub fn from_shape_vec(shape: impl IntoShape, data: Vec<T>) -> Self {
         Self {
             id: TensorId::new(),
+            data,
             kind: TensorKind::default(),
             layout: Layout::contiguous(shape),
             op: BackpropOp::none(),
-            store,
         }
     }
     /// Create a new, one-dimensional tensor from a [Vec].
-    pub fn from_vec(store: Vec<T>) -> Self {
-        let shape = Shape::from(store.len());
+    pub fn from_vec(data: Vec<T>) -> Self {
+        let shape = Shape::from(data.len());
         Self {
             id: TensorId::new(),
+            data,
             kind: TensorKind::default(),
             layout: Layout::contiguous(shape),
             op: BackpropOp::none(),
-            store,
         }
     }
     /// Return a reference to the tensor's data.
     pub fn as_slice(&self) -> &[T] {
-        &self.store
+        &self.data
     }
     /// Return a mutable reference to the tensor's data.
     pub fn as_mut_slice(&mut self) -> &mut [T] {
-        &mut self.store
+        &mut self.data
     }
     /// Detach the computational graph from the tensor
     pub fn detach(&self) -> Self
@@ -135,7 +136,7 @@ impl<T> TensorBase<T> {
             kind: self.kind,
             layout: self.layout.clone(),
             op: BackpropOp::none(),
-            store: self.store.clone(),
+            data: self.data.clone(),
         }
     }
 }
@@ -152,12 +153,12 @@ impl<T> TensorBase<T> {
     /// Returns the data at the specified index.
     pub fn get(&self, index: impl AsRef<[usize]>) -> Option<&T> {
         let i = self.layout.index(index);
-        self.store.get(i)
+        self.data().get(i)
     }
     /// Returns a mutable reference to the data at the specified index.
     pub fn get_mut(&mut self, index: impl AsRef<[usize]>) -> Option<&mut T> {
         let i = self.layout.index(index);
-        self.store.get_mut(i)
+        self.data_mut().get_mut(i)
     }
     /// Returns the unique identifier of the tensor.
     pub const fn id(&self) -> TensorId {
@@ -169,7 +170,7 @@ impl<T> TensorBase<T> {
     }
     /// Returns true if the tensor is empty.
     pub fn is_empty(&self) -> bool {
-        self.store.is_empty()
+        self.data().is_empty()
     }
     /// A function to check if the tensor is a scalar
     pub fn is_scalar(&self) -> bool {
@@ -177,7 +178,7 @@ impl<T> TensorBase<T> {
     }
     /// A function to check if the tensor is a variable
     pub const fn is_variable(&self) -> bool {
-        self.kind.is_variable()
+        self.kind().is_variable()
    }
     /// Return an iterator over the tensor
     pub fn iter(&self) -> StrideIter<'_, T> {
@@ -189,12 +190,12 @@ impl<T> TensorBase<T> {
     }
     /// Get a reference to the last element of the tensor
     pub fn last(&self) -> Option<&T> {
-        let pos = self.layout.shape().iter().map(|d| d - 1).collect::<Vec<_>>();
+        let pos = self.shape().iter().map(|d| d - 1).collect::<Vec<_>>();
         self.get(pos)
     }
     /// Get a mutable reference to the last element of the tensor
     pub fn last_mut(&mut self) -> Option<&mut T> {
-        let pos = self.layout.shape().iter().map(|d| d - 1).collect::<Vec<_>>();
+        let pos = self.shape().iter().map(|d| d - 1).collect::<Vec<_>>();
         self.get_mut(pos)
     }
     /// Get a reference to the [Layout] of the tensor
@@ -207,19 +208,19 @@ impl<T> TensorBase<T> {
     }
     /// Get an owned reference to the [Rank] of the tensor
     pub fn rank(&self) -> Rank {
-        self.layout.shape().rank()
+        self.shape().rank()
     }
     /// An owned reference of the tensors [Shape]
     pub fn shape(&self) -> &Shape {
-        self.layout.shape()
+        self.layout().shape()
     }
     /// Returns the number of elements in the tensor.
     pub fn size(&self) -> usize {
-        self.layout.size()
+        self.layout().size()
     }
     /// Get a reference to the stride of the tensor
     pub fn stride(&self) -> &Stride {
-        self.layout.stride()
+        self.layout().stride()
     }
     /// Create an iterator over the tensor
     pub fn strided(&self) -> StrideIter<'_, T> {
@@ -228,44 +229,23 @@ impl<T> TensorBase<T> {
     /// Turn the tensor into a scalar
     /// If the tensor has a rank greater than 0, this will return an error
     pub fn to_scalar(&self) -> TensorResult<&T> {
-        if self.is_scalar() {
-            Ok(self.first().unwrap())
-        } else {
-            Err(TensorError::NotScalar)
+        if !self.is_scalar() {
+            return Err(TensorError::NotScalar);
         }
+        Ok(self.first().unwrap())
     }
     /// Turn the tensor into a one-dimensional vector
     pub fn to_vec(&self) -> Vec<T>
     where
         T: Clone,
     {
-        self.store.to_vec()
+        self.data.to_vec()
     }
     /// Changes the kind of tensor to a variable
     pub fn variable(mut self) -> Self {
         self.kind = TensorKind::Variable;
         self
     }
-
-    pub fn apply_binary<F>(&self, other: &Self, op: BinaryOp, f: F) -> Self
-    where
-        F: Fn(&T, &T) -> T,
-        T: Clone,
-    {
-        let store = self
-            .data()
-            .iter()
-            .zip(other.data().iter())
-            .map(|(a, b)| f(a, b))
-            .collect();
-        TensorBase {
-            id: TensorId::new(),
-            kind: self.kind(),
-            layout: self.layout().clone(),
-            op: BackpropOp::binary(self.clone(), other.clone(), op),
-            store,
-        }
-    }
     ///
     pub unsafe fn with_layout(mut self, layout: Layout) -> Self {
         self.layout = layout;
@@ -292,13 +272,13 @@ where
     }
 
     pub fn view<'a>(&'a self) -> TensorBase<&'a T> {
-        let store = self.store.iter().collect();
+        let store = self.data.iter().collect();
         TensorBase {
             id: self.id,
             kind: self.kind,
             layout: self.layout.clone(),
             op: self.op.view(),
-            store,
+            data: store,
         }
     }
 }
@@ -306,15 +286,15 @@ where
 #[allow(dead_code)]
 impl<T> TensorBase<T> {
     pub(crate) fn data(&self) -> &Vec<T> {
-        &self.store
+        &self.data
     }
 
     pub(crate) fn data_mut(&mut self) -> &mut Vec<T> {
-        &mut self.store
+        &mut self.data
     }
 
     pub(crate) fn get_by_index(&self, index: usize) -> Option<&T> {
-        self.store.get(index)
+        self.data.get(index)
     }
 
     pub(crate) fn map<'a, F>(&'a self, f: F) -> Map<SliceIter<'a, T>, F>
@@ -322,7 +302,7 @@ impl<T> TensorBase<T> {
         F: FnMut(&'a T) -> T,
         T: 'a + Clone,
     {
-        self.store.iter().map(f)
+        self.data.iter().map(f)
     }
 
     pub(crate) fn mapv<F>(&self, f: F) -> TensorBase<T>
@@ -330,13 +310,13 @@ impl<T> TensorBase<T> {
         F: Fn(T) -> T,
         T: Copy,
     {
-        let store = self.store.iter().copied().map(f).collect();
+        let store = self.data.iter().copied().map(f).collect();
         TensorBase {
             id: TensorId::new(),
             kind: self.kind,
             layout: self.layout.clone(),
             op: self.op.clone(),
-            store,
+            data: store,
         }
     }
 }
@@ -349,7 +329,7 @@ where
 
     fn index(&self, index: Idx) -> &Self::Output {
         let i = self.layout().index(index);
-        &self.store[i]
+        &self.data[i]
     }
 }
 
@@ -359,7 +339,7 @@ where
 {
     fn index_mut(&mut self, index: Idx) -> &mut Self::Output {
         let i = self.layout().index(index);
-        &mut self.store[i]
+        &mut self.data[i]
     }
 }
 
@@ -370,12 +350,6 @@ where
     T: PartialEq,
 {
     fn eq(&self, other: &Self) -> bool {
-        self.layout == other.layout && self.store == other.store
-    }
-}
-
-impl<T> FromIterator<T> for TensorBase<T> {
-    fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
-        Self::from_vec(Vec::from_iter(iter))
+        self.layout == other.layout && self.data == other.data
     }
 }
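
Reviewer note: to_scalar() keeps its contract under the early-return rewrite: only rank-0 tensors convert. A minimal round trip through the constructors shown above:

    use acme::prelude::Tensor;

    fn main() {
        let scalar = Tensor::from_scalar(42f64);
        assert!(scalar.is_scalar());
        assert_eq!(scalar.to_scalar().unwrap(), &42.0);

        // A rank-1 tensor refuses the conversion.
        let vector = Tensor::from_vec(vec![1f64, 2.0]);
        assert!(vector.to_scalar().is_err());
    }
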
diff --git a/tensor/src/utils.rs b/tensor/src/utils.rs
index 4be2199e..f4872f6e 100644
--- a/tensor/src/utils.rs
+++ b/tensor/src/utils.rs
@@ -26,7 +26,7 @@ where
                 let pos = i * rhs.shape().ncols() + j;
                 let left = i * lhs.shape().ncols() + k;
                 let right = k * rhs.shape().ncols() + j;
-                result[pos] += lhs.store[left] * rhs.store[right];
+                result[pos] += lhs.data[left] * rhs.data[right];
             }
         }
     }
@@ -52,7 +52,7 @@ where
                 let pos = i * rhs.shape().ncols() + j;
                 let left = i * lhs.shape().ncols() + k;
                 let right = k * rhs.shape().ncols() + j;
-                result[pos] += lhs.store[left] * rhs.store[right];
+                result[pos] += lhs.data[left] * rhs.data[right];
             }
         }
     }
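
Reviewer note: both matmul kernels now read the renamed data field with the usual row-major indexing. A 2 x 2 hand check, assuming the tensor-level matmul(&self, &Self) -> Self wrapper seen in linalg.rs above:

    use acme::prelude::Tensor;

    fn main() {
        let a = Tensor::from_shape_vec((2, 2), vec![1f64, 2.0, 3.0, 4.0]);
        let b = Tensor::from_shape_vec((2, 2), vec![5f64, 6.0, 7.0, 8.0]);
        // [[1, 2], [3, 4]] x [[5, 6], [7, 8]] = [[19, 22], [43, 50]]
        let c = a.matmul(&b);
        assert_eq!(c.to_vec(), vec![19.0, 22.0, 43.0, 50.0]);
    }
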
diff --git a/tensor/tests/iter.rs b/tensor/tests/iter.rs
new file mode 100644
index 00000000..bd2dccc5
--- /dev/null
+++ b/tensor/tests/iter.rs
@@ -0,0 +1,22 @@
+/*
+    Appellation: iter
+    Contrib: FL03
+*/
+#![cfg(test)]
+extern crate acme_tensor as acme;
+
+use acme::prelude::{IntoShape, Tensor};
+
+#[test]
+fn test_sum() {
+    let shape = (2, 2).into_shape();
+    let a = Tensor::fill(shape, 1f64);
+    assert_eq!(a.sum(), 4.0);
+}
+
+#[test]
+fn test_product() {
+    let shape = (2, 2).into_shape();
+    let a = Tensor::fill(shape, 2f64);
+    assert_eq!(a.product(), 16.0);
+}
\ No newline at end of file
diff --git a/tensor/tests/tensor.rs b/tensor/tests/tensor.rs
index f8b8ab84..096c0411 100644
--- a/tensor/tests/tensor.rs
+++ b/tensor/tests/tensor.rs
@@ -6,7 +6,6 @@
 extern crate acme_tensor as acme;
 
 use acme::prelude::{IntoShape, Tensor};
-use num::One;
 
 #[test]
 fn test_tensor() {
@@ -18,9 +17,15 @@ fn test_tensor() {
     assert_eq!(a.shape(), b.shape());
     assert_eq!(a.size(), b.size());
     assert_eq!(a.stride(), b.stride());
+}
 
-    let tensor = Tensor::<f64>::one();
-    assert!(tensor.is_scalar());
+#[test]
+fn test_scalar_tensor() {
+    use num::{One, Zero};
+    let one = Tensor::<f64>::one();
+    let zero = Tensor::<f64>::zero();
+    assert!(one.is_scalar());
+    assert!(zero.is_scalar());
 }
 
 #[test]
@@ -47,8 +52,8 @@ fn test_index() {
         .unwrap();
 
     assert_eq!(a[[0, 0]], 0f64);
-    assert_eq!(a[[0, 1]], 1f64);
-    assert_eq!(a[[1, 2]], 5f64);
+    assert_eq!(a[&[0, 1]], 1f64);
+    assert_eq!(a[vec![1, 2]], 5f64);
 }
 
 #[test]
@@ -63,3 +68,17 @@ fn test_higher_dim() {
     assert_eq!(a.stride(), b.stride());
     assert_eq!(a.stride().len(), 4);
 }
+
+#[test]
+fn test_sum() {
+    let shape = (2, 2).into_shape();
+    let a = Tensor::fill(shape, 1f64);
+    assert_eq!(a.sum(), 4.0);
+}
+
+#[test]
+fn test_product() {
+    let shape = (2, 2).into_shape();
+    let a = Tensor::fill(shape, 2f64);
+    assert_eq!(a.product(), 16.0);
+}
\ No newline at end of file