diff --git a/Cargo.toml b/Cargo.toml index 78364f23..8738bd12 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,9 +15,8 @@ version = "0.1.12" # TODO - Update the cargo package version anyhow = "1" approx = "0.5" +itertools = { features = [], version = "0.12" } lazy_static = "1" -# ndarray = { features = ["serde-1"], version = "0.15" } -# ndarray-linalg = { features = [], version = "0.16" } ndarray-rand = { features = [], version = "0.14" } ndarray-stats = { features = [], version = "0.5" } num = { features = ["serde"], version = "0.4" } @@ -25,7 +24,7 @@ num = { features = ["serde"], version = "0.4" } serde = { features = ["derive"], version = "1" } serde_json = "1" smart-default = "0.7" -strum = { features = ["derive"], version = "0.25" } +strum = { features = ["derive"], version = "0.26" } [workspace] default-members = [ diff --git a/concision/Cargo.toml b/concision/Cargo.toml index 52a72a08..40f4bbe2 100644 --- a/concision/Cargo.toml +++ b/concision/Cargo.toml @@ -111,6 +111,8 @@ openblas-static = [ "concision-s4/openblas-static", ] +serde = [] + [lib] bench = true crate-type = ["rlib"] diff --git a/concision/examples/basic.rs b/concision/examples/basic.rs index 76f97644..9acb2fc9 100644 --- a/concision/examples/basic.rs +++ b/concision/examples/basic.rs @@ -1,9 +1,21 @@ extern crate concision; -use concision::prelude::BoxResult; +use concision as cnc; + +use cnc::core::ops::fft::*; +use cnc::prelude::{Arange, AsComplex, BoxResult}; + +use ndarray::prelude::*; fn main() -> BoxResult { println!("Welcome to concision!"); + let samples = 8; + let arr = Array1::::arange(samples).mapv(AsComplex::as_re); + let buff = arr.clone().into_raw_vec(); + let plan = FftPlan::new(samples); + println!("Permutations: {:?}", plan.plan()); + let res = ifft(buff.as_slice(), &plan); + println!("{:?}", &res); Ok(()) } diff --git a/core/Cargo.toml b/core/Cargo.toml index f792da56..f170e3b9 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -39,6 +39,7 @@ smart-default.workspace = true strum.workspace = true [dev-dependencies] +lazy_static.workspace = true [package.metadata.docs.rs] all-features = true diff --git a/core/src/errors/kinds.rs b/core/src/errors/kinds.rs index 56809bb2..442e0a87 100644 --- a/core/src/errors/kinds.rs +++ b/core/src/errors/kinds.rs @@ -4,7 +4,7 @@ */ use serde::{Deserialize, Serialize}; use smart_default::SmartDefault; -use strum::{Display, EnumCount, EnumIs, EnumIter, EnumVariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; #[derive( Clone, @@ -14,7 +14,6 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, EnumVariantNames}; EnumCount, EnumIs, EnumIter, - EnumVariantNames, Eq, Hash, Ord, @@ -22,6 +21,7 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, EnumVariantNames}; PartialOrd, Serialize, SmartDefault, + VariantNames, )] #[non_exhaustive] #[serde(rename_all = "lowercase")] @@ -39,7 +39,7 @@ pub enum Errors { IO, Null, Parse, - Process, + Process(ProcessError), Runtime, Syntax, Unknown, @@ -53,7 +53,6 @@ pub enum Errors { EnumCount, EnumIs, EnumIter, - EnumVariantNames, Eq, Hash, Ord, @@ -61,6 +60,7 @@ pub enum Errors { PartialOrd, Serialize, SmartDefault, + VariantNames, )] #[non_exhaustive] #[serde(rename_all = "lowercase")] diff --git a/core/src/id/ids/atomic.rs b/core/src/id/ids/atomic.rs new file mode 100644 index 00000000..c32aafeb --- /dev/null +++ b/core/src/id/ids/atomic.rs @@ -0,0 +1,52 @@ +/* + Appellation: atomic + Contrib: FL03 +*/ +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, 
PartialEq, PartialOrd, Serialize)] +pub struct AtomicId(usize); + +impl AtomicId { + pub fn new() -> Self { + use std::sync::atomic; + static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1); + Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed)) + } +} + +impl AsRef for AtomicId { + fn as_ref(&self) -> &usize { + &self.0 + } +} + +impl AsMut for AtomicId { + fn as_mut(&mut self) -> &mut usize { + &mut self.0 + } +} + +impl Default for AtomicId { + fn default() -> Self { + Self::new() + } +} + +impl std::fmt::Display for AtomicId { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +impl From for AtomicId { + fn from(id: usize) -> Self { + Self(id) + } +} + +impl From for usize { + fn from(id: AtomicId) -> Self { + id.0 + } +} diff --git a/core/src/id/ids/mod.rs b/core/src/id/ids/mod.rs new file mode 100644 index 00000000..31e6d967 --- /dev/null +++ b/core/src/id/ids/mod.rs @@ -0,0 +1,8 @@ +/* + Appellation: ids + Contrib: FL03 +*/ +//! # ids +pub use self::atomic::*; + +pub(crate) mod atomic; diff --git a/core/src/id/mod.rs b/core/src/id/mod.rs index dbe3ce71..592c3677 100644 --- a/core/src/id/mod.rs +++ b/core/src/id/mod.rs @@ -3,11 +3,18 @@ Contrib: FL03 */ //! # id -pub use self::{identity::*, utils::*}; +pub use self::{identity::*, ids::*, utils::*}; pub(crate) mod identity; +pub(crate) mod ids; pub(crate) mod utils { + // https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805 + pub fn atomic_id() -> usize { + use std::sync::atomic; + static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(0); + COUNTER.fetch_add(1, atomic::Ordering::Relaxed) + } pub fn rid(length: usize) -> String { use rand::distributions::Alphanumeric; @@ -22,4 +29,20 @@ pub(crate) mod utils { } #[cfg(test)] -mod tests {} +mod tests { + + use super::*; + + #[test] + fn test_atomic_id() { + let id = atomic_id(); + assert_eq!(id, 0); + assert_ne!(id, atomic_id()); + } + + #[test] + fn test_rid() { + let id = rid(10); + assert_eq!(id.len(), 10); + } +} diff --git a/core/src/lib.rs b/core/src/lib.rs index 2674282f..e8c81620 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -12,6 +12,7 @@ pub(crate) mod utils; pub mod errors; pub mod id; pub mod masks; +pub mod ops; pub mod params; pub mod specs; pub mod states; diff --git a/core/src/masks/mask.rs b/core/src/masks/mask.rs index de49bcdd..37451de7 100644 --- a/core/src/masks/mask.rs +++ b/core/src/masks/mask.rs @@ -2,24 +2,27 @@ Appellation: mask Contrib: FL03 */ -use ndarray::prelude::{Array, Array2}; -use ndarray::Dimension; -use ndarray_rand::rand_distr::{uniform::SampleUniform, Uniform}; +use ndarray::prelude::{Array, Array2, Dimension}; +use ndarray::ScalarOperand; +use ndarray_rand::rand_distr::uniform::{SampleUniform, Uniform}; use ndarray_rand::RandomExt; -use num::Float; +use num::traits::{Float, NumOps}; use serde::{Deserialize, Serialize}; use smart_default::SmartDefault; use std::ops; use strum::EnumIs; #[derive(Clone, Debug, Deserialize, EnumIs, PartialEq, Serialize, SmartDefault)] -pub enum Mask { +pub enum Mask { Masked(Array2), #[default] Unmasked, } -impl Mask { +impl Mask +where + T: NumOps + ScalarOperand, +{ pub fn forward(&self, data: &Array2) -> Array2 { match self { Self::Masked(bias) => data + bias, @@ -28,11 +31,11 @@ impl Mask { } } -impl Mask +impl Mask where T: Float + SampleUniform, { - pub fn masked(size: usize) -> Self { + pub fn uniform(size: usize) -> Self { let ds = (T::from(size).unwrap()).sqrt(); let dist = Uniform::new(-ds, ds); let mask = 
Array2::::random((size, size), dist); @@ -40,7 +43,7 @@ where } } -impl From for Mask +impl From for Mask where T: Float + SampleUniform, { @@ -52,19 +55,13 @@ where } } -impl From> for Mask -where - T: Float, -{ +impl From> for Mask { fn from(bias: Array2) -> Self { Self::Masked(bias) } } -impl From>> for Mask -where - T: Float, -{ +impl From>> for Mask { fn from(bias: Option>) -> Self { match bias { Some(bias) => Self::Masked(bias), @@ -73,10 +70,7 @@ where } } -impl From> for Option> -where - T: Float, -{ +impl From> for Option> { fn from(bias: Mask) -> Self { match bias { Mask::Masked(bias) => Some(bias), @@ -85,8 +79,10 @@ where } } -impl ops::Add> for Mask +impl ops::Add> for Mask where + D: Dimension, + T: NumOps + ScalarOperand, Array: ops::Add, Output = Array>, { type Output = Array; @@ -100,8 +96,10 @@ where } } -impl ops::Add<&Array> for Mask +impl ops::Add<&Array> for Mask where + D: Dimension, + T: NumOps + ScalarOperand, Array: ops::Add, Output = Array>, { type Output = Array; @@ -115,8 +113,10 @@ where } } -impl ops::Add> for Array +impl ops::Add> for Array where + D: Dimension, + T: NumOps + ScalarOperand, Array: ops::Add, Output = Array>, { type Output = Array; @@ -130,8 +130,10 @@ where } } -impl ops::Add<&Mask> for Array +impl ops::Add<&Mask> for Array where + D: Dimension, + T: NumOps + ScalarOperand, Array: ops::Add, Output = Array>, { type Output = Array; diff --git a/core/src/masks/mod.rs b/core/src/masks/mod.rs index 6748f63b..db9ce4cd 100644 --- a/core/src/masks/mod.rs +++ b/core/src/masks/mod.rs @@ -7,6 +7,11 @@ pub use self::{mask::*, utils::*}; pub(crate) mod mask; +pub trait Masked { + fn mask(&self) -> &Mask; + fn mask_mut(&mut self) -> &mut Mask; +} + pub(crate) mod utils { use super::Mask; use ndarray::prelude::Array2; diff --git a/core/src/ops/fft/algorithms/dft.rs b/core/src/ops/fft/algorithms/dft.rs new file mode 100644 index 00000000..17f986f4 --- /dev/null +++ b/core/src/ops/fft/algorithms/dft.rs @@ -0,0 +1,9 @@ +/* + Appellation: dft + Contrib: FL03 +*/ +//! # Discrete Fourier Transform +//! +//! + +pub struct Dft; diff --git a/core/src/ops/fft/algorithms/mod.rs b/core/src/ops/fft/algorithms/mod.rs new file mode 100644 index 00000000..b639612e --- /dev/null +++ b/core/src/ops/fft/algorithms/mod.rs @@ -0,0 +1,6 @@ +pub use self::dft::*; + +pub(crate) mod dft; + +#[cfg(test)] +mod tests {} diff --git a/core/src/ops/fft/fft.rs b/core/src/ops/fft/fft.rs new file mode 100644 index 00000000..65dfa1c6 --- /dev/null +++ b/core/src/ops/fft/fft.rs @@ -0,0 +1,29 @@ +/* + Appellation: fft + Contrib: FL03 +*/ +use super::{FftDirection, FftPlan}; +// use crate::prelude::AsComplex; +// use num::complex::{Complex, ComplexFloat}; +// use num::traits::{Float, FloatConst, NumAssignOps, NumOps}; +// use num::traits::real::Real; +// use std::ops::Neg; + +pub struct FastFourierTransform { + direction: FftDirection, + plan: FftPlan, +} + +impl FastFourierTransform { + pub fn new(direction: FftDirection, plan: FftPlan) -> Self { + Self { direction, plan } + } + + pub fn direction(&self) -> FftDirection { + self.direction + } + + pub fn plan(&self) -> &FftPlan { + &self.plan + } +} diff --git a/core/src/ops/fft/mod.rs b/core/src/ops/fft/mod.rs new file mode 100644 index 00000000..c90585d6 --- /dev/null +++ b/core/src/ops/fft/mod.rs @@ -0,0 +1,357 @@ +/* + Appellation: fft + Contrib: FL03 +*/ +//! # Fast Fourier Transform +//! +//! 
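//! The routines in this module follow the in-place radix-2 (Cooley-Tukey) scheme: the
//! input is first reordered with a bit-reversal permutation ([`FftPlan`]) and then
//! combined in doubling segments using the corresponding roots of unity.
//!
//! A minimal round-trip sketch, assuming `f64` samples of power-of-two length
//! (mirroring the `small_polynomial_returns_self` test below):
//!
//! ```ignore
//! let signal = vec![1.0f64, 1.0, 0.0, 2.5];
//! let plan = FftPlan::new(signal.len());
//! let freq = fft(&signal, &plan); // Vec<Complex<f64>>
//! let time: Vec<f64> = ifft(&freq, &plan).into_iter().map(|c| c.re()).collect();
//! // `time` should approximately equal `signal`; ifft rescales by 1 / n.
//! ```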
+pub use self::{fft::*, modes::*, plan::*, utils::*}; + +pub(crate) mod fft; +pub(crate) mod modes; +pub(crate) mod plan; + +pub mod algorithms; + +pub trait Fft { + fn fft(&self) -> Vec; + fn ifft(&self) -> Vec; +} + +pub(crate) mod utils { + use super::FftPlan; + use crate::prelude::AsComplex; + use num::complex::{Complex, ComplexFloat}; + use num::traits::{Float, FloatConst, Num, NumAssignOps, NumCast, NumOps}; + + // pub(crate) fn rsize(n: usize) -> usize { + // (n / 2).floor() + 1 + // } + + pub(crate) fn fft_angle(n: usize) -> T + where + T: FloatConst + NumCast + NumOps, + { + T::TAU() / T::from(n).unwrap() + } + + pub(crate) fn floor(lhs: T, rhs: T) -> T + where + T: Copy + Num, + { + (lhs - lhs % rhs) / rhs + } + + pub(crate) fn unfloor(lhs: T, rhs: T) -> T + where + T: Copy + Num, + { + (lhs * rhs) - lhs % rhs + } + + /// Computes the Fast Fourier Transform of a one-dimensional, complex-valued signal. + pub fn fft(input: impl AsRef<[S]>, permute: &FftPlan) -> Vec> + where + S: ComplexFloat, + T: Float + FloatConst, + Complex: ComplexFloat + NumOps + NumOps, + { + // + let input = input.as_ref(); + // + let n = input.len(); + // initialize the result vector + let mut result = Vec::with_capacity(n); + // store the input values in the result vector according to the permutation + for position in permute.clone().into_iter() { + let arg = input[position]; + result.push(Complex::new(arg.re(), arg.im())); + } + let mut segment: usize = 1; + while segment < n { + segment <<= 1; + // compute the angle of the complex number + let angle = fft_angle::(segment); + // compute the radius of the complex number (length) + let radius = Complex::new(angle.cos(), angle.sin()); + // iterate over the signal in segments of length `segment` + for start in (0..n).step_by(segment) { + let mut w = Complex::new(T::one(), T::zero()); + for position in start..(start + segment / 2) { + let a = result[position]; + let b = result[position + segment / 2] * w; + result[position] = a + b; + result[position + segment / 2] = a - b; + w = w * radius; + } + } + } + result + } + + /// Computes the Fast Fourier Transform of an one-dimensional, real-valued signal. + /// TODO: Optimize the function to avoid unnecessary computation. 
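    /// For a power-of-two input of length `n`, the result is expected to hold only the
    /// `n / 2 + 1` non-negative-frequency bins (the remaining bins of a real signal are
    /// the complex conjugates of these); see the precomputed `size` below.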
+ pub fn rfft( + input: impl AsRef<[T]>, + input_permutation: impl AsRef<[usize]>, + ) -> Vec> + where + T: Float + FloatConst, + Complex: ComplexFloat + NumAssignOps, + { + // create a reference to the input + let input = input.as_ref(); + // fetch the length of the input + let n = input.len(); + // compute the size of the result vector + let size = (n - (n % 2)) / 2 + 1; + // initialize the output vector + let mut store = Vec::with_capacity(size); + // store the input values in the result vector according to the permutation + for position in input_permutation.as_ref() { + store.push(input[*position].as_re()); + } + let mut segment: usize = 1; + while segment < n { + segment <<= 1; + // compute the angle of the complex number + let angle = fft_angle::(segment); + // compute the radius of the complex number (length) + let radius = Complex::new(angle.cos(), angle.sin()); + for start in (0..n).step_by(segment) { + let mut w = Complex::new(T::one(), T::zero()); + for position in start..(start + segment / 2) { + let a = store[position]; + let b = store[position + segment / 2] * w; + store[position] = a + b; + store[position + segment / 2] = a - b; + w *= radius; + } + } + } + store + .iter() + .cloned() + .filter(|x| x.im() >= T::zero()) + .collect() + } + /// Computes the Inverse Fast Fourier Transform of an one-dimensional, complex-valued signal. + pub fn ifft(input: &[S], input_permutation: &FftPlan) -> Vec> + where + S: ComplexFloat, + T: Float + FloatConst, + Complex: ComplexFloat + NumOps + NumOps, + { + let n = input.len(); + let mut result = Vec::with_capacity(n); + for position in input_permutation.clone().into_iter() { + let arg = input[position]; + result.push(Complex::new(arg.re(), arg.im())); + } + let mut length: usize = 1; + while length < n { + length <<= 1; + let angle = fft_angle::(length).neg(); + let radius = Complex::new(T::cos(angle), T::sin(angle)); // w_len + for start in (0..n).step_by(length) { + let mut w = Complex::new(T::one(), T::zero()); + for position in start..(start + length / 2) { + let a = result[position]; + let b = result[position + length / 2] * w; + result[position] = a + b; + result[position + length / 2] = a - b; + w = w * radius; + } + } + } + let scale = T::from(n).unwrap().recip(); + result.iter().map(|x| *x * scale).collect() + } + /// Computes the Inverse Fast Fourier Transform of an one-dimensional, real-valued signal. 
+ /// TODO: Fix the function; currently fails to compute the correct result + pub fn irfft(input: &[Complex], plan: &FftPlan) -> Vec + where + T: Float + FloatConst, + Complex: ComplexFloat + NumAssignOps, + { + let n = input.len(); + let mut result = vec![Complex::new(T::zero(), T::zero()); n]; + + for position in plan.clone().into_iter() { + result.push(input[position]); + } + // for res in result.clone() { + // if res.im() > T::zero() { + // result.push(res.conj()); + // } + // } + // segment length + let mut segment: usize = 1; + while segment < n { + segment <<= 1; + // compute the angle of the complex number + let angle = fft_angle::(segment).neg(); + // compute the radius of the complex number (length) + let radius = Complex::new(T::cos(angle), T::sin(angle)); + for start in (0..n).step_by(segment) { + let mut w = Complex::new(T::one(), T::zero()); + for position in start..(start + segment / 2) { + let a = result[position]; + let b = result[position + segment / 2] * w; + result[position] = a + b; + result[position + segment / 2] = a - b; + w *= radius; + } + } + } + let scale = T::from(n).unwrap().recip(); + result.iter().map(|x| x.re() * scale).collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::prelude::almost_equal; + use lazy_static::lazy_static; + use num::complex::{Complex, ComplexFloat}; + + pub(crate) fn fft_permutation(length: usize) -> Vec { + let mut result = Vec::new(); + result.reserve_exact(length); + for i in 0..length { + result.push(i); + } + let mut reverse = 0_usize; + let mut position = 1_usize; + while position < length { + let mut bit = length >> 1; + while bit & reverse != 0 { + reverse ^= bit; + bit >>= 1; + } + reverse ^= bit; + // This is equivalent to adding 1 to a reversed number + if position < reverse { + // Only swap each element once + result.swap(position, reverse); + } + position += 1; + } + result + } + + const EPSILON: f64 = 1e-6; + + lazy_static! 
{ + static ref EXPECTED_RFFT: Vec> = vec![ + Complex { re: 28.0, im: 0.0 }, + Complex { re: -4.0, im: 0.0 }, + Complex { + re: -4.0, + im: 1.6568542494923806 + }, + Complex { + re: -4.0, + im: 4.000000000000001 + }, + Complex { + re: -3.999999999999999, + im: 9.656854249492381 + } + ]; + } + + #[test] + fn test_plan() { + let samples = 16; + + let plan = FftPlan::new(samples); + assert_eq!(plan.plan(), fft_permutation(16).as_slice()); + } + + #[test] + fn test_rfft() { + let polynomial = (0..8).map(|i| i as f64).collect::>(); + let plan = FftPlan::new(polynomial.len()); + println!("Function Values: {:?}", &polynomial); + println!("Plan: {:?}", &plan); + let fft = rfft(&polynomial, &plan); + let mut tmp = fft + .iter() + .cloned() + .filter(|i| i.im() > 0.0) + .map(|i| i.conj()) + .collect::>(); + tmp.sort_by(|a, b| a.im().partial_cmp(&b.im()).unwrap()); + println!("FFT: {:?}", &tmp); + let mut res = fft.clone(); + res.sort_by(|a, b| a.re().partial_cmp(&b.re()).unwrap()); + res.sort_by(|a, b| a.im().partial_cmp(&b.im()).unwrap()); + println!("R: {:?}", &res); + res.extend(tmp); + assert!(fft.len() == EXPECTED_RFFT.len()); + for (x, y) in fft.iter().zip(EXPECTED_RFFT.iter()) { + assert!(almost_equal(x.re(), y.re(), EPSILON)); + assert!(almost_equal(x.im(), y.im(), EPSILON)); + } + // let plan = FftPlan::new(fft.len()); + let ifft = irfft(&res, &plan); + println!("Inverse: {:?}", &ifft); + for (x, y) in ifft.iter().zip(polynomial.iter()) { + assert!(almost_equal(*x, *y, EPSILON)); + } + } + + #[test] + fn small_polynomial_returns_self() { + let polynomial = vec![1.0f64, 1.0, 0.0, 2.5]; + let permutation = FftPlan::new(polynomial.len()); + let fft = fft(&polynomial, &permutation); + let ifft = ifft(&fft, &permutation) + .into_iter() + .map(|i| i.re()) + .collect::>(); + for (x, y) in ifft.iter().zip(polynomial.iter()) { + assert!(almost_equal(*x, *y, EPSILON)); + } + } + + #[test] + fn square_small_polynomial() { + let mut polynomial = vec![1.0f64, 1.0, 0.0, 2.0]; + polynomial.append(&mut vec![0.0; 4]); + let permutation = FftPlan::new(polynomial.len()); + let mut fft = fft(&polynomial, &permutation); + fft.iter_mut().for_each(|num| *num *= *num); + let ifft = ifft(&fft, &permutation) + .into_iter() + .map(|i| i.re()) + .collect::>(); + let expected = [1.0, 2.0, 1.0, 4.0, 4.0, 0.0, 4.0, 0.0, 0.0]; + for (x, y) in ifft.iter().zip(expected.iter()) { + assert!(almost_equal(*x, *y, EPSILON)); + } + } + + #[test] + #[ignore] + fn square_big_polynomial() { + // This test case takes ~1050ms on my machine in unoptimized mode, + // but it takes ~70ms in release mode. 
+ let n = 1 << 17; // ~100_000 + let mut polynomial = vec![1.0f64; n]; + polynomial.append(&mut vec![0.0f64; n]); + let permutation = FftPlan::new(polynomial.len()); + let mut fft = fft(&polynomial, &permutation); + fft.iter_mut().for_each(|num| *num *= *num); + let ifft = irfft(&fft, &permutation) + .into_iter() + .map(|i| i.re()) + .collect::>(); + let expected = (0..((n << 1) - 1)).map(|i| std::cmp::min(i + 1, (n << 1) - 1 - i) as f64); + for (&x, y) in ifft.iter().zip(expected) { + assert!(almost_equal(x, y, EPSILON)); + } + } +} diff --git a/core/src/ops/fft/modes.rs b/core/src/ops/fft/modes.rs new file mode 100644 index 00000000..c5cb75af --- /dev/null +++ b/core/src/ops/fft/modes.rs @@ -0,0 +1,88 @@ +/* + Appellation: modes + Contrib: FL03 +*/ +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantArray, VariantNames}; + +#[derive( + Clone, + Copy, + Debug, + Default, + Deserialize, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + Serialize, + VariantArray, + VariantNames, +)] +#[repr(usize)] +#[serde(rename_all = "lowercase")] +#[strum(serialize_all = "lowercase")] +pub enum FftDirection { + #[default] + Forward = 0, + Inverse = 1, +} + +impl FftDirection { + pub fn forward() -> Self { + Self::Forward + } + + pub fn inverse() -> Self { + Self::Inverse + } +} + +impl From for FftDirection { + fn from(direction: usize) -> Self { + match direction % Self::COUNT { + 0 => Self::Forward, + _ => Self::Inverse, + } + } +} +impl From for usize { + fn from(direction: FftDirection) -> Self { + direction as usize + } +} + +#[derive( + Clone, + Copy, + Debug, + Default, + Deserialize, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + Serialize, + VariantArray, + VariantNames, +)] +#[repr(usize)] +#[serde(rename_all = "lowercase")] +#[strum(serialize_all = "lowercase")] +pub enum FftMode { + #[default] + Complex, + Real, +} diff --git a/core/src/ops/fft/plan.rs b/core/src/ops/fft/plan.rs new file mode 100644 index 00000000..58a9d293 --- /dev/null +++ b/core/src/ops/fft/plan.rs @@ -0,0 +1,84 @@ +/* + Appellation: plan + Contrib: FL03 +*/ +use super::FftDirection; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +pub struct FftPlan { + plan: Vec, +} + +impl FftPlan { + pub fn new(n: usize) -> Self { + let mut plan = Vec::with_capacity(n); + plan.extend(0..n); + + let mut rev = 0; // reverse + let mut pos = 1; // position + while pos < n { + let mut bit = n >> 1; + while bit & rev != 0 { + rev ^= bit; + bit >>= 1; + } + rev ^= bit; + // This is equivalent to adding 1 to a reversed number + if pos < rev { + // Only swap each element once + plan.swap(pos, rev); + } + pos += 1; + } + Self { plan } + } + + pub fn plan(&self) -> &[usize] { + &self.plan + } +} + +impl AsRef<[usize]> for FftPlan { + fn as_ref(&self) -> &[usize] { + &self.plan + } +} + +impl AsMut<[usize]> for FftPlan { + fn as_mut(&mut self) -> &mut [usize] { + &mut self.plan + } +} + +impl Extend for FftPlan { + fn extend>(&mut self, iter: T) { + self.plan.extend(iter); + } +} + +impl FromIterator for FftPlan { + fn from_iter>(iter: T) -> Self { + Self { + plan: Vec::from_iter(iter), + } + } +} + +impl IntoIterator for FftPlan { + type Item = usize; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.plan.into_iter() + } +} + +impl<'a> 
IntoIterator for &'a mut FftPlan { + type Item = &'a mut usize; + type IntoIter = std::slice::IterMut<'a, usize>; + + fn into_iter(self) -> Self::IntoIter { + self.plan.iter_mut() + } +} diff --git a/core/src/ops/kinds.rs b/core/src/ops/kinds.rs new file mode 100644 index 00000000..273a4f01 --- /dev/null +++ b/core/src/ops/kinds.rs @@ -0,0 +1,36 @@ +/* + Appellation: kinds + Contrib: FL03 +*/ + +pub enum Op {} + +pub enum CompareOp { + Eq, + Ge, + Gt, + Le, + Lt, + Ne, +} + +pub enum BinaryOp { + Add, + Div, + Maximum, + Minimum, + Mul, + Sub, +} + +pub trait BinaryOperation { + type Output; + + fn eval(&self, lhs: T, rhs: T) -> Self::Output; +} + +pub trait UnaryOperation { + type Output; + + fn eval(&self, arg: T) -> Self::Output; +} diff --git a/core/src/ops/mod.rs b/core/src/ops/mod.rs new file mode 100644 index 00000000..545a0a33 --- /dev/null +++ b/core/src/ops/mod.rs @@ -0,0 +1,30 @@ +/* + Appellation: ops + Contrib: FL03 +*/ +//! # Operations +pub use self::kinds::*; + +pub(crate) mod kinds; + +pub mod fft; + +pub trait Operation { + type Output; + + fn eval(&self, args: &T) -> Self::Output; +} + +impl Operation for F +where + F: Fn(&T) -> S, +{ + type Output = S; + + fn eval(&self, args: &T) -> Self::Output { + self(args) + } +} + +#[cfg(test)] +mod tests {} diff --git a/core/src/params/iter.rs b/core/src/params/iter.rs index 359553ef..7af19abb 100644 --- a/core/src/params/iter.rs +++ b/core/src/params/iter.rs @@ -3,6 +3,13 @@ Contrib: FL03 */ +pub struct Entry { + key: K, + value: V, +} + +pub struct IntoIter; + pub struct Iter; pub struct IterMut; diff --git a/core/src/params/kinds.rs b/core/src/params/kinds.rs index 660fa41b..fbfd0262 100644 --- a/core/src/params/kinds.rs +++ b/core/src/params/kinds.rs @@ -3,27 +3,37 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{EnumIs, EnumIter, EnumString, EnumVariantNames}; +use strum::{EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; pub trait ParamType: ToString { fn kind(&self) -> String; } +impl ParamType for T +where + T: ToString, +{ + fn kind(&self) -> String { + self.to_string() + } +} + #[derive( Clone, Debug, Default, Deserialize, + EnumCount, EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[non_exhaustive] #[repr(usize)] diff --git a/core/src/params/mod.rs b/core/src/params/mod.rs index 2d4f3a9d..d77a1a0e 100644 --- a/core/src/params/mod.rs +++ b/core/src/params/mod.rs @@ -6,19 +6,23 @@ //! //! ## Overview //! 
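//! A parameter pairs a [`ParamKind`] with a name and an n-dimensional value; the
//! [`Param`] trait below also exposes the dimension of the backing array through its
//! associated `Dim` type.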
-pub use self::{group::*, iter::*, kinds::*, param::*, store::*}; +pub use self::{iter::*, kinds::*, param::*, store::*, variable::*}; -pub(crate) mod group; pub(crate) mod iter; pub(crate) mod kinds; pub(crate) mod param; pub(crate) mod store; +pub(crate) mod variable; + +pub mod group; use ndarray::prelude::{Array, Dimension, Ix2}; use num::Float; use std::collections::HashMap; pub trait Param { + type Dim: Dimension; + fn kind(&self) -> &ParamKind; fn name(&self) -> &str; diff --git a/core/src/params/param.rs b/core/src/params/param.rs index 36e91b74..f10c79c1 100644 --- a/core/src/params/param.rs +++ b/core/src/params/param.rs @@ -113,6 +113,8 @@ where T: Float, D: Dimension, { + type Dim = D; + fn kind(&self) -> &ParamKind { &self.kind } diff --git a/core/src/params/store.rs b/core/src/params/store.rs index b337143b..83c610e6 100644 --- a/core/src/params/store.rs +++ b/core/src/params/store.rs @@ -91,8 +91,8 @@ mod tests { fn test_model_store() { let (inputs, outputs) = (5, 3); - let shapes = [(inputs, outputs), (outputs, outputs), (outputs, 1)]; + let _shapes = [(inputs, outputs), (outputs, outputs), (outputs, 1)]; - let params = ParamStore::::new(); + let _params = ParamStore::::new(); } } diff --git a/core/src/params/variable.rs b/core/src/params/variable.rs new file mode 100644 index 00000000..a2394f90 --- /dev/null +++ b/core/src/params/variable.rs @@ -0,0 +1,16 @@ +/* + Appellation: variable + Contrib: FL03 +*/ +//! # Variables +//! +//! ## Overview +//! Variables extend the functionality of the 'Parameter' by enabling mutability. +//! + +pub struct Variable; + +pub enum P { + Param, + Variable(Box), +} diff --git a/core/src/primitives.rs b/core/src/primitives.rs index 920c63cc..2c0e698c 100644 --- a/core/src/primitives.rs +++ b/core/src/primitives.rs @@ -2,7 +2,7 @@ Appellation: primitives Contrib: FL03 */ -pub use self::{constants::*, statics::*, types::*}; +pub use self::{constants::*, types::*}; pub use ndarray::ShapeError; pub use ndarray_rand::rand_distr::uniform::SampleUniform; @@ -18,7 +18,7 @@ mod statics {} /// Collection of types used throughout the system mod types { /// - pub type BoxError = Box; + pub type BoxError = Box; /// pub type BoxResult = std::result::Result; diff --git a/core/src/specs/arrays.rs b/core/src/specs/arrays.rs index 3531bdec..3e2847e0 100644 --- a/core/src/specs/arrays.rs +++ b/core/src/specs/arrays.rs @@ -2,15 +2,39 @@ Appellation: base Contrib: FL03 */ -use ndarray::prelude::{Array, Axis, Dimension, Ix1, Ix2, NdFloat}; +use ndarray::prelude::{Array, Axis, Dimension, Ix1, Ix2}; use ndarray::{IntoDimension, ScalarOperand, ShapeError}; -// use ndarray::linalg::Dot; -use distr::uniform::SampleUniform; -use distr::{Bernoulli, BernoulliError, Distribution, StandardNormal, Uniform}; -use ndarray_rand::rand_distr as distr; +use ndarray_rand::rand::rngs::StdRng; +use ndarray_rand::rand::{Rng, SeedableRng}; +use ndarray_rand::rand_distr::uniform::{SampleUniform, Uniform}; +use ndarray_rand::rand_distr::{Bernoulli, BernoulliError, Distribution, StandardNormal}; use ndarray_rand::RandomExt; +use num::traits::real::Real; +use num::traits::{Float, Num, NumAssignOps, ToPrimitive}; +use std::ops; -use num::{Float, Num}; +pub trait Pad { + fn pad(&self, pad: usize) -> Self; + + fn pad_with(&self, pad: usize, value: T) -> Self; +} + +// impl Pad for Array +// where +// T: Clone + Num, +// D: Dimension, +// { +// fn pad(&self, pad: usize) -> Self { +// self.pad_with(pad, T::zero()) +// } + +// fn pad_with(&self, pad: usize, value: T) -> Self { +// let mut pad = 
vec![value; pad]; +// pad.extend_from_slice(self); +// pad.extend_from_slice(&vec![value; pad.len()]); +// Array::from_vec(pad) +// } +// } pub trait Affine: Sized { type Error; @@ -18,75 +42,192 @@ pub trait Affine: Sized { fn affine(&self, mul: T, add: T) -> Result; } -impl Affine for Array +impl Affine for Array where T: Num + ScalarOperand, D: Dimension, + Array: ops::Mul> + ops::Add>, { type Error = ShapeError; - fn affine(&self, mul: T, add: T) -> Result { - Ok(self * mul + add) + fn affine(&self, mul: S, add: S) -> Result { + Ok(self.clone() * mul + add) } } -pub trait Arange { - fn arange(start: T, stop: T, step: T) -> Self; +pub enum ArangeArgs { + Arange { start: T, stop: T, step: T }, + Between { start: T, stop: T }, + Until { stop: T }, } -impl Arange for Vec +impl ArangeArgs where - T: Float, + T: Copy + Num, { - fn arange(start: T, stop: T, step: T) -> Self { - let n = ((stop - start) / step).ceil().to_usize().unwrap(); - (0..n).map(|i| start + step * T::from(i).unwrap()).collect() + /// Returns the start value of the range. + pub fn start(&self) -> T { + match self { + ArangeArgs::Arange { start, .. } => *start, + ArangeArgs::Between { start, .. } => *start, + ArangeArgs::Until { .. } => T::zero(), + } + } + /// Returns the stop value of the range. + pub fn stop(&self) -> T { + match self { + ArangeArgs::Arange { stop, .. } => *stop, + ArangeArgs::Between { stop, .. } => *stop, + ArangeArgs::Until { stop } => *stop, + } + } + /// Returns the step value of the range. + pub fn step(&self) -> T { + match self { + ArangeArgs::Arange { step, .. } => *step, + ArangeArgs::Between { .. } => T::one(), + ArangeArgs::Until { .. } => T::one(), + } + } + /// Returns the number of steps between the given boundaries + pub fn steps(&self) -> usize + where + T: Real, + { + match self { + ArangeArgs::Arange { start, stop, step } => { + let n = ((*stop - *start) / *step).ceil().to_usize().unwrap(); + n + } + ArangeArgs::Between { start, stop } => { + let n = (*stop - *start).to_usize().unwrap(); + n + } + ArangeArgs::Until { stop } => { + let n = stop.to_usize().unwrap(); + n + } + } + } +} + +impl From> for ArangeArgs { + fn from(args: ops::Range) -> Self { + ArangeArgs::Between { + start: args.start, + stop: args.end, + } + } +} + +impl From> for ArangeArgs { + fn from(args: ops::RangeFrom) -> Self { + ArangeArgs::Until { stop: args.start } + } +} + +impl From<(T, T, T)> for ArangeArgs { + fn from(args: (T, T, T)) -> Self { + ArangeArgs::Arange { + start: args.0, + stop: args.1, + step: args.2, + } + } +} + +impl From<(T, T)> for ArangeArgs { + fn from(args: (T, T)) -> Self { + ArangeArgs::Between { + start: args.0, + stop: args.1, + } } } -impl Arange for Array +impl From for ArangeArgs where - T: Float, + T: Num, { - fn arange(start: T, stop: T, step: T) -> Self { - let n = ((stop - start) / step).ceil().to_usize().unwrap(); - Array::from_shape_fn(n, |i| start + step * T::from(i).unwrap()) + fn from(stop: T) -> Self { + ArangeArgs::Until { stop } } } -impl Arange for Array +pub trait Arange { + fn arange(args: impl Into>) -> Self; +} + +impl Arange for Vec where T: Float, { - fn arange(start: T, stop: T, step: T) -> Self { - let n = ((stop - start) / step).ceil().to_usize().unwrap(); - Array::from_shape_fn((n, 1), |(i, ..)| start + step * T::from(i).unwrap()) + fn arange(args: impl Into>) -> Self { + let args = args.into(); + let n: usize = args + .stop() + .to_usize() + .expect("Failed to convert 'stop' to a usize"); + (0..n) + .map(|i| args.start() + args.step() * T::from(i).unwrap()) + 
.collect() } } -pub trait RandNum: SampleUniform +impl Arange for Array where - StandardNormal: Distribution, + S: Copy + Num + ToPrimitive, + T: Float, { + fn arange(args: impl Into>) -> Self { + let args = args.into(); + let n: usize = args + .stop() + .to_usize() + .expect("Failed to convert 'stop' to a usize"); + let start = T::from(args.start()).unwrap(); + let step = T::from(args.step()).unwrap(); + + Array::from_iter((0..n).map(|i| start + step * T::from(i).unwrap())) + } } -impl RandNum for T +impl Arange for Array where - T: SampleUniform, - StandardNormal: Distribution, + S: Copy + Num + ToPrimitive, + T: Float, { + fn arange(args: impl Into>) -> Self { + let args = args.into(); + let start = T::from(args.start()).unwrap(); + let step = T::from(args.step()).unwrap(); + let n: usize = args + .stop() + .to_usize() + .expect("Failed to convert 'stop' to a usize"); + let f = |(i, _j)| start + step * T::from(i).unwrap(); + Array::from_shape_fn((n, 1), f) + } } pub trait GenerateRandom: Sized where D: Dimension, - T: Float + SampleUniform, - StandardNormal: Distribution, + T: Float, { fn rand(dim: impl IntoDimension, distr: IdS) -> Self where IdS: Distribution; + fn rand_using( + dim: impl IntoDimension, + distr: IdS, + rng: &mut R, + ) -> Self + where + IdS: Distribution, + R: Rng; + fn bernoulli(dim: impl IntoDimension, p: Option) -> Result where Bernoulli: Distribution, @@ -95,17 +236,38 @@ where Ok(Self::rand(dim.into_dimension(), dist)) } - fn stdnorm(dim: impl IntoDimension) -> Self { + fn stdnorm(dim: impl IntoDimension) -> Self + where + StandardNormal: Distribution, + { Self::rand(dim, StandardNormal) } - fn uniform(axis: usize, dim: impl IntoDimension) -> Self { + fn normal_from_key(key: u64, dim: impl IntoDimension) -> Self + where + StandardNormal: Distribution, + R: Rng, + { + Self::rand_using( + dim.into_dimension(), + StandardNormal, + &mut StdRng::seed_from_u64(key), + ) + } + + fn uniform(axis: usize, dim: impl IntoDimension) -> Self + where + T: SampleUniform, + { let dim = dim.into_dimension(); let dk = T::from(dim[axis]).unwrap().recip().sqrt(); Self::uniform_between(dk, dim) } - fn uniform_between(dk: T, dim: impl IntoDimension) -> Self { + fn uniform_between(dk: T, dim: impl IntoDimension) -> Self + where + T: SampleUniform, + { Self::rand(dim, Uniform::new(-dk, dk)) } } @@ -138,6 +300,14 @@ where { Self::random(dim.into_dimension(), distr) } + + fn rand_using(dim: impl IntoDimension, distr: IdS, rng: &mut R) -> Self + where + IdS: Distribution, + R: Rng, + { + Self::random_using(dim.into_dimension(), distr, rng) + } } pub trait IntoAxis { @@ -153,20 +323,51 @@ where } } -pub trait Inverse: Sized -where - T: Float, -{ +pub trait Inverse: Sized { fn inverse(&self) -> Option; } impl Inverse for Array where - T: NdFloat, + T: Copy + Num + NumAssignOps + ScalarOperand, { fn inverse(&self) -> Option { - crate::compute_inverse(self) + super::utils::inverse(self) } } // pub trait Stack + +pub trait Genspace { + fn arange(start: T, stop: T, step: T) -> Self; + + fn linspace(start: T, stop: T, n: usize) -> Self; + + fn logspace(start: T, stop: T, n: usize) -> Self; + + fn geomspace(start: T, stop: T, n: usize) -> Self; + + fn ones(n: usize) -> Self; + + fn zeros(n: usize) -> Self; +} + +pub trait ArrayLike { + fn ones_like(&self) -> Self; + + fn zeros_like(&self) -> Self; +} + +impl ArrayLike for Array +where + T: Clone + Num, + D: Dimension, +{ + fn ones_like(&self) -> Self { + Array::ones(self.dim()) + } + + fn zeros_like(&self) -> Self { + Array::zeros(self.dim()) + } 
+} diff --git a/core/src/specs/init.rs b/core/src/specs/init.rs index 6fc1ee0b..1dece2e6 100644 --- a/core/src/specs/init.rs +++ b/core/src/specs/init.rs @@ -7,6 +7,10 @@ pub trait Init { fn init(&mut self) -> Self; } +pub trait InitRandom { + fn genrand(&mut self) -> T; +} + pub trait Rand {} pub trait RandComplex {} diff --git a/core/src/specs/iter.rs b/core/src/specs/iter.rs new file mode 100644 index 00000000..98343fdb --- /dev/null +++ b/core/src/specs/iter.rs @@ -0,0 +1,8 @@ +/* + Appellation: base + Contrib: FL03 +*/ + +pub trait Walk { + fn walk(&self, other: &T) -> bool; +} diff --git a/core/src/specs/math/mod.rs b/core/src/specs/math/mod.rs new file mode 100644 index 00000000..878ce3c3 --- /dev/null +++ b/core/src/specs/math/mod.rs @@ -0,0 +1,78 @@ +/* + Appellation: math + Contrib: FL03 +*/ +pub use self::{numerical::*, ops::*, scalar::*}; + +pub(crate) mod numerical; +pub(crate) mod ops; +pub(crate) mod scalar; + +use num::complex::Complex; +use num::traits::{Float, Num}; + +pub trait AsComplex: Sized { + fn as_complex(self, real: bool) -> Complex; + + fn as_re(self) -> Complex { + self.as_complex(true) + } + + fn as_im(self) -> Complex { + self.as_complex(false) + } +} + +impl AsComplex for T +where + T: Num, +{ + fn as_complex(self, real: bool) -> Complex { + match real { + true => Complex::new(self, Self::zero()), + false => Complex::new(Self::zero(), self), + } + } +} + +pub trait FloorDiv { + type Output; + + fn floor_div(self, rhs: Rhs) -> Self::Output; +} + +impl FloorDiv for T +where + T: Copy + Num, +{ + type Output = T; + + fn floor_div(self, rhs: Self) -> Self::Output { + crate::floor_div(self, rhs) + } +} + +pub trait RoundTo { + fn round_to(&self, places: usize) -> Self; +} + +impl RoundTo for T +where + T: Float, +{ + fn round_to(&self, places: usize) -> Self { + crate::round_to(*self, places) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_as_complex() { + let x = 1.0; + let y = x.as_re(); + assert_eq!(y, Complex::new(1.0, 0.0)); + } +} diff --git a/core/src/specs/math/numerical.rs b/core/src/specs/math/numerical.rs new file mode 100644 index 00000000..b41e57fe --- /dev/null +++ b/core/src/specs/math/numerical.rs @@ -0,0 +1,129 @@ +/* + Appellation: num + Contrib: FL03 +*/ +use num::complex::Complex; +use num::traits::{Num, NumAssignOps, NumOps, One, Signed}; +// use num::traits::real::Real; +use std::ops::Div; + +pub trait Algebraic: NumOps + Sized {} + +pub trait AlgebraicExt +where + Self: Algebraic + NumAssignOps + Sized, +{ +} + +impl Algebraic for A where A: NumOps + Sized {} + +pub trait ComplexNum: Sized { + type Real: Algebraic + Algebraic; + + fn complex(self) -> Self; + + fn im(self) -> Self::Real; + + fn re(self) -> Self::Real; +} + +pub trait Imaginary: Sized +where + T: Algebraic + Algebraic, +{ + type Complex: Algebraic + Algebraic; + + fn im(self) -> T; + + fn re(self) -> T; +} + +impl Imaginary for Complex +where + T: Algebraic + Algebraic, Complex> + Clone + Num, +{ + type Complex = Complex; + + fn im(self) -> T { + self.im + } + + fn re(self) -> T { + self.re + } +} + +pub trait Number {} + +impl Number for i8 {} + +impl Number for i16 {} + +impl Number for i32 {} + +impl Number for i64 {} + +impl Number for i128 {} + +impl Number for isize {} + +impl Number for u8 {} + +impl Number for u16 {} + +impl Number for u32 {} + +impl Number for u64 {} + +impl Number for u128 {} + +impl Number for usize {} + +impl Number for f32 {} + +impl Number for f64 {} + +impl Number for S where S: ComplexNum {} + +pub trait Abs { + fn 
abs(&self) -> Self; +} + +impl Abs for T +where + T: Signed, +{ + fn abs(&self) -> Self { + Signed::abs(self) + } +} + +pub trait Reciprocal { + fn recip(self) -> Self; +} + +impl Reciprocal for T +where + T: Div + One, +{ + fn recip(self) -> Self { + Self::one() / self + } +} + +pub trait Numerical: Sized { + type Elem: Algebraic + Number; + + fn abs(&self) -> Self + where + Self::Elem: Abs, + { + self.eval(|x| x.abs()) + } + + fn conj(self) -> Self; + + fn eval(&self, f: F) -> Self + where + F: Fn(Self::Elem) -> Self::Elem; +} diff --git a/core/src/specs/math.rs b/core/src/specs/math/ops.rs similarity index 51% rename from core/src/specs/math.rs rename to core/src/specs/math/ops.rs index 2ab59c89..14d3d789 100644 --- a/core/src/specs/math.rs +++ b/core/src/specs/math/ops.rs @@ -1,16 +1,13 @@ /* - Appellation: math + Appellation: ops Contrib: FL03 */ -use ndarray::prelude::{Array, Dimension, Ix2, NdFloat}; -use ndarray_rand::rand_distr::uniform::SampleUniform; -use num::{Complex, Float, FromPrimitive, Num, One, Signed, Zero}; +use ndarray::linalg::Dot; +use ndarray::prelude::{Array, Dimension, Ix2}; +use num::complex::Complex; +use num::{Float, Num, Signed}; use std::ops; -pub trait Binary: One + Zero {} - -impl Binary for T where T: One + Zero {} - pub trait Conjugate { fn conj(&self) -> Self; } @@ -29,13 +26,22 @@ impl Conjugate for f64 { impl Conjugate for Complex where - T: Copy + Num + Signed, + T: Clone + Signed, { fn conj(&self) -> Self { - Complex::::new(self.re, -self.im) + Complex::::conj(self) } } +// impl Conjugate for T +// where +// T: ComplexFloat, +// { +// fn conj(&self) -> Self { +// ComplexFloat::conj(self) +// } +// } + impl Conjugate for Array where D: Dimension, @@ -46,15 +52,12 @@ where } } -pub trait FloatExt: FromPrimitive + NdFloat + Signed + SampleUniform {} - -impl FloatExt for T where T: FromPrimitive + NdFloat + Signed + SampleUniform {} - -pub trait Arithmetic: - ops::Add - + ops::Div - + ops::Mul - + ops::Sub +pub trait Arithmetic +where + Self: ops::Add + + ops::Div + + ops::Mul + + ops::Sub, { } @@ -79,7 +82,7 @@ where A: Dimension, B: Dimension, D: Dimension, - T: Float, + T: Arithmetic, Self: Arithmetic, Array>, { } @@ -89,7 +92,7 @@ where A: Dimension, B: Dimension, D: Dimension, - T: Float, + T: Arithmetic, Self: Arithmetic, Array>, { } @@ -118,3 +121,46 @@ where Complex::::sqrt(self) } } + +impl SquareRoot for Array +where + D: Dimension, + T: Float, +{ + fn sqrt(self) -> Self { + self.mapv(|x| x.sqrt()) + } +} + +pub trait Power { + type Output; + + fn pow(&self, rhs: Rhs) -> Self::Output; +} + +// impl Power for S where S: Pow { +// type Output = >::Output; + +// fn pow(self, rhs: T) -> Self::Output { +// >::pow(self, rhs) +// } +// } + +impl Power for Array +where + T: Clone + Num, + Array: Dot, +{ + type Output = Self; + + fn pow(&self, rhs: usize) -> Self::Output { + if !self.is_square() { + panic!("Matrix must be square to be raised to a power"); + } + let mut res = Array::eye(self.shape()[0]); + for _ in 0..rhs { + res = res.dot(&self); + } + res + } +} diff --git a/core/src/specs/math/scalar.rs b/core/src/specs/math/scalar.rs new file mode 100644 index 00000000..bbc540cf --- /dev/null +++ b/core/src/specs/math/scalar.rs @@ -0,0 +1,11 @@ +/* + Appellation: scalar + Contrib: FL03 +*/ +// use super::Algebraic; +use num::traits::NumOps; + +pub trait Scalar { + type Complex: NumOps + NumOps; + type Real: NumOps + NumOps; +} diff --git a/core/src/specs/mod.rs b/core/src/specs/mod.rs index 3407cec3..b1dbee82 100644 --- a/core/src/specs/mod.rs +++ 
b/core/src/specs/mod.rs @@ -2,49 +2,65 @@ Appellation: specs Contrib: FL03 */ -pub use self::{arrays::*, base::*, init::*, math::*}; +pub use self::{arrays::*, base::*, init::*, iter::*, math::*}; pub(crate) mod arrays; pub(crate) mod base; pub(crate) mod init; +pub(crate) mod iter; pub(crate) mod math; -use num::traits::float::FloatCore; -use num::{Complex, Num, Zero}; +pub trait Named { + fn name(&self) -> &str; +} -pub trait CncFloat: FloatCore {} +pub(crate) mod utils { + use ndarray::prelude::{s, Array2}; + use ndarray::ScalarOperand; + use num::traits::{Num, NumAssignOps}; -impl CncFloat for T where T: FloatCore {} + pub fn inverse(matrix: &Array2) -> Option> + where + T: Copy + Num + NumAssignOps + ScalarOperand, + { + let (rows, cols) = matrix.dim(); -pub trait AsComplex: Num { - fn as_complex(&self) -> Complex; + if !matrix.is_square() { + return None; // Matrix must be square for inversion + } - fn as_imag(&self) -> Complex; -} + let identity = Array2::eye(rows); -impl AsComplex for T -where - T: Copy + Num + Zero, -{ - fn as_complex(&self) -> Complex { - Complex::new(*self, T::zero()) - } + // Construct an augmented matrix by concatenating the original matrix with an identity matrix + let mut aug = Array2::zeros((rows, 2 * cols)); + aug.slice_mut(s![.., ..cols]).assign(matrix); + aug.slice_mut(s![.., cols..]).assign(&identity); - fn as_imag(&self) -> Complex { - Complex::new(T::zero(), *self) - } -} + // Perform Gaussian elimination to reduce the left half to the identity matrix + for i in 0..rows { + let pivot = aug[[i, i]]; -pub trait RoundTo { - fn round_to(&self, places: usize) -> Self; -} + if pivot == T::zero() { + return None; // Matrix is singular + } -impl RoundTo for T -where - T: num::Float, -{ - fn round_to(&self, places: usize) -> Self { - crate::round_to(*self, places) + aug.slice_mut(s![i, ..]).mapv_inplace(|x| x / pivot); + + for j in 0..rows { + if i != j { + let am = aug.clone(); + let factor = aug[[j, i]]; + let rhs = am.slice(s![i, ..]); + aug.slice_mut(s![j, ..]) + .zip_mut_with(&rhs, |x, &y| *x -= y * factor); + } + } + } + + // Extract the inverted matrix from the augmented matrix + let inverted = aug.slice(s![.., cols..]); + + Some(inverted.to_owned()) } } @@ -55,10 +71,9 @@ mod tests { use ndarray::prelude::*; #[test] - fn test_as_complex() { - let x = 1.0; - let y = x.as_complex(); - assert_eq!(y, Complex::new(1.0, 0.0)); + fn test_arange() { + let exp = array![0.0, 1.0, 2.0, 3.0, 4.0]; + assert_eq!(&exp, &Array1::::arange(5)) } #[test] @@ -68,4 +83,12 @@ mod tests { let y = x.affine(4.0, -2.0).unwrap(); assert_eq!(y, array![[-2.0, 2.0], [6.0, 10.0]]); } + + #[test] + fn test_matrix_power() { + let x = array![[1.0, 2.0], [3.0, 4.0]]; + assert_eq!(x.pow(0), Array2::::eye(2)); + assert_eq!(x.pow(1), x); + assert_eq!(x.pow(2), x.dot(&x)); + } } diff --git a/core/src/states/weighted.rs b/core/src/states/weighted.rs index aee6a61c..a4d2a0c5 100644 --- a/core/src/states/weighted.rs +++ b/core/src/states/weighted.rs @@ -4,16 +4,17 @@ */ use serde::{Deserialize, Serialize}; use smart_default::SmartDefault; -use strum::{Display, EnumIs, EnumIter, EnumVariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; #[derive( Clone, + Copy, Debug, Deserialize, Display, + EnumCount, EnumIs, EnumIter, - EnumVariantNames, Eq, Hash, Ord, @@ -21,6 +22,7 @@ use strum::{Display, EnumIs, EnumIter, EnumVariantNames}; PartialOrd, Serialize, SmartDefault, + VariantNames, )] #[serde(rename_all = "lowercase")] #[strum(serialize_all = "lowercase")] diff --git 
a/core/src/utils.rs b/core/src/utils.rs index a37a6585..c1088ae5 100644 --- a/core/src/utils.rs +++ b/core/src/utils.rs @@ -2,101 +2,51 @@ Appellation: utils Contrib: FL03 */ +pub use self::{arrays::*, assertions::*}; +use ndarray::linalg::Dot; use ndarray::prelude::*; -use ndarray::{concatenate, IntoDimension, RemoveAxis, ShapeError}; -use ndarray_rand::rand_distr::{Bernoulli, BernoulliError}; +use ndarray::{IntoDimension, ShapeError}; +use ndarray_rand::rand::rngs::StdRng; +use ndarray_rand::rand::SeedableRng; +use ndarray_rand::rand_distr::{Distribution, StandardNormal}; use ndarray_rand::RandomExt; -use num::cast::AsPrimitive; -use num::{Float, Num, NumCast, Zero}; +use num::complex::Complex; +use num::traits::{AsPrimitive, Float, Num, NumCast}; +use rand::distributions::uniform::{SampleUniform, Uniform}; -pub fn arange(a: T, b: T, h: T) -> Array1 +pub fn pad(a: impl IntoIterator, pad: usize, value: Option) -> Vec where - T: AsPrimitive + Float, + T: Clone + Default, { - let n: usize = ((b - a) / h).as_(); - let mut res = Array1::::zeros(n); - res[0] = a; - for i in 1..n { - res[i] = res[i - 1] + h; - } + let pad = vec![value.unwrap_or_default(); pad]; + let mut res = Vec::from_iter(a); + res.extend(pad); res } - -pub fn bernoulli( - dim: impl IntoDimension, - p: Option, -) -> Result, BernoulliError> +/// +pub fn floor_div(numerator: T, denom: T) -> T where - D: Dimension, + T: Copy + Num, { - let dist = Bernoulli::new(p.unwrap_or(0.5))?; - Ok(Array::random(dim.into_dimension(), dist)) + (numerator - (numerator % denom)) / denom } -pub fn cauchy_dot(a: &Array, lambda: &Array, omega: &Array) -> T +pub fn arange(a: T, b: T, h: T) -> Array1 where - D: Dimension, - T: NdFloat, + T: AsPrimitive + Float, { - (a / (omega - lambda)).sum() -} - -pub fn compute_inverse(matrix: &Array2) -> Option> { - let (rows, cols) = matrix.dim(); - - if !matrix.is_square() { - return None; // Matrix must be square for inversion - } - - let identity = Array2::eye(rows); - - // Concatenate the original matrix with an identity matrix - let mut augmented_matrix = Array2::zeros((rows, 2 * cols)); - augmented_matrix.slice_mut(s![.., ..cols]).assign(matrix); - augmented_matrix.slice_mut(s![.., cols..]).assign(&identity); - - // Perform Gaussian elimination to reduce the left half to the identity matrix - for i in 0..rows { - let pivot = augmented_matrix[[i, i]]; - - if pivot == T::zero() { - return None; // Matrix is singular - } - - augmented_matrix - .slice_mut(s![i, ..]) - .mapv_inplace(|x| x / pivot); - - for j in 0..rows { - if i != j { - let am = augmented_matrix.clone(); - let factor = augmented_matrix[[j, i]]; - let rhs = am.slice(s![i, ..]); - augmented_matrix - .slice_mut(s![j, ..]) - .zip_mut_with(&rhs, |x, &y| *x -= y * factor); - } - } + let n: usize = ((b - a) / h).as_(); + let mut res = Array1::::zeros(n); + res[0] = a; + for i in 1..n { + res[i] = res[i - 1] + h; } - - // Extract the inverted matrix from the augmented matrix - let inverted = augmented_matrix.slice(s![.., cols..]); - - Some(inverted.to_owned()) + res } -pub fn concat_iter(axis: usize, iter: impl IntoIterator>) -> Array -where - D: RemoveAxis, - T: Clone, -{ - let mut arr = iter.into_iter().collect::>(); - let mut out = arr.pop().unwrap(); - for i in arr { - out = concatenate!(Axis(axis), out, i); - } - out +pub fn genspace(features: usize) -> Array1 { + Array1::from_iter((0..features).map(|x| T::from(x).unwrap())) } pub fn linarr(dim: impl IntoDimension) -> Result, ShapeError> @@ -112,13 +62,43 @@ where pub fn linspace(dim: impl 
IntoDimension) -> Result, ShapeError> where D: Dimension, - T: Float, + T: NumCast, { let dim = dim.into_dimension(); let n = dim.as_array_view().product(); - Array::linspace(T::zero(), T::from(n - 1).unwrap(), n).into_shape(dim) + Array::from_iter((0..n).map(|x| T::from(x).unwrap())).into_shape(dim) +} +/// Raise a matrix to a power +pub fn powmat(a: &Array2, n: usize) -> Array2 +where + T: Clone + Num + 'static, + Array2: Dot, Output = Array2>, +{ + if !a.is_square() { + panic!("Matrix must be square"); + } + let mut res = Array2::::eye(a.nrows()); + for _ in 0..n { + res = res.dot(a); + } + res +} +/// +pub fn randcomplex(shape: impl IntoDimension) -> Array, D> +where + D: Dimension, + T: Copy + Num, + StandardNormal: Distribution, +{ + let dim = shape.into_dimension(); + let re = Array::random(dim.clone(), StandardNormal); + let im = Array::random(dim.clone(), StandardNormal); + let mut res = Array::zeros(dim); + ndarray::azip!((re in &re, im in &im, res in &mut res) { + *res = Complex::new(*re, *im); + }); + res } - /// creates a matrix from the given shape filled with numerical elements [0, n) spaced evenly by 1 pub fn rangespace(dim: impl IntoDimension) -> Array where @@ -129,74 +109,186 @@ where let iter = (0..dim.size()).map(|i| T::from(i).unwrap()); Array::from_shape_vec(dim, iter.collect()).unwrap() } - +/// Round the given value to the given number of decimal places. pub fn round_to(val: T, decimals: usize) -> T { let factor = T::from(10).expect("").powi(decimals as i32); (val * factor).round() / factor } -pub fn stack_iter(iter: impl IntoIterator>) -> Array2 +/// Creates a random array from a uniform distribution using a given key +pub fn seeded_uniform( + key: u64, + start: T, + stop: T, + shape: impl IntoDimension, +) -> Array where - T: Clone + Num, + D: Dimension, + T: SampleUniform, { - let mut iter = iter.into_iter(); - let first = iter.next().unwrap(); - let shape = [iter.size_hint().0 + 1, first.len()]; - let mut res = Array2::::zeros(shape); - res.slice_mut(s![0, ..]).assign(&first); - for (i, s) in iter.enumerate() { - res.slice_mut(s![i + 1, ..]).assign(&s); - } - res + Array::random_using( + shape, + Uniform::new(start, stop), + &mut StdRng::seed_from_u64(key), + ) } - -/// Returns the upper triangular portion of a matrix. -pub fn triu(a: &Array2) -> Array2 +/// +pub fn seeded_stdnorm(key: u64, shape: impl IntoDimension) -> Array where - T: Clone + Zero, + D: Dimension, + StandardNormal: Distribution, { - let mut out = a.clone(); - for i in 0..a.shape()[0] { - for j in 0..i { - out[[i, j]] = T::zero(); - } - } - out + Array::random_using(shape, StandardNormal, &mut StdRng::seed_from_u64(key)) } -/// Returns the lower triangular portion of a matrix. 
-pub fn tril(a: &Array2) -> Array2 +/// +pub fn randc_normal(key: u64, shape: impl IntoDimension) -> Array, D> where - T: Clone + Zero, + D: Dimension, + T: Copy + Num, + StandardNormal: Distribution, { - let mut out = a.clone(); - for i in 0..a.shape()[0] { - for j in i + 1..a.shape()[1] { - out[[i, j]] = T::zero(); - } - } - out + let dim = shape.into_dimension(); + let re = seeded_stdnorm(key, dim.clone()); + let im = seeded_stdnorm(key, dim.clone()); + let mut res = Array::zeros(dim); + ndarray::azip!((re in &re, im in &im, res in &mut res) { + *res = Complex::new(*re, *im); + }); + res } - -pub fn hstack(iter: impl IntoIterator>) -> Array2 +/// Given a shape, generate a random array using the StandardNormal distribution +pub fn stdnorm(shape: impl IntoDimension) -> Array where - T: Clone + Num, + D: Dimension, + StandardNormal: Distribution, { - let iter = Vec::from_iter(iter); - let mut res = Array2::::zeros((iter.first().unwrap().len(), iter.len())); - for (i, s) in iter.iter().enumerate() { - res.slice_mut(s![.., i]).assign(s); + Array::random(shape, StandardNormal) +} + +pub(crate) mod assertions { + use ndarray::prelude::{Array, Dimension}; + use ndarray::ScalarOperand; + use num::traits::{FromPrimitive, Signed}; + use std::fmt::Debug; + /// + pub fn assert_atol(a: &Array, b: &Array, tol: T) + where + D: Dimension, + T: Debug + FromPrimitive + PartialOrd + ScalarOperand + Signed, + { + let err = (b - a).mapv(|i| i.abs()).mean().unwrap(); + assert!(err <= tol, "Error: {:?}", err); + } + /// A function helper for testing that some result is ok + pub fn assert_ok(res: Result) -> T + where + E: Debug, + { + assert!(res.is_ok(), "Error: {:?}", res.err()); + res.unwrap() + } + /// + pub fn assert_approx(a: T, b: T, epsilon: T) + where + T: Debug + PartialOrd + Signed, + { + let err = (b - a).abs(); + assert!(err < epsilon, "Error: {:?}", err) + } + /// + pub fn almost_equal(a: T, b: T, epsilon: T) -> bool + where + T: PartialOrd + Signed, + { + (b - a).abs() < epsilon } - res } -pub fn vstack(iter: impl IntoIterator>) -> Array2 -where - T: Clone + Num, -{ - let iter = Vec::from_iter(iter); - let mut res = Array2::::zeros((iter.len(), iter.first().unwrap().len())); - for (i, s) in iter.iter().enumerate() { - res.slice_mut(s![i, ..]).assign(s); +pub(crate) mod arrays { + use ndarray::prelude::{s, Array, Array1, Array2, Axis}; + use ndarray::{concatenate, RemoveAxis}; + use num::traits::{Num, Zero}; + /// Creates an n-dimensional array from an iterator of n dimensional arrays. + pub fn concat_iter( + axis: usize, + iter: impl IntoIterator>, + ) -> Array + where + D: RemoveAxis, + T: Clone, + { + let mut arr = iter.into_iter().collect::>(); + let mut out = arr.pop().unwrap(); + for i in arr { + out = concatenate!(Axis(axis), out, i); + } + out + } + /// Creates a larger array from an iterator of smaller arrays. 
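    /// Each item becomes one row of the result, so every `Array1` in the iterator must
    /// share the same length. A minimal sketch of the expected behavior (hypothetical
    /// values): stacking `array![1.0, 2.0]` and `array![3.0, 4.0]` yields the 2x2
    /// matrix `[[1.0, 2.0], [3.0, 4.0]]`.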
+ pub fn stack_iter(iter: impl IntoIterator>) -> Array2 + where + T: Clone + Num, + { + let mut iter = iter.into_iter(); + let first = iter.next().unwrap(); + let shape = [iter.size_hint().0 + 1, first.len()]; + let mut res = Array2::::zeros(shape); + res.slice_mut(s![0, ..]).assign(&first); + for (i, s) in iter.enumerate() { + res.slice_mut(s![i + 1, ..]).assign(&s); + } + res + } + /// + pub fn hstack(iter: impl IntoIterator>) -> Array2 + where + T: Clone + Num, + { + let iter = Vec::from_iter(iter); + let mut res = Array2::::zeros((iter.first().unwrap().len(), iter.len())); + for (i, s) in iter.iter().enumerate() { + res.slice_mut(s![.., i]).assign(s); + } + res + } + /// Returns the lower triangular portion of a matrix. + pub fn tril(a: &Array2) -> Array2 + where + T: Clone + Zero, + { + let mut out = a.clone(); + for i in 0..a.shape()[0] { + for j in i + 1..a.shape()[1] { + out[[i, j]] = T::zero(); + } + } + out + } + /// Returns the upper triangular portion of a matrix. + pub fn triu(a: &Array2) -> Array2 + where + T: Clone + Zero, + { + let mut out = a.clone(); + for i in 0..a.shape()[0] { + for j in 0..i { + out[[i, j]] = T::zero(); + } + } + out + } + /// + pub fn vstack(iter: impl IntoIterator>) -> Array2 + where + T: Clone + Num, + { + let iter = Vec::from_iter(iter); + let mut res = Array2::::zeros((iter.len(), iter.first().unwrap().len())); + for (i, s) in iter.iter().enumerate() { + res.slice_mut(s![i, ..]).assign(s); + } + res } - res } + +pub(crate) mod linalg {} diff --git a/data/Cargo.toml b/data/Cargo.toml index 3fdce024..c9f426b1 100644 --- a/data/Cargo.toml +++ b/data/Cargo.toml @@ -15,7 +15,6 @@ version.workspace = true default = [] blas = [ - "linfa/blas", "ndarray/blas", ] @@ -54,8 +53,8 @@ test = true [build-dependencies] [dependencies] +concision-core = { path = "../core", version = "0.1.12" } anyhow.workspace = true -linfa = { features = ["serde"], version = "0.7" } ndarray = { features = ["serde-1"], version = "0.15" } num.workspace = true serde.workspace = true diff --git a/data/src/datasets/mod.rs b/data/src/datasets/mod.rs index 00597a07..95041bce 100644 --- a/data/src/datasets/mod.rs +++ b/data/src/datasets/mod.rs @@ -3,9 +3,10 @@ Contrib: FL03 */ //! # Dataset -pub use self::{dataset::*, group::*, utils::*}; +pub use self::{dataset::*, group::*}; pub(crate) mod dataset; pub(crate) mod group; -pub(crate) mod utils {} +#[cfg(test)] +mod tests {} diff --git a/data/src/df/mod.rs b/data/src/df/mod.rs index d5d9cfa7..57944b27 100644 --- a/data/src/df/mod.rs +++ b/data/src/df/mod.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ //! 
# DataFrame -pub use self::{dataframe::*, utils::*}; +pub use self::dataframe::*; pub(crate) mod dataframe; diff --git a/data/src/flows/direction.rs b/data/src/flows/direction.rs index 68bff080..f83e1b0d 100644 --- a/data/src/flows/direction.rs +++ b/data/src/flows/direction.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; #[derive( Clone, @@ -12,22 +12,49 @@ use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; Default, Deserialize, Display, + EnumCount, EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] #[strum(serialize_all = "lowercase")] pub enum Direction { + Backward = 0, #[default] - Forward, - Backward, + Forward = 1, +} + +impl Direction { + /// A functional alias for [Direction::Backward]. + pub fn backward() -> Self { + Self::Backward + } + /// A functional alias for [Direction::Forward]. + pub fn forward() -> Self { + Self::Forward + } +} + +impl From for usize { + fn from(direction: Direction) -> Self { + direction as usize + } +} + +impl From for Direction { + fn from(index: usize) -> Self { + match index % Self::COUNT { + 0 => Self::Backward, + _ => Self::Forward, + } + } } diff --git a/data/src/flows/flow.rs b/data/src/flows/flow.rs index 07f5229b..a64d001e 100644 --- a/data/src/flows/flow.rs +++ b/data/src/flows/flow.rs @@ -2,22 +2,7 @@ Appellation: flow Contrib: FL03 */ -use serde::{Deserialize, Serialize}; -#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] -#[serde(rename_all = "lowercase")] -pub struct Flow { - data: Vec, -} - -impl Flow { - pub fn new() -> Self { - Self { data: Vec::new() } - } -} - -impl std::fmt::Display for Flow { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", serde_json::to_string(self).unwrap()) - } +pub trait Flow { + fn flow(&self, input: T) -> T; } diff --git a/data/src/flows/mod.rs b/data/src/flows/mod.rs index 08de4074..4b044153 100644 --- a/data/src/flows/mod.rs +++ b/data/src/flows/mod.rs @@ -3,9 +3,10 @@ Contrib: FL03 */ //! # Flows -pub use self::{direction::*, flow::*, utils::*}; +pub use self::{direction::*, flow::*}; pub(crate) mod direction; pub(crate) mod flow; -pub(crate) mod utils {} +#[cfg(test)] +mod tests {} diff --git a/data/src/lib.rs b/data/src/lib.rs index 2e5ae7ee..1fc128ce 100644 --- a/data/src/lib.rs +++ b/data/src/lib.rs @@ -6,27 +6,33 @@ //! 
#![feature(associated_type_defaults)] -pub use self::{misc::*, primitives::*, specs::*, utils::*}; +pub use self::misc::*; pub(crate) mod misc; pub(crate) mod primitives; -pub(crate) mod specs; pub(crate) mod utils; pub mod datasets; pub mod df; pub mod flows; +pub mod mat; +pub mod shape; +pub mod specs; +pub mod store; pub mod tensors; +pub(crate) use concision_core as core; + pub mod prelude { + pub use crate::misc::*; // pub use linfa::dataset::{Dataset, DatasetBase, DatasetView}; pub use crate::datasets::*; pub use crate::df::*; pub use crate::flows::*; - pub use crate::tensors::*; - pub use crate::primitives::*; + pub use crate::shape::*; pub use crate::specs::*; - pub use crate::utils::*; + pub use crate::store::*; + pub use crate::tensors::*; } diff --git a/data/src/mat/matrix.rs b/data/src/mat/matrix.rs new file mode 100644 index 00000000..76a4cbfa --- /dev/null +++ b/data/src/mat/matrix.rs @@ -0,0 +1,21 @@ +/* + Appellation: matrix + Contrib: FL03 +*/ +use ndarray::prelude::Array2; + +pub struct Matrix { + store: Array2, +} + +impl Matrix { + pub fn new(store: Array2) -> Self { + Self { store } + } +} + +impl AsRef> for Matrix { + fn as_ref(&self) -> &Array2 { + &self.store + } +} diff --git a/data/src/mat/mod.rs b/data/src/mat/mod.rs new file mode 100644 index 00000000..ed4a755b --- /dev/null +++ b/data/src/mat/mod.rs @@ -0,0 +1,15 @@ +/* + Appellation: mat + Contrib: FL03 +*/ +//! # Matrix +//! +//! A matrix is a two-dimensional array of elements. +pub use self::matrix::*; + +pub(crate) mod matrix; + +pub trait Mat {} + +#[cfg(test)] +mod tests {} diff --git a/data/src/misc/dtype.rs b/data/src/misc/dtype.rs index 1a176c44..52b405c7 100644 --- a/data/src/misc/dtype.rs +++ b/data/src/misc/dtype.rs @@ -2,10 +2,360 @@ Appellation: dtype Contrib: FL03 */ +use serde::{Deserialize, Serialize}; +use smart_default::SmartDefault; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; -pub enum DType {} +pub trait DataType { + fn dtype(&self) -> DType; +} + +impl DataType for T +where + T: Clone + Into, +{ + fn dtype(&self) -> DType { + self.clone().into() + } +} + +#[derive( + Clone, + Copy, + Debug, + Deserialize, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + Serialize, + SmartDefault, + VariantNames, +)] +#[serde(rename_all = "lowercase")] +#[strum(serialize_all = "lowercase")] +pub enum DType { + #[default] + FloatingPoint(FloatingPoint), + Integer(Integer), + Unsigned(Unsigned), +} + +impl DType { + pub fn detect(var: T) -> Self + where + T: Clone + Default + Into, + { + var.dtype() + } +} + +impl From for DType { + fn from(_: f32) -> Self { + DType::FloatingPoint(FloatingPoint::F32) + } +} + +impl From for DType { + fn from(_: f64) -> Self { + DType::FloatingPoint(FloatingPoint::F64) + } +} + +impl From for DType { + fn from(_: i8) -> Self { + DType::Integer(Integer::I8) + } +} + +impl From for DType { + fn from(_: i16) -> Self { + DType::Integer(Integer::I16) + } +} + +impl From for DType { + fn from(_: i32) -> Self { + DType::Integer(Integer::I32) + } +} + +impl From for DType { + fn from(_: i64) -> Self { + DType::Integer(Integer::I64) + } +} +impl From for DType { + fn from(_: i128) -> Self { + DType::Integer(Integer::I128) + } +} + +impl From for DType { + fn from(_: isize) -> Self { + DType::Integer(Integer::ISIZE) + } +} + +impl From for DType { + fn from(_: u8) -> Self { + DType::Unsigned(Unsigned::U8) + } +} + +impl From for DType { + fn from(_: u16) -> Self { + 
DType::Unsigned(Unsigned::U16) + } +} + +impl From for DType { + fn from(_: u32) -> Self { + DType::Unsigned(Unsigned::U32) + } +} + +impl From for DType { + fn from(_: u64) -> Self { + DType::Unsigned(Unsigned::U64) + } +} + +impl From for DType { + fn from(_: u128) -> Self { + DType::Unsigned(Unsigned::U128) + } +} + +impl From for DType { + fn from(_: usize) -> Self { + DType::Unsigned(Unsigned::USIZE) + } +} + +#[derive( + Clone, + Copy, + Debug, + Default, + Deserialize, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + Serialize, + VariantNames, +)] +#[serde(rename_all = "lowercase")] +#[strum(serialize_all = "lowercase")] pub enum FloatingPoint { F32, + #[default] F64, } + +impl From for FloatingPoint { + fn from(_: f32) -> Self { + FloatingPoint::F32 + } +} + +impl From for FloatingPoint { + fn from(_: f64) -> Self { + FloatingPoint::F64 + } +} + +impl From for DType { + fn from(dtype: FloatingPoint) -> Self { + DType::FloatingPoint(dtype) + } +} + +pub struct Int { + size: IntSize, +} + +#[derive( + Clone, + Copy, + Debug, + Default, + Deserialize, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + Serialize, + VariantNames, +)] +#[serde(rename_all = "lowercase")] +#[strum(serialize_all = "lowercase")] +pub enum IntSize { + #[default] + S8 = 8, + S16 = 16, + S32 = 32, + S64 = 64, + S128 = 128, + SSize, +} + +#[derive( + Clone, + Copy, + Debug, + Default, + Deserialize, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + Serialize, + VariantNames, +)] +#[serde(rename_all = "lowercase")] +#[strum(serialize_all = "lowercase")] +pub enum Integer { + I8, + I16, + I32, + I64, + I128, + #[default] + ISIZE, +} + +impl From for Integer { + fn from(_: i8) -> Self { + Integer::I8 + } +} + +impl From for Integer { + fn from(_: i16) -> Self { + Integer::I16 + } +} + +impl From for Integer { + fn from(_: i32) -> Self { + Integer::I32 + } +} + +impl From for Integer { + fn from(_: i64) -> Self { + Integer::I64 + } +} + +impl From for Integer { + fn from(_: i128) -> Self { + Integer::I128 + } +} + +impl From for Integer { + fn from(_: isize) -> Self { + Integer::ISIZE + } +} + +impl From for DType { + fn from(dtype: Integer) -> Self { + DType::Integer(dtype) + } +} + +#[derive( + Clone, + Copy, + Debug, + Default, + Deserialize, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + Serialize, + VariantNames, +)] +#[serde(rename_all = "lowercase")] +#[strum(serialize_all = "lowercase")] +pub enum Unsigned { + U8, + U16, + U32, + U64, + U128, + #[default] + USIZE, +} + +impl From for Unsigned { + fn from(_: u8) -> Self { + Unsigned::U8 + } +} + +impl From for Unsigned { + fn from(_: u16) -> Self { + Unsigned::U16 + } +} + +impl From for Unsigned { + fn from(_: u32) -> Self { + Unsigned::U32 + } +} + +impl From for Unsigned { + fn from(_: u64) -> Self { + Unsigned::U64 + } +} + +impl From for Unsigned { + fn from(_: u128) -> Self { + Unsigned::U128 + } +} + +impl From for Unsigned { + fn from(_: usize) -> Self { + Unsigned::USIZE + } +} diff --git a/data/src/primitives.rs b/data/src/primitives.rs index 859023bb..51d14049 100644 --- a/data/src/primitives.rs +++ b/data/src/primitives.rs @@ -2,7 +2,6 @@ Appellation: primitives Contrib: FL03 */ -pub use self::{constants::*, statics::*, types::*}; mod constants {} diff --git a/data/src/shape/dimension.rs 
b/data/src/shape/dimension.rs new file mode 100644 index 00000000..f7c016bd --- /dev/null +++ b/data/src/shape/dimension.rs @@ -0,0 +1,6 @@ +/* + Appellation: dimension + Contrib: FL03 +*/ + +pub trait Dimension {} diff --git a/data/src/shape/mod.rs b/data/src/shape/mod.rs new file mode 100644 index 00000000..881f43c3 --- /dev/null +++ b/data/src/shape/mod.rs @@ -0,0 +1,24 @@ +/* + Appellation: shapes + Contrib: FL03 +*/ +//! # Shapes +pub use self::{dimension::*, rank::*, shape::*}; + +pub(crate) mod dimension; +pub(crate) mod rank; +pub(crate) mod shape; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_shape() { + let mut shape = Shape::default(); + shape.extend([1, 1, 1]); + assert_eq!(shape, Shape::new(vec![1, 1, 1])); + assert_eq!(shape.elements(), 1); + assert_eq!(shape.rank(), 3); + } +} diff --git a/data/src/shape/rank.rs b/data/src/shape/rank.rs new file mode 100644 index 00000000..99d94e8c --- /dev/null +++ b/data/src/shape/rank.rs @@ -0,0 +1,68 @@ +/* + Appellation: rank + Contrib: FL03 +*/ +//! # Rank +//! +//! The rank of a n-dimensional array describes the number of dimensions +use serde::{Deserialize, Serialize}; + +pub enum Ranks { + Zero(T), + One(Vec), + N(Vec), +} + +#[derive( + Clone, Copy, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, +)] +#[serde(rename_all = "lowercase")] +pub struct Rank(pub usize); + +impl Rank { + pub fn new(rank: usize) -> Self { + Self(rank) + } + + pub fn rank(&self) -> usize { + self.0 + } +} + +impl AsRef for Rank { + fn as_ref(&self) -> &usize { + &self.0 + } +} + +impl AsMut for Rank { + fn as_mut(&mut self) -> &mut usize { + &mut self.0 + } +} + +impl From for Rank { + fn from(rank: usize) -> Self { + Self(rank) + } +} + +impl From for usize { + fn from(rank: Rank) -> Self { + rank.0 + } +} + +// impl TryFrom for Rank +// where +// T: NumCast, +// { +// type Error = Box; + +// fn try_from(value: T) -> Result { +// if let Some(rank) = ::from(value) { +// return Ok(Self(rank)); +// } +// Err("Could not convert to Rank".into()) +// } +// } diff --git a/data/src/shape/shape.rs b/data/src/shape/shape.rs new file mode 100644 index 00000000..9d4a4071 --- /dev/null +++ b/data/src/shape/shape.rs @@ -0,0 +1,148 @@ +/* + Appellation: shape + Contrib: FL03 +*/ +use serde::{Deserialize, Serialize}; +use std::ops; + +#[derive(Clone, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)] +pub struct Shape(Vec); + +impl Shape { + pub fn new(shape: Vec) -> Self { + Self(shape) + } + + pub fn elements(&self) -> usize { + self.0.iter().product() + } + + pub fn include(mut self, dim: usize) -> Self { + self.0.push(dim); + self + } + + pub fn push(&mut self, dim: usize) { + self.0.push(dim) + } + + pub fn rank(&self) -> usize { + self.0.len() + } + + pub fn with_capacity(capacity: usize) -> Self { + Self(Vec::with_capacity(capacity)) + } + + pub fn zero() -> Self { + Self::default() + } + + pub fn zeros(rank: usize) -> Self { + Self(vec![0; rank]) + } +} + +impl AsRef<[usize]> for Shape { + fn as_ref(&self) -> &[usize] { + &self.0 + } +} + +impl AsMut<[usize]> for Shape { + fn as_mut(&mut self) -> &mut [usize] { + &mut self.0 + } +} + +impl Extend for Shape { + fn extend>(&mut self, iter: I) { + self.0.extend(iter) + } +} + +impl FromIterator for Shape { + fn from_iter>(iter: I) -> Self { + Self(iter.into_iter().collect()) + } +} + +impl IntoIterator for Shape { + type Item = usize; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.0.into_iter() + } 
+} + +impl<'a> IntoIterator for &'a mut Shape { + type Item = &'a mut usize; + type IntoIter = std::slice::IterMut<'a, usize>; + + fn into_iter(self) -> Self::IntoIter { + self.0.iter_mut() + } +} + +impl ops::Index for Shape { + type Output = usize; + + fn index(&self, index: usize) -> &Self::Output { + &self.0[index] + } +} + +impl ops::IndexMut for Shape { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + &mut self.0[index] + } +} + +impl ops::Index> for Shape { + type Output = [usize]; + + fn index(&self, index: ops::Range) -> &Self::Output { + &self.0[index] + } +} + +impl ops::Index> for Shape { + type Output = [usize]; + + fn index(&self, index: ops::RangeTo) -> &Self::Output { + &self.0[index] + } +} + +impl ops::Index> for Shape { + type Output = [usize]; + + fn index(&self, index: ops::RangeFrom) -> &Self::Output { + &self.0[index] + } +} + +impl ops::Index for Shape { + type Output = [usize]; + + fn index(&self, index: ops::RangeFull) -> &Self::Output { + &self.0[index] + } +} + +impl ops::Index> for Shape { + type Output = [usize]; + + fn index(&self, index: ops::RangeInclusive) -> &Self::Output { + &self.0[index] + } +} + +impl ops::Index> for Shape { + type Output = [usize]; + + fn index(&self, index: ops::RangeToInclusive) -> &Self::Output { + &self.0[index] + } +} diff --git a/data/src/specs/elements.rs b/data/src/specs/elements.rs new file mode 100644 index 00000000..5b292598 --- /dev/null +++ b/data/src/specs/elements.rs @@ -0,0 +1,8 @@ +/* + Appellation: elements + Contrib: FL03 +*/ +use num::complex::Complex; +use num::traits::NumOps; + +pub trait Element: NumOps + NumOps, Complex> + Sized {} diff --git a/data/src/specs/export.rs b/data/src/specs/export.rs new file mode 100644 index 00000000..d3bfb5c8 --- /dev/null +++ b/data/src/specs/export.rs @@ -0,0 +1,9 @@ +/* + Appellation: export + Contrib: FL03 +*/ +use std::path::Path; + +pub trait Export { + fn export(&self, path: impl AsRef) -> Result<(), std::io::Error>; +} diff --git a/data/src/specs/import.rs b/data/src/specs/import.rs new file mode 100644 index 00000000..d8c6610d --- /dev/null +++ b/data/src/specs/import.rs @@ -0,0 +1,11 @@ +/* + Appellation: import + Contrib: FL03 +*/ +use std::path::Path; + +pub trait Import { + type Obj; + + fn import(&mut self, path: impl AsRef) -> Result; +} diff --git a/data/src/specs.rs b/data/src/specs/mod.rs similarity index 55% rename from data/src/specs.rs rename to data/src/specs/mod.rs index b6d9d9d7..bcf89655 100644 --- a/data/src/specs.rs +++ b/data/src/specs/mod.rs @@ -1,7 +1,13 @@ /* - Appellation: specs - Contrib: FL03 + Appellation: specs + Contrib: FL03 */ +pub use self::{elements::*, export::*, import::*}; + +pub(crate) mod elements; +pub(crate) mod export; +pub(crate) mod import; + use ndarray::prelude::{Array1, Array2}; pub trait Records { @@ -12,7 +18,7 @@ pub trait Records { impl Records for Array1 { fn features(&self) -> usize { - self.shape()[1] + 1 } fn samples(&self) -> usize { @@ -30,14 +36,5 @@ impl Records for Array2 { } } -pub trait NdArrayExt {} - -pub trait Store { - fn get(&self, key: &K) -> Option<&V>; - - fn get_mut(&mut self, key: &K) -> Option<&mut V>; - - fn insert(&mut self, key: K, value: V) -> Option; - - fn remove(&mut self, key: &K) -> Option; -} +#[cfg(test)] +mod tests {} diff --git a/data/src/store/layout.rs b/data/src/store/layout.rs new file mode 100644 index 00000000..28780f6c --- /dev/null +++ b/data/src/store/layout.rs @@ -0,0 +1,19 @@ +/* + Appellation: layout + Contrib: FL03 +*/ +use crate::shape::Shape; + +pub struct 
Layout { + shape: Shape, +} + +impl Layout { + pub fn new(shape: Shape) -> Self { + Self { shape } + } + + pub fn shape(&self) -> &Shape { + &self.shape + } +} diff --git a/data/src/store/mod.rs b/data/src/store/mod.rs new file mode 100644 index 00000000..1808329f --- /dev/null +++ b/data/src/store/mod.rs @@ -0,0 +1,73 @@ +/* + Appellation: store + Contrib: FL03 +*/ +//! # Store +pub use self::{layout::*, storage::*}; + +pub(crate) mod layout; +pub(crate) mod storage; + +use std::collections::{BTreeMap, HashMap}; +use std::ops; + +pub trait Store { + fn contains(&self, key: &K) -> bool { + self.get(key).is_some() + } + + fn get(&self, key: &K) -> Option<&V>; + + fn get_mut(&mut self, key: &K) -> Option<&mut V>; + + fn insert(&mut self, key: K, value: V) -> Option; + + fn remove(&mut self, key: &K) -> Option; +} + +pub trait StoreExt: Store + ops::Index {} + +impl Store for BTreeMap +where + K: Ord, +{ + fn get(&self, key: &K) -> Option<&V> { + BTreeMap::get(self, key) + } + + fn get_mut(&mut self, key: &K) -> Option<&mut V> { + BTreeMap::get_mut(self, key) + } + + fn insert(&mut self, key: K, value: V) -> Option { + BTreeMap::insert(self, key, value) + } + + fn remove(&mut self, key: &K) -> Option { + BTreeMap::remove(self, key) + } +} + +impl Store for HashMap +where + K: Eq + std::hash::Hash, +{ + fn get(&self, key: &K) -> Option<&V> { + HashMap::get(self, key) + } + + fn get_mut(&mut self, key: &K) -> Option<&mut V> { + HashMap::get_mut(self, key) + } + + fn insert(&mut self, key: K, value: V) -> Option { + HashMap::insert(self, key, value) + } + + fn remove(&mut self, key: &K) -> Option { + HashMap::remove(self, key) + } +} + +#[cfg(test)] +mod tests {} diff --git a/data/src/store/storage.rs b/data/src/store/storage.rs new file mode 100644 index 00000000..026e49f8 --- /dev/null +++ b/data/src/store/storage.rs @@ -0,0 +1,6 @@ +/* + Appellation: storage + Contrib: FL03 +*/ + +pub struct Storage {} diff --git a/data/src/tensors/mod.rs b/data/src/tensors/mod.rs index b5116995..46fbbf9a 100644 --- a/data/src/tensors/mod.rs +++ b/data/src/tensors/mod.rs @@ -3,16 +3,62 @@ Contrib: FL03 */ //! # Tensors -pub use self::tensor::*; +//! +//! A tensor is a generalization of vectors and matrices to potentially higher dimensions. 
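Before the tensor module continues below, a quick self-contained illustration of the `Store` trait introduced in data/src/store/mod.rs above. The `K, V` parameterization and the `HashMap` delegation are written out as the hunk implies; `put_default` is a hypothetical helper added only to show a trait-generic call site, not part of the crate.

use std::collections::HashMap;

pub trait Store<K, V> {
    fn contains(&self, key: &K) -> bool {
        self.get(key).is_some()
    }
    fn get(&self, key: &K) -> Option<&V>;
    fn get_mut(&mut self, key: &K) -> Option<&mut V>;
    fn insert(&mut self, key: K, value: V) -> Option<V>;
    fn remove(&mut self, key: &K) -> Option<V>;
}

// Delegate straight to the inherent HashMap methods, as in the diff above.
impl<K, V> Store<K, V> for HashMap<K, V>
where
    K: Eq + std::hash::Hash,
{
    fn get(&self, key: &K) -> Option<&V> {
        HashMap::get(self, key)
    }
    fn get_mut(&mut self, key: &K) -> Option<&mut V> {
        HashMap::get_mut(self, key)
    }
    fn insert(&mut self, key: K, value: V) -> Option<V> {
        HashMap::insert(self, key, value)
    }
    fn remove(&mut self, key: &K) -> Option<V> {
        HashMap::remove(self, key)
    }
}

fn main() {
    // Any backing map can now be accepted through the trait bound.
    fn put_default<S: Store<String, f64>>(store: &mut S) {
        store.insert("weight".to_string(), 0.0);
    }
    let mut params = HashMap::new();
    put_default(&mut params);
    assert!(Store::contains(&params, &"weight".to_string()));
}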
+pub use self::{mode::*, tensor::*}; +pub(crate) mod mode; pub(crate) mod tensor; -use ndarray::prelude::{Array, Dimension, Ix2}; +// use ndarray::prelude::{Array, Dimension, Ix2}; +use crate::core::ops::Operation; +use num::traits::{Num, NumOps}; + +pub trait GradStore { + type Tensor: NdTensor; + + fn get(&self, id: &str) -> Option<&Self::Tensor>; +} + +pub trait ComplexN: Num + NumOps { + type Real: NumOps; + + fn im(&self) -> Self::Real; + + fn re(&self) -> Self::Real; +} + +pub trait TensorScalar { + type Complex: ComplexN; + type Real: Num + NumOps + NumOps; +} pub trait NdTensor { - type Dim: Dimension = Ix2; + fn affine(&self, a: T, b: T) -> Self; + + fn apply(&self, f: F) -> Self + where + F: Fn(T) -> T; + + fn apply_op(&self, op: Op) -> >::Output + where + Op: Operation, + Self: Sized, + { + op.eval(self) + } + + fn backward(&self) -> Self; + + fn id(&self) -> &str; + + fn is_variable(&self) -> bool { + self.mode().is_variable() + } + + fn mode(&self) -> TensorKind; - fn tensor(&self) -> &Array; + fn tensor(&self) -> &Self; } #[cfg(test)] diff --git a/data/src/tensors/mode.rs b/data/src/tensors/mode.rs new file mode 100644 index 00000000..bac0be07 --- /dev/null +++ b/data/src/tensors/mode.rs @@ -0,0 +1,60 @@ +/* + Appellation: mode + Contrib: FL03 +*/ +use serde::{Deserialize, Serialize}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; + +#[derive( + Clone, + Copy, + Debug, + Default, + Deserialize, + Display, + EnumCount, + EnumIs, + EnumIter, + EnumString, + Eq, + Hash, + Ord, + PartialEq, + PartialOrd, + Serialize, + VariantNames, +)] +#[repr(usize)] +#[serde(rename_all = "lowercase")] +#[strum(serialize_all = "lowercase")] +pub enum TensorKind { + #[default] + Standard = 0, + Variable = 1, +} + +impl TensorKind { + /// A functional alias for [TensorKind::Standard]. + pub fn standard() -> Self { + Self::Standard + } + /// A functional alias for [TensorKind::Variable]. 
+ pub fn variable() -> Self { + Self::Variable + } +} + +impl From for usize { + fn from(var: TensorKind) -> Self { + var as usize + } +} + +impl From for TensorKind { + fn from(index: usize) -> Self { + match index % Self::COUNT { + 0 => Self::Standard, + _ => Self::Variable, + } + } +} diff --git a/data/src/tensors/tensor.rs b/data/src/tensors/tensor.rs index af399da4..d42f7f58 100644 --- a/data/src/tensors/tensor.rs +++ b/data/src/tensors/tensor.rs @@ -2,9 +2,12 @@ Appellation: tensor Contrib: FL03 */ +use super::TensorKind; +use crate::core::id::AtomicId; +use crate::prelude::DType; use ndarray::prelude::{Array, Dimension, Ix2}; use ndarray::IntoDimension; -use num::Float; +use num::Num; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Default, Deserialize, Eq, Hash, PartialEq, Serialize)] @@ -12,19 +15,51 @@ use serde::{Deserialize, Serialize}; pub struct Tensor where D: Dimension, - T: Float, { + id: AtomicId, data: Array, + dtype: DType, + mode: TensorKind, } impl Tensor where D: Dimension, - T: Float, { - pub fn new(shape: impl IntoDimension) -> Self { + pub fn new(data: Array) -> Self { Self { + id: AtomicId::new(), + data, + dtype: DType::default(), + mode: TensorKind::default(), + } + } + + pub fn mode(&self) -> TensorKind { + self.mode + } + + pub fn set_mode(&mut self, mode: TensorKind) { + self.mode = mode; + } + + pub fn as_variable(mut self) -> Self { + self.mode = TensorKind::Variable; + self + } +} + +impl Tensor +where + D: Dimension, + T: Clone + Num, +{ + pub fn zeros(shape: impl IntoDimension) -> Self { + Self { + id: AtomicId::new(), data: Array::zeros(shape), + dtype: DType::default(), + mode: TensorKind::default(), } } } @@ -32,7 +67,7 @@ where impl std::fmt::Display for Tensor where D: Dimension, - T: Float + std::fmt::Debug, + T: std::fmt::Debug, { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "{:?}", self.data) diff --git a/ml/linear/src/cmp/mod.rs b/ml/linear/src/cmp/mod.rs new file mode 100644 index 00000000..e341c207 --- /dev/null +++ b/ml/linear/src/cmp/mod.rs @@ -0,0 +1,13 @@ +/* + Appellation: cmp + Contrib: FL03 +*/ +//! # Components +//! +//! + +pub mod neurons; +pub mod params; + +#[cfg(test)] +mod tests {} diff --git a/ml/linear/src/cmp/neurons/mod.rs b/ml/linear/src/cmp/neurons/mod.rs new file mode 100644 index 00000000..730d56c0 --- /dev/null +++ b/ml/linear/src/cmp/neurons/mod.rs @@ -0,0 +1,95 @@ +/* + Appellation: neurons + Contrib: FL03 +*/ +//! 
# neurons +pub use self::{node::*, perceptron::*, synapse::*}; + +pub(crate) mod node; +pub(crate) mod perceptron; +pub(crate) mod synapse; + +use crate::neural::func::activate::Activate; +use ndarray::prelude::{Array0, Array1, Array2, Ix1, NdFloat}; + +pub trait ArtificialNeuron +where + T: NdFloat, +{ + type Rho: Activate; + + fn bias(&self) -> Array0; + + fn linear(&self, args: &Array2) -> Array1 { + args.dot(self.weights()) + self.bias() + } + + fn forward(&self, args: &Array2) -> Array1 { + self.rho().activate(&self.linear(args)) + } + + fn rho(&self) -> &Self::Rho; + + fn weights(&self) -> &Array1; +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::neural::prelude::{softmax, Activate, Forward, Softmax}; + use ndarray::prelude::{array, Array1, Ix2, NdFloat}; + + fn _artificial( + args: &Array2, + bias: Option<&Array1>, + rho: impl Activate, + weights: &Array2, + ) -> Array2 + where + T: NdFloat, + { + let linear = if let Some(bias) = bias { + args.dot(weights) + bias + } else { + args.dot(weights) + }; + rho.activate(&linear) + } + + #[test] + fn test_neuron() { + let bias = 0.0; + + let data = array![[10.0, 10.0, 6.0, 1.0, 8.0]]; + let weights = array![2.0, 1.0, 10.0, 1.0, 7.0]; + let neuron = Perceptron::::new(5).with_weights(weights.clone()); + + let linear = data.dot(&weights) + bias; + let exp = softmax(&linear); + + assert_eq!(exp, neuron.forward(&data)); + } + + // #[test] + // fn test_node() { + // let bias = ndarray::Array1::::zeros(4); + + // let a_data = array![10.0, 10.0, 6.0, 1.0, 8.0]; + // let a_weights = array![2.0, 1.0, 10.0, 1.0, 7.0]; + // let a = Neuron::new(softmax, bias.clone(), a_weights.clone()); + // let node_a = Node::new(a.clone()).with_data(a_data.clone()); + + // let exp = _artificial(&a_data, Some(bias.clone()), Softmax::default(), &a_weights); + // assert_eq!(node_a.process(), exp); + + // let b_data = array![0.0, 9.0, 3.0, 5.0, 3.0]; + // let b_weights = array![2.0, 8.0, 8.0, 0.0, 3.0]; + + // let b = Neuron::new(softmax, bias.clone(), b_weights.clone()); + // let node_b = Node::new(b.clone()).with_data(b_data.clone()); + // let exp = _artificial(&b_data, Some(bias), Softmax::default(), &b_weights); + // assert_eq!(node_b.process(), exp); + + // assert_eq!(node_a.dot() + node_b.dot(), 252.0); + // } +} diff --git a/ml/linear/src/cmp/neurons/node.rs b/ml/linear/src/cmp/neurons/node.rs new file mode 100644 index 00000000..3c718b52 --- /dev/null +++ b/ml/linear/src/cmp/neurons/node.rs @@ -0,0 +1,263 @@ +/* + Appellation: node + Contrib: FL03 +*/ +use crate::core::prelude::GenerateRandom; +use crate::neural::prelude::Forward; + +use ndarray::linalg::Dot; +use ndarray::prelude::{Array, Array0, Array1, Array2, Dimension, NdFloat}; +use ndarray::{RemoveAxis, ScalarOperand}; +use ndarray_rand::rand_distr::uniform::SampleUniform; +use ndarray_rand::rand_distr::{Distribution, StandardNormal}; +use num::{Float, Num}; +use std::ops; + +#[derive(Clone, Debug, PartialEq)] +pub struct Node { + bias: Option>, + features: usize, + weights: Array1, +} + +impl Node +where + T: Clone + Num, +{ + pub fn create(biased: bool, features: usize) -> Self { + let bias = if biased { + Some(Array0::zeros(())) + } else { + None + }; + Self { + bias, + features, + weights: Array1::zeros(features), + } + } + + pub fn biased(features: usize) -> Self { + Self::create(true, features) + } + + pub fn new(features: usize) -> Self { + Self::create(false, features) + } +} +impl Node +where + T: Num, +{ + pub fn bias(&self) -> Option<&Array0> { + self.bias.as_ref() + } + + pub fn 
bias_mut(&mut self) -> Option<&mut Array0> { + self.bias.as_mut() + } + + pub fn features(&self) -> usize { + self.features + } + + pub fn is_biased(&self) -> bool { + self.bias.is_some() + } + + pub fn set_bias(&mut self, bias: Option>) { + self.bias = bias; + } + + pub fn set_features(&mut self, features: usize) { + self.features = features; + } + + pub fn set_weights(&mut self, weights: Array1) { + self.weights = weights; + } + + pub fn weights(&self) -> &Array1 { + &self.weights + } + + pub fn weights_mut(&mut self) -> &mut Array1 { + &mut self.weights + } + + pub fn with_bias(mut self, bias: Option>) -> Self { + self.bias = bias; + self + } + + pub fn with_features(mut self, features: usize) -> Self { + self.features = features; + self + } + + pub fn with_weights(mut self, weights: Array1) -> Self { + self.weights = weights; + self + } +} + +impl Node +where + T: Num + ScalarOperand, + Array2: Dot, Output = Array1>, +{ + pub fn linear(&self, data: &Array2) -> Array1 { + let w = self.weights().t().to_owned(); + if let Some(bias) = self.bias() { + data.dot(&w) + bias + } else { + data.dot(&w) + } + } +} +impl Node +where + T: Float + SampleUniform, + StandardNormal: Distribution, +{ + pub fn init(mut self, biased: bool) -> Self { + if biased { + self = self.init_bias(); + } + self.init_weight() + } + + pub fn init_bias(mut self) -> Self { + let dk = (T::one() / T::from(self.features).unwrap()).sqrt(); + self.bias = Some(Array0::uniform_between(dk, ())); + self + } + + pub fn init_weight(mut self) -> Self { + let features = self.features; + let dk = (T::one() / T::from(features).unwrap()).sqrt(); + self.weights = Array1::uniform_between(dk, features); + self + } +} + +impl Node +where + T: NdFloat, +{ + pub fn apply_gradient(&mut self, gamma: T, gradient: G) + where + G: Fn(&Array1) -> Array1, + { + let grad = gradient(self.weights()); + self.weights_mut().scaled_add(-gamma, &grad); + } + + pub fn activate(&self, data: &Array2, activator: A) -> Array1 + where + A: Fn(&Array1) -> Array1, + { + activator(&self.forward(data)) + } +} + +impl Forward> for Node +where + D: Dimension + RemoveAxis, + T: NdFloat, + Array: Dot, Output = Array>, + Array: ops::Add, Output = Array>, +{ + type Output = Array; + + fn forward(&self, data: &Array) -> Self::Output { + let w = self.weights().t().to_owned(); + if let Some(bias) = self.bias() { + return data.dot(&w) + bias.clone(); + } + data.dot(&w) + } +} + +impl FromIterator for Node +where + T: Float, +{ + fn from_iter(iter: I) -> Self + where + I: IntoIterator, + { + let weights = Array1::::from_iter(iter); + Self { + bias: None, + features: weights.len(), + weights, + } + } +} + +impl From<(Array1, Array0)> for Node +where + T: Float, +{ + fn from((weights, bias): (Array1, Array0)) -> Self { + Self { + bias: Some(bias), + features: weights.len(), + weights, + } + } +} + +impl From<(Array1, T)> for Node +where + T: NdFloat, +{ + fn from((weights, bias): (Array1, T)) -> Self { + Self { + bias: Some(Array0::ones(()) * bias), + features: weights.len(), + weights, + } + } +} + +impl From<(Array1, Option)> for Node +where + T: Float + ScalarOperand, +{ + fn from((weights, bias): (Array1, Option)) -> Self { + let bias = if let Some(b) = bias { + Some(Array0::ones(()) * b) + } else { + None + }; + Self { + bias, + features: weights.len(), + weights, + } + } +} + +impl From<(Array1, Option>)> for Node +where + T: Float, +{ + fn from((weights, bias): (Array1, Option>)) -> Self { + Self { + bias, + features: weights.len(), + weights, + } + } +} + +impl From> for 
(Array1, Option>) +where + T: Float, +{ + fn from(node: Node) -> Self { + (node.weights, node.bias) + } +} diff --git a/ml/linear/src/cmp/neurons/perceptron.rs b/ml/linear/src/cmp/neurons/perceptron.rs new file mode 100644 index 00000000..6e1bf0cc --- /dev/null +++ b/ml/linear/src/cmp/neurons/perceptron.rs @@ -0,0 +1,212 @@ +/* + Appellation: neuron + Contrib: FL03 +*/ +use super::Node; +use crate::neural::prelude::{Activate, Forward, LinearActivation}; +use ndarray::prelude::{Array0, Array1, Array2, Ix1, NdFloat}; +use ndarray_rand::rand_distr::uniform::SampleUniform; +use ndarray_rand::rand_distr::{Distribution, StandardNormal}; +use num::Float; + +/// Artificial Neuron +#[derive(Clone, Debug, PartialEq)] +pub struct Perceptron +where + A: Activate, + T: Float, +{ + activation: A, + node: Node, +} + +impl Perceptron +where + A: Activate, + T: Float, +{ + pub fn new(features: usize) -> Self + where + A: Default, + { + Self { + activation: A::default(), + node: Node::create(false, features), + } + } + + pub fn node(&self) -> &Node { + &self.node + } + + pub fn node_mut(&mut self) -> &mut Node { + &mut self.node + } + + pub fn features(&self) -> usize { + self.node.features() + } + + pub fn params(&self) -> &Node { + &self.node + } + + pub fn params_mut(&mut self) -> &mut Node { + &mut self.node + } + + pub fn rho(&self) -> &A { + &self.activation + } + + pub fn with_bias(mut self, bias: Option>) -> Self { + self.node = self.node.with_bias(bias); + self + } + + pub fn with_rho>(self, rho: B) -> Perceptron { + Perceptron { + activation: rho, + node: self.node, + } + } + + pub fn with_node(mut self, node: Node) -> Self { + self.node = node; + self + } + + pub fn with_weights(mut self, weights: Array1) -> Self { + self.node = self.node.with_weights(weights); + self + } + + pub fn weights(&self) -> &Array1 { + self.node.weights() + } + + pub fn weights_mut(&mut self) -> &mut Array1 { + self.node.weights_mut() + } + + pub fn set_weights(&mut self, weights: Array1) { + self.node.set_weights(weights); + } +} + +impl Perceptron +where + T: NdFloat, + A: Activate, +{ + pub fn apply_gradient(&mut self, gamma: T, gradient: G) + where + G: Fn(&Array1) -> Array1, + { + let grad = gradient(self.node().weights()); + self.update_with_gradient(gamma, &grad); + } + + pub fn update_with_gradient(&mut self, gamma: T, grad: &Array1) { + self.node.weights_mut().scaled_add(-gamma, grad); + } +} + +impl Perceptron +where + T: Float + SampleUniform, + A: Activate, + StandardNormal: Distribution, +{ + pub fn init(mut self, biased: bool) -> Self { + if biased { + self = self.init_bias(); + } + self.init_weight() + } + + pub fn init_bias(mut self) -> Self { + self.node = self.node.init_bias(); + self + } + + pub fn init_weight(mut self) -> Self { + self.node = self.node.init_weight(); + self + } +} + +impl Forward> for Perceptron +where + T: NdFloat, + A: Activate, +{ + type Output = Array1; + + fn forward(&self, args: &Array2) -> Self::Output { + let linstep = self.params().forward(args); + self.rho().activate(&linstep) + } +} + +impl From<(Array1, Array0)> for Perceptron +where + T: Float, + A: Activate + Default, +{ + fn from((weights, bias): (Array1, Array0)) -> Self { + Self { + activation: A::default(), + node: Node::from((weights, bias)), + } + } +} + +impl From<(Array1, T)> for Perceptron +where + T: NdFloat, + A: Activate + Default, +{ + fn from((weights, bias): (Array1, T)) -> Self { + Self { + activation: A::default(), + node: Node::from((weights, bias)), + } + } +} + +impl From<(Array1, Array0, A)> for 
Perceptron +where + T: Float, + A: Activate, +{ + fn from((weights, bias, activation): (Array1, Array0, A)) -> Self { + Self { + activation, + node: Node::from((weights, bias)), + } + } +} + +impl From<(Array1, T, A)> for Perceptron +where + T: NdFloat, + A: Activate, +{ + fn from((weights, bias, activation): (Array1, T, A)) -> Self { + Self { + activation, + node: Node::from((weights, bias)), + } + } +} + +impl From> for (Array1, Option>) +where + T: Float, + A: Activate, +{ + fn from(neuron: Perceptron) -> Self { + neuron.node().clone().into() + } +} diff --git a/ml/linear/src/cmp/neurons/synapse.rs b/ml/linear/src/cmp/neurons/synapse.rs new file mode 100644 index 00000000..f04a1c80 --- /dev/null +++ b/ml/linear/src/cmp/neurons/synapse.rs @@ -0,0 +1,9 @@ +/* + Appellation: synapse + Contrib: FL03 +*/ + +pub struct Synapse { + pub layer: usize, + pub node: usize, +} diff --git a/ml/linear/src/params/features.rs b/ml/linear/src/cmp/params/features.rs similarity index 100% rename from ml/linear/src/params/features.rs rename to ml/linear/src/cmp/params/features.rs diff --git a/ml/linear/src/params/kinds.rs b/ml/linear/src/cmp/params/kinds.rs similarity index 94% rename from ml/linear/src/params/kinds.rs rename to ml/linear/src/cmp/params/kinds.rs index 660fa41b..e618bc7a 100644 --- a/ml/linear/src/params/kinds.rs +++ b/ml/linear/src/cmp/params/kinds.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{EnumIs, EnumIter, EnumString, EnumVariantNames}; +use strum::{EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; pub trait ParamType: ToString { fn kind(&self) -> String; @@ -14,16 +14,17 @@ pub trait ParamType: ToString { Debug, Default, Deserialize, + EnumCount, EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[non_exhaustive] #[repr(usize)] diff --git a/ml/linear/src/params/mod.rs b/ml/linear/src/cmp/params/mod.rs similarity index 87% rename from ml/linear/src/params/mod.rs rename to ml/linear/src/cmp/params/mod.rs index e2ce6bd1..cb0782bb 100644 --- a/ml/linear/src/params/mod.rs +++ b/ml/linear/src/cmp/params/mod.rs @@ -7,3 +7,6 @@ pub use self::{features::*, kinds::*, store::*}; pub(crate) mod features; pub(crate) mod kinds; pub(crate) mod store; + +#[cfg(test)] +mod tests {} diff --git a/ml/linear/src/cmp/params/store.rs b/ml/linear/src/cmp/params/store.rs new file mode 100644 index 00000000..c6e29168 --- /dev/null +++ b/ml/linear/src/cmp/params/store.rs @@ -0,0 +1,244 @@ +/* + Appellation: params + Contrib: FL03 +*/ +use super::LayerShape; +use crate::cmp::neurons::Node; +use crate::core::prelude::GenerateRandom; +use crate::neural::prelude::{Features, Forward}; +use ndarray::linalg::Dot; +use ndarray::prelude::{Array, Array1, Array2, Axis, Dimension, NdFloat}; +use ndarray::{LinalgScalar, ShapeError}; +use ndarray_rand::rand_distr::uniform::SampleUniform; +use ndarray_rand::rand_distr::{Distribution, StandardNormal}; +use num::{Float, Num, Signed}; +use serde::{Deserialize, Serialize}; +use std::ops; + +#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] +pub struct LinearParams { + bias: Option>, + pub features: LayerShape, + weights: Array2, +} + +impl LinearParams { + pub fn bias(&self) -> Option<&Array1> { + self.bias.as_ref() + } + + pub fn bias_mut(&mut self) -> Option<&mut Array1> { + self.bias.as_mut() + } + + pub fn features(&self) -> &LayerShape { + &self.features + } + + pub fn features_mut(&mut self) -> &mut LayerShape { + &mut self.features + } + + pub fn 
is_biased(&self) -> bool { + self.bias.is_some() + } + + pub fn reshape(&mut self, features: LayerShape) -> Result<(), ShapeError> + where + T: Clone, + { + self.features = features; + self.weights = self.weights().clone().into_shape(features.out_by_in())?; + if let Some(bias) = self.bias_mut() { + *bias = bias.clone().into_shape(features.outputs())?; + } + Ok(()) + } + + pub fn set_bias(&mut self, bias: Option>) { + self.bias = bias; + } + + pub fn set_weights(&mut self, weights: Array2) { + self.weights = weights; + } + + pub fn weights(&self) -> &Array2 { + &self.weights + } + + pub fn weights_mut(&mut self) -> &mut Array2 { + &mut self.weights + } + + pub fn with_bias(mut self, bias: Option>) -> Self { + self.bias = bias; + self + } + + pub fn with_weights(mut self, weights: Array2) -> Self { + self.weights = weights; + self + } +} + +impl LinearParams +where + T: Clone + Num, +{ + pub fn new(bias: Option>, weights: Array2) -> Self { + let features = LayerShape::new(weights.ncols(), weights.nrows()); + Self { + bias, + features, + weights, + } + } + + pub fn zeros(biased: bool, features: LayerShape) -> Self { + let bias = if biased { + Some(Array1::zeros(features.outputs())) + } else { + None + }; + Self { + bias, + features, + weights: Array2::zeros(features.out_by_in()), + } + } + + pub fn biased(features: LayerShape) -> Self { + Self::zeros(true, features) + } + + pub fn reset(&mut self) { + if let Some(bias) = self.bias_mut() { + *bias = Array1::zeros(bias.dim()); + } + self.weights = Array2::zeros(self.weights.dim()); + } + + pub fn set_node(&mut self, idx: usize, node: Node) { + if let Some(bias) = node.bias() { + if !self.is_biased() { + let mut tmp = Array1::zeros(self.features().outputs()); + tmp.index_axis_mut(Axis(0), idx).assign(bias); + self.bias = Some(tmp); + } + self.bias + .as_mut() + .unwrap() + .index_axis_mut(Axis(0), idx) + .assign(bias); + } + + self.weights_mut() + .index_axis_mut(Axis(0), idx) + .assign(&node.weights()); + } +} + +impl LinearParams +where + T: LinalgScalar + Signed, +{ + pub fn update_with_gradient(&mut self, gamma: T, gradient: &Array2) { + self.weights_mut().scaled_add(-gamma, gradient); + } +} + +impl LinearParams +where + T: Float + SampleUniform, + StandardNormal: Distribution, +{ + pub fn init(mut self, biased: bool) -> Self { + if biased { + self = self.init_bias(); + } + self.init_weight() + } + + pub fn init_bias(mut self) -> Self { + let dk = (T::one() / T::from(self.features().inputs()).unwrap()).sqrt(); + self.bias = Some(Array1::uniform_between(dk, self.features().outputs())); + self + } + + pub fn init_weight(mut self) -> Self { + let dk = (T::one() / T::from(self.features().inputs()).unwrap()).sqrt(); + self.weights = Array2::uniform_between(dk, self.features().out_by_in()); + self + } +} + +impl Features for LinearParams { + fn inputs(&self) -> usize { + self.features.inputs() + } + + fn outputs(&self) -> usize { + self.features.outputs() + } +} + +impl Forward> for LinearParams +where + D: Dimension, + T: NdFloat, + Array: Dot, Output = Array> + ops::Add, Output = Array>, +{ + type Output = Array; + + fn forward(&self, input: &Array) -> Self::Output { + let w = self.weights().t().to_owned(); + if let Some(bias) = self.bias() { + return input.dot(&w) + bias.clone(); + } + input.dot(&w) + } +} + +impl IntoIterator for LinearParams +where + T: Float, +{ + type Item = Node; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + if let Some(bias) = self.bias() { + return self + .weights() + 
.axis_iter(Axis(0)) + .zip(bias.axis_iter(Axis(0))) + .map(|(w, b)| (w.to_owned(), b.to_owned()).into()) + .collect::>() + .into_iter(); + } + self.weights() + .axis_iter(Axis(0)) + .map(|w| (w.to_owned(), None).into()) + .collect::>() + .into_iter() + } +} + +impl FromIterator> for LinearParams +where + T: Float, +{ + fn from_iter>>(nodes: I) -> Self { + let nodes = nodes.into_iter().collect::>(); + let mut iter = nodes.iter(); + let node = iter.next().unwrap(); + let shape = LayerShape::new(node.features(), nodes.len()); + let mut params = Self::zeros(true, shape); + params.set_node(0, node.clone()); + for (i, node) in iter.into_iter().enumerate() { + params.set_node(i + 1, node.clone()); + } + params + } +} diff --git a/ml/linear/src/conv/mod.rs b/ml/linear/src/conv/mod.rs index c804f96e..73681242 100644 --- a/ml/linear/src/conv/mod.rs +++ b/ml/linear/src/conv/mod.rs @@ -2,3 +2,9 @@ Appellation: conv Contrib: FL03 */ +pub use self::module::*; + +pub(crate) mod module; + +#[cfg(test)] +mod tests {} diff --git a/ml/linear/src/conv/module.rs b/ml/linear/src/conv/module.rs new file mode 100644 index 00000000..bebd9632 --- /dev/null +++ b/ml/linear/src/conv/module.rs @@ -0,0 +1,6 @@ +/* + Appellation: conv + Contrib: FL03 +*/ + +pub struct Conv; diff --git a/ml/linear/src/dense/mod.rs b/ml/linear/src/dense/mod.rs index db9dd16e..1ccf1162 100644 --- a/ml/linear/src/dense/mod.rs +++ b/ml/linear/src/dense/mod.rs @@ -2,3 +2,9 @@ Appellation: dense Contrib: FL03 */ +pub use self::module::*; + +pub(crate) mod module; + +#[cfg(test)] +mod tests {} diff --git a/ml/linear/src/dense/module.rs b/ml/linear/src/dense/module.rs new file mode 100644 index 00000000..413aac98 --- /dev/null +++ b/ml/linear/src/dense/module.rs @@ -0,0 +1,6 @@ +/* + Appellation: module + Contrib: FL03 +*/ + +pub struct Dense; diff --git a/ml/linear/src/lib.rs b/ml/linear/src/lib.rs index 9a1908a6..2faf5b39 100644 --- a/ml/linear/src/lib.rs +++ b/ml/linear/src/lib.rs @@ -7,10 +7,10 @@ //! This library implements the framework for building linear models. //! 
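For orientation, here is a small numeric check of the affine forward pass that `Node::linear` and `LinearParams::forward` above both compute, namely `x · Wᵀ + b` with weights stored in the `(outputs, inputs)` layout of `LayerShape::out_by_in()`. The numbers are made up and the sketch stands alone on plain `ndarray`.

use ndarray::prelude::{array, Array1, Array2};

fn main() {
    // Weights are (outputs, inputs): 2 outputs over 3 input features.
    let weights: Array2<f64> = array![[1.0, 0.0, 2.0], [0.5, 1.0, 0.0]];
    let bias: Array1<f64> = array![0.1, -0.1];
    // Two samples, three features each.
    let input: Array2<f64> = array![[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]];

    // The forward rule used by the params types above: x · Wᵀ + b,
    // with the bias broadcast across the sample axis.
    let output = input.dot(&weights.t()) + &bias;
    assert_eq!(output.dim(), (2, 2));
    println!("{output:?}");
}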
+pub mod cmp; pub mod conv; pub mod dense; pub mod model; -pub mod params; pub(crate) use concision_core as core; pub(crate) use concision_neural as neural; diff --git a/ml/linear/src/model/layer.rs b/ml/linear/src/model/layer.rs new file mode 100644 index 00000000..725e6f14 --- /dev/null +++ b/ml/linear/src/model/layer.rs @@ -0,0 +1,289 @@ +/* + Appellation: model + Contrib: FL03 +*/ +use crate::cmp::neurons::{Node, Perceptron}; +use crate::cmp::params::LayerShape; +use crate::cmp::params::LinearParams as LayerParams; +use crate::neural::prelude::{Activate, Features, Forward, Gradient}; + +use ndarray::prelude::{Array2, Ix1, NdFloat}; +use ndarray::ShapeError; +use ndarray_rand::rand_distr::uniform::SampleUniform; +use ndarray_rand::rand_distr::{Distribution, StandardNormal}; +use ndarray_stats::DeviationExt; +use num::{Float, Signed}; +use serde::{Deserialize, Serialize}; + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +pub struct Linear>> +where + A: Activate, + T: Float, +{ + activator: A, + features: LayerShape, + name: String, + params: LayerParams, +} + +impl Linear +where + A: Activate, + T: Float, +{ + pub fn new(activator: A, biased: bool, features: LayerShape, name: impl ToString) -> Self { + Self { + activator, + features, + name: name.to_string(), + params: LayerParams::zeros(biased, features), + } + } + + pub fn from_features(inputs: usize, outputs: usize) -> Self + where + A: Default, + { + let features = LayerShape::new(inputs, outputs); + Self { + activator: A::default(), + features, + name: String::new(), + params: LayerParams::zeros(false, features), + } + } + + pub fn activator(&self) -> &A { + &self.activator + } + + pub fn as_dyn(&self) -> Linear>> + where + A: Clone + 'static, + { + Linear { + activator: Box::new(self.activator.clone()), + features: self.features.clone(), + name: self.name.clone(), + params: self.params.clone(), + } + } + + pub fn features(&self) -> &LayerShape { + &self.features + } + + pub fn features_mut(&mut self) -> &mut LayerShape { + &mut self.features + } + + pub fn name(&self) -> &str { + &self.name + } + + pub fn params(&self) -> &LayerParams { + &self.params + } + + pub fn params_mut(&mut self) -> &mut LayerParams { + &mut self.params + } + + pub fn set_name(&mut self, name: impl ToString) { + self.name = name.to_string(); + } + + pub fn set_node(&mut self, idx: usize, neuron: &Perceptron) + where + A: Activate, + { + self.params.set_node(idx, neuron.node().clone()); + } + + pub fn reshape(&mut self, inputs: usize, outputs: usize) -> Result<(), ShapeError> { + self.features = LayerShape::new(inputs, outputs); + self.params.reshape(self.features) + } + + pub fn validate_layer(&self, other: &Self, next: bool) -> bool { + if next { + return self.features().inputs() == other.features().outputs(); + } + self.features().outputs() == other.features().inputs() + } + + pub fn with_name(mut self, name: impl ToString) -> Self { + self.name = name.to_string(); + self + } +} + +impl Linear +where + A: Activate, + T: Float + 'static, +{ + pub fn apply_gradient(&mut self, gamma: T, gradient: F) + where + F: Fn(&Array2) -> Array2, + { + let grad = gradient(&self.params.weights()); + self.params.weights_mut().scaled_add(-gamma, &grad); + } + + pub fn update_with_gradient(&mut self, gamma: T, grad: &Array2) { + self.params.weights_mut().scaled_add(-gamma, grad); + } +} + +impl Linear +where + A: Activate, + T: NdFloat, +{ + pub fn linear(&self, args: &Array2) -> Array2 { + self.params().forward(args) + } +} + +impl Linear +where + A: Activate 
+ Gradient, + T: NdFloat + Signed, +{ + pub fn grad(&mut self, gamma: T, args: &Array2, targets: &Array2) -> T { + let ns = T::from(args.shape()[0]).unwrap(); + let pred = self.forward(args); + + let scale = T::from(2).unwrap() * ns; + + let errors = &pred - targets; + let dz = errors * self.activator.gradient(&pred); + let dw = args.t().dot(&dz) / scale; + + self.params_mut().weights_mut().scaled_add(-gamma, &dw.t()); + + let loss = targets + .mean_sq_err(&pred) + .expect("Failed to calculate loss"); + T::from(loss).unwrap() + } +} + +impl Linear +where + A: Activate, + T: Float + SampleUniform, + StandardNormal: Distribution, +{ + pub fn init(mut self, biased: bool) -> Self { + self.params = self.params.init(biased); + self + } +} + +impl Features for Linear +where + A: Activate, + T: Float, +{ + fn inputs(&self) -> usize { + self.features.inputs() + } + + fn outputs(&self) -> usize { + self.features.outputs() + } +} + +// impl Forward> for Layer +// where +// A: Activate, +// D: Dimension, +// T: NdFloat, +// Array: Dot, Output = Array>, +// { +// type Output = Array2; + +// fn forward(&self, args: &Array2) -> Self::Output { +// self.activator.activate(&self.linear(args)) +// } +// } + +impl Forward> for Linear +where + A: Activate, + T: NdFloat, +{ + type Output = Array2; + + fn forward(&self, args: &Array2) -> Self::Output { + self.activator.activate(&self.linear(args)) + } +} + +// impl PartialOrd for Layer +// where +// A: Activate + PartialEq, +// T: Float, +// { +// fn partial_cmp(&self, other: &Self) -> Option { +// self.position.partial_cmp(&other.position) +// } +// } + +// impl From for Layer +// where +// A: Activate + Default, +// S: IntoDimension +// T: Float, +// { +// fn from(features: LayerShape) -> Self { +// Self::new(features, LayerPosition::input()) +// } +// } + +impl From for Linear +where + A: Activate + Default, + T: Float, +{ + fn from(features: LayerShape) -> Self { + Self { + activator: A::default(), + features, + name: String::new(), + params: LayerParams::zeros(false, features), + } + } +} + +impl IntoIterator for Linear +where + A: Activate + Default, + T: Float, +{ + type Item = Node; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.params.into_iter() + } +} + +impl FromIterator> for Linear +where + A: Activate + Default, + T: Float, +{ + fn from_iter>>(nodes: I) -> Self { + let params = LayerParams::from_iter(nodes); + Self { + activator: A::default(), + features: *params.features(), + name: String::new(), + params, + } + } +} diff --git a/ml/linear/src/model/mod.rs b/ml/linear/src/model/mod.rs index a8eee288..d16397bb 100644 --- a/ml/linear/src/model/mod.rs +++ b/ml/linear/src/model/mod.rs @@ -4,7 +4,52 @@ */ //! # Linear Model //! 
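The gradient step performed by `Linear::grad` above can be checked in isolation. The sketch below mirrors that update, specialised to the identity activation so the activator-gradient factor is all ones and `dz` reduces to the raw errors; the data and learning rate are invented for illustration only.

use ndarray::prelude::{array, Array2};

fn main() {
    let gamma = 0.1; // learning rate
    let x: Array2<f64> = array![[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]]; // 3 samples, 2 inputs
    let targets: Array2<f64> = array![[1.0], [2.0], [3.0]];
    let mut weights: Array2<f64> = Array2::zeros((1, 2)); // (outputs, inputs)

    // One step of the update in Linear::grad: dz = errors (identity activation),
    // dw = xᵀ · dz / (2 · samples), then weights -= gamma * dwᵀ.
    let samples = x.shape()[0] as f64;
    let pred = x.dot(&weights.t());
    let errors = &pred - &targets;
    let dw = x.t().dot(&errors) / (2.0 * samples);
    weights.scaled_add(-gamma, &dw.t());

    // Mean squared error before the step, analogous to the mean_sq_err call above.
    let loss = errors.mapv(|e| e * e).mean().unwrap();
    println!("loss: {loss}, updated weights: {weights:?}");
}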
-pub use self::{config::*, module::*}; +pub use self::{config::*, layer::*, module::*}; pub(crate) mod config; +pub(crate) mod layer; pub(crate) mod module; + +#[cfg(test)] +mod tests { + use super::*; + use crate::cmp::neurons::Node; + use crate::cmp::params::LayerShape; + use crate::core::prelude::linarr; + use crate::neural::prelude::{Forward, Softmax}; + use ndarray::prelude::Ix2; + + #[test] + fn test_linear() { + let (samples, inputs, outputs) = (20, 5, 3); + let features = LayerShape::new(inputs, outputs); + + let args = linarr::((samples, inputs)).unwrap(); + + let layer = Linear::::from(features).init(true); + + let pred = layer.forward(&args); + + assert_eq!(pred.dim(), (samples, outputs)); + + let nodes = (0..outputs) + .map(|_| Node::::new(inputs).init(true)) + .collect::>(); + let layer = Linear::::from_iter(nodes); + assert_eq!(layer.features(), &features); + } + + #[test] + fn test_linear_iter() { + let (_samples, inputs, outputs) = (20, 5, 3); + let features = LayerShape::new(inputs, outputs); + + let layer = Linear::::from(features).init(true); + + for node in layer.into_iter() { + assert!(node.is_biased()); + assert_eq!(node.features(), inputs); + assert_eq!(node.bias().as_ref().unwrap().dim(), ()); + } + } +} diff --git a/ml/linear/src/model/module.rs b/ml/linear/src/model/module.rs index 4fafa7cc..501538bc 100644 --- a/ml/linear/src/model/module.rs +++ b/ml/linear/src/model/module.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use crate::core::params::{Biased, Weighted}; -use crate::neural::models::exp::{Module, ModuleParams}; +use crate::neural::models::exp::ModuleParams; use crate::neural::prelude::Forward; use ndarray::prelude::{Array2, NdFloat}; use num::Float; @@ -41,22 +41,22 @@ where } } -impl Module for LinearModel -where - T: NdFloat, -{ - fn name(&self) -> &str { - "LinearModel" - } +// impl Module> for LinearModel> +// where +// T: NdFloat, +// { +// fn name(&self) -> &str { +// "LinearModel" +// } - fn parameters(&self) -> &ModuleParams { - &self.params - } +// fn parameters(&self) -> &ModuleParams { +// &self.params +// } - fn parameters_mut(&mut self) -> &mut ModuleParams { - &mut self.params - } -} +// fn parameters_mut(&mut self) -> &mut ModuleParams { +// &mut self.params +// } +// } impl Biased for LinearModel where @@ -96,23 +96,6 @@ where } } -impl crate::neural::prelude::Weighted for LinearModel -where - T: NdFloat, -{ - fn weights(&self) -> &Array2 { - &self.params["weight"] - } - - fn weights_mut(&mut self) -> &mut Array2 { - self.params.get_mut("weight").unwrap() - } - - fn set_weights(&mut self, weights: Array2) { - self.params.insert("weight".to_string(), weights); - } -} - impl Forward> for LinearModel where T: NdFloat, diff --git a/ml/linear/src/params/store.rs b/ml/linear/src/params/store.rs deleted file mode 100644 index 84127685..00000000 --- a/ml/linear/src/params/store.rs +++ /dev/null @@ -1,202 +0,0 @@ -/* - Appellation: params - Contrib: FL03 -*/ -use super::LayerShape; -use crate::core::prelude::GenerateRandom; -// use crate::core::params::{Biased, Weighted}; -use crate::neural::prelude::{Biased, Features, Forward, Node, Weighted}; -use ndarray::linalg::Dot; -use ndarray::prelude::{Array, Array1, Array2, Axis, Dimension, Ix2, NdFloat}; -use ndarray_rand::rand_distr::uniform::SampleUniform; -use ndarray_rand::rand_distr::{Distribution, StandardNormal}; -use num::Float; -use serde::{Deserialize, Serialize}; -use std::ops; - -#[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] -pub struct LayerParams { - bias: Array1, - pub features: 
LayerShape, - weights: Array2, -} - -impl LayerParams -where - T: Float, -{ - pub fn new(features: LayerShape) -> Self { - Self { - bias: Array1::zeros(features.outputs()), - features, - weights: Array2::zeros(features.out_by_in()), - } - } - - pub fn features(&self) -> &LayerShape { - &self.features - } - - pub fn features_mut(&mut self) -> &mut LayerShape { - &mut self.features - } - - pub fn set_node(&mut self, idx: usize, node: Node) { - self.bias_mut() - .index_axis_mut(Axis(0), idx) - .assign(&node.bias()); - - self.weights_mut() - .index_axis_mut(Axis(0), idx) - .assign(&node.weights()); - } - - pub fn with_bias(mut self, bias: Array1) -> Self { - self.bias = bias; - self - } - - pub fn with_weights(mut self, weights: Array2) -> Self { - self.weights = weights; - self - } -} - -impl LayerParams -where - T: Float + 'static, -{ - pub fn update_with_gradient(&mut self, gamma: T, gradient: &Array2) { - self.weights_mut().scaled_add(-gamma, gradient); - } -} - -impl LayerParams -where - T: NdFloat, -{ - pub fn reset(&mut self) { - self.bias *= T::zero(); - self.weights *= T::zero(); - } -} - -impl LayerParams -where - T: Float + SampleUniform, - StandardNormal: Distribution, -{ - pub fn init(mut self, biased: bool) -> Self { - if biased { - self = self.init_bias(); - } - self.init_weight() - } - - pub fn init_bias(mut self) -> Self { - let dk = (T::one() / T::from(self.features().inputs()).unwrap()).sqrt(); - self.bias = Array1::uniform_between(dk, self.features().outputs()); - self - } - - pub fn init_weight(mut self) -> Self { - let dk = (T::one() / T::from(self.features().inputs()).unwrap()).sqrt(); - self.weights = Array2::uniform_between(dk, self.features().out_by_in()); - self - } -} - -impl Biased for LayerParams -where - T: Float, -{ - fn bias(&self) -> &Array1 { - &self.bias - } - - fn bias_mut(&mut self) -> &mut Array1 { - &mut self.bias - } - - fn set_bias(&mut self, bias: Array1) { - self.bias = bias; - } -} - -impl Weighted for LayerParams -where - T: Float, -{ - fn set_weights(&mut self, weights: Array2) { - self.weights = weights; - } - - fn weights(&self) -> &Array2 { - &self.weights - } - - fn weights_mut(&mut self) -> &mut Array2 { - &mut self.weights - } -} - -impl Features for LayerParams -where - T: Float, -{ - fn inputs(&self) -> usize { - self.features.inputs() - } - - fn outputs(&self) -> usize { - self.features.outputs() - } -} - -impl Forward> for LayerParams -where - D: Dimension, - T: NdFloat, - Array: Dot, Output = Array> + ops::Add, Output = Array>, -{ - type Output = Array; - - fn forward(&self, input: &Array) -> Self::Output { - input.dot(&self.weights().t().to_owned()) + self.bias().clone() - } -} - -impl IntoIterator for LayerParams -where - T: Float, -{ - type Item = Node; - type IntoIter = std::vec::IntoIter; - - fn into_iter(self) -> Self::IntoIter { - self.weights() - .axis_iter(Axis(0)) - .zip(self.bias().axis_iter(Axis(0))) - .map(|(w, b)| (w.to_owned(), b.to_owned()).into()) - .collect::>() - .into_iter() - } -} - -impl FromIterator> for LayerParams -where - T: Float, -{ - fn from_iter>>(nodes: I) -> Self { - let nodes = nodes.into_iter().collect::>(); - let mut iter = nodes.iter(); - let node = iter.next().unwrap(); - let shape = LayerShape::new(*node.features(), nodes.len()); - let mut params = LayerParams::new(shape); - params.set_node(0, node.clone()); - for (i, node) in iter.into_iter().enumerate() { - params.set_node(i + 1, node.clone()); - } - params - } -} diff --git a/ml/neural/Cargo.toml b/ml/neural/Cargo.toml index d70fd817..14022756 
100644 --- a/ml/neural/Cargo.toml +++ b/ml/neural/Cargo.toml @@ -32,6 +32,7 @@ test = true concision-core = { path = "../../core", version = "0.1.12" } anyhow.workspace = true +itertools.workspace = true ndarray = { features = ["serde-1"], version = "0.15" } ndarray-rand.workspace = true ndarray-stats.workspace = true diff --git a/ml/neural/src/errors/error.rs b/ml/neural/src/errors/error.rs index e5e7ce73..1c00849b 100644 --- a/ml/neural/src/errors/error.rs +++ b/ml/neural/src/errors/error.rs @@ -4,16 +4,16 @@ */ use serde::{Deserialize, Serialize}; use smart_default::SmartDefault; -use strum::{Display, EnumIs, EnumIter, EnumVariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, VariantNames}; #[derive( Clone, Debug, Deserialize, Display, + EnumCount, EnumIs, EnumIter, - EnumVariantNames, Eq, Hash, Ord, @@ -21,17 +21,18 @@ use strum::{Display, EnumIs, EnumIter, EnumVariantNames}; PartialOrd, Serialize, SmartDefault, + VariantNames, )] #[non_exhaustive] #[serde(rename_all = "lowercase")] #[strum(serialize_all = "lowercase")] pub enum MlError { + Compute(ComputeError), Data(String), Dimension(String), #[default] Error(String), Network(NetworkError), - Process(ProcessError), } impl std::error::Error for MlError {} @@ -65,9 +66,9 @@ impl From for MlError { Debug, Deserialize, Display, + EnumCount, EnumIs, EnumIter, - EnumVariantNames, Eq, Hash, Ord, @@ -75,6 +76,7 @@ impl From for MlError { PartialOrd, Serialize, SmartDefault, + VariantNames, )] #[non_exhaustive] #[serde(rename_all = "lowercase")] @@ -89,9 +91,9 @@ pub enum PredictError { Debug, Deserialize, Display, + EnumCount, EnumIs, EnumIter, - EnumVariantNames, Eq, Hash, Ord, @@ -99,11 +101,12 @@ pub enum PredictError { PartialOrd, Serialize, SmartDefault, + VariantNames, )] #[non_exhaustive] #[serde(rename_all = "lowercase")] #[strum(serialize_all = "lowercase")] -pub enum ProcessError { +pub enum ComputeError { Arithmetic(String), #[default] Process(String), @@ -115,9 +118,9 @@ pub enum ProcessError { Debug, Deserialize, Display, + EnumCount, EnumIs, EnumIter, - EnumVariantNames, Eq, Hash, Ord, @@ -125,6 +128,7 @@ pub enum ProcessError { PartialOrd, Serialize, SmartDefault, + VariantNames, )] #[non_exhaustive] #[serde(rename_all = "lowercase")] diff --git a/ml/neural/src/errors/mod.rs b/ml/neural/src/errors/mod.rs index 267dace8..9e51d373 100644 --- a/ml/neural/src/errors/mod.rs +++ b/ml/neural/src/errors/mod.rs @@ -2,7 +2,7 @@ Appellation: errors Contrib: FL03 */ -pub use self::{error::*, utils::*}; +pub use self::error::*; pub(crate) mod error; diff --git a/ml/neural/src/func/activate/activator.rs b/ml/neural/src/func/activate/activator.rs index d4af82e7..2a4f044f 100644 --- a/ml/neural/src/func/activate/activator.rs +++ b/ml/neural/src/func/activate/activator.rs @@ -40,7 +40,7 @@ where T: Clone, { pub fn linear() -> Self { - Self::new(Box::new(super::Linear::new())) + Self::new(Box::new(super::LinearActivation::new())) } } diff --git a/ml/neural/src/func/activate/linear.rs b/ml/neural/src/func/activate/linear.rs index 3836e9f6..0d6608e2 100644 --- a/ml/neural/src/func/activate/linear.rs +++ b/ml/neural/src/func/activate/linear.rs @@ -10,9 +10,9 @@ use serde::{Deserialize, Serialize}; #[derive( Clone, Copy, Debug, Default, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, )] -pub struct Linear; +pub struct LinearActivation; -impl Linear { +impl LinearActivation { pub fn new() -> Self { Self::default() } @@ -33,7 +33,7 @@ impl Linear { } pub fn linear(args: &T) -> T { - Linear::method()(args) + 
LinearActivation::method()(args) } pub fn method() -> fn(&T) -> T { @@ -45,7 +45,7 @@ impl Linear { } } -impl Gradient for Linear +impl Gradient for LinearActivation where D: Dimension, T: Clone + One, @@ -55,7 +55,7 @@ where } } -impl Fn<(&T,)> for Linear +impl Fn<(&T,)> for LinearActivation where T: Clone, { @@ -64,7 +64,7 @@ where } } -impl FnMut<(&T,)> for Linear +impl FnMut<(&T,)> for LinearActivation where T: Clone, { @@ -73,7 +73,7 @@ where } } -impl FnOnce<(&T,)> for Linear +impl FnOnce<(&T,)> for LinearActivation where T: Clone, { diff --git a/ml/neural/src/func/activate/mod.rs b/ml/neural/src/func/activate/mod.rs index 9bec62b9..729b9357 100644 --- a/ml/neural/src/func/activate/mod.rs +++ b/ml/neural/src/func/activate/mod.rs @@ -140,7 +140,7 @@ mod tests { let exp = array![0.0, 1.0, 2.0]; let args = array![0.0, 1.0, 2.0]; - assert_eq!(Linear::new().activate(&args), exp); - assert_eq!(Linear(&args), exp); + assert_eq!(LinearActivation::new().activate(&args), exp); + assert_eq!(LinearActivation(&args), exp); } } diff --git a/ml/neural/src/func/block.rs b/ml/neural/src/func/block.rs index 02d6669c..24974478 100644 --- a/ml/neural/src/func/block.rs +++ b/ml/neural/src/func/block.rs @@ -2,7 +2,7 @@ Appellation: block Contrib: FL03 */ -use crate::func::activate::{Activate, Linear, ReLU, Softmax}; +use crate::func::activate::{Activate, LinearActivation, ReLU, Softmax}; use num::Float; use std::marker::PhantomData; @@ -15,7 +15,7 @@ pub struct FuncBlock { method: Vec T>, } -pub struct FFNBlock +pub struct FFNBlock where I: Activate, H: Activate, diff --git a/ml/neural/src/func/loss/kinds.rs b/ml/neural/src/func/loss/kinds.rs index 676dd32b..92bddbc8 100644 --- a/ml/neural/src/func/loss/kinds.rs +++ b/ml/neural/src/func/loss/kinds.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; #[derive( Clone, @@ -12,16 +12,17 @@ use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; Default, Deserialize, Display, + EnumCount, EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] diff --git a/ml/neural/src/func/loss/mod.rs b/ml/neural/src/func/loss/mod.rs index b0883478..a8c79cc2 100644 --- a/ml/neural/src/func/loss/mod.rs +++ b/ml/neural/src/func/loss/mod.rs @@ -8,7 +8,7 @@ //! Overall, neural network models aim to minimize the average loss by adjusting certain hyperparameters, //! the weights and biases. -pub use self::{kinds::*, utils::*}; +pub use self::kinds::*; pub(crate) mod kinds; diff --git a/ml/neural/src/func/prop/mod.rs b/ml/neural/src/func/prop/mod.rs index 4510df4a..92ae0562 100644 --- a/ml/neural/src/func/prop/mod.rs +++ b/ml/neural/src/func/prop/mod.rs @@ -5,7 +5,7 @@ //! # Propagation //! //! This module describes the propagation of data through a neural network. 
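// Illustrative only: the single dense step that forward propagation reduces to for one
// layer, y = x · Wᵀ + b, written directly against ndarray. Shapes follow the convention
// used elsewhere in this crate: x is (samples, inputs), W is (outputs, inputs), b is (outputs,).
use ndarray::prelude::{Array1, Array2};

fn dense_forward(x: &Array2<f64>, weights: &Array2<f64>, bias: &Array1<f64>) -> Array2<f64> {
    // Multiply by the transposed weights and broadcast-add the bias across the sample axis.
    x.dot(&weights.t()) + bias
}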
-pub use self::{modes::*, results::*, utils::*}; +pub use self::{modes::*, results::*}; pub(crate) mod modes; pub(crate) mod results; diff --git a/ml/neural/src/func/prop/modes.rs b/ml/neural/src/func/prop/modes.rs index 3860b43e..cb58ad73 100644 --- a/ml/neural/src/func/prop/modes.rs +++ b/ml/neural/src/func/prop/modes.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; +use strum::{Display, EnumIs, EnumIter, EnumString, VariantNames}; #[derive( Clone, @@ -15,13 +15,13 @@ use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] diff --git a/ml/neural/src/layers/cmp/kinds.rs b/ml/neural/src/layers/cmp/kinds.rs index 703c429b..9fa587bc 100644 --- a/ml/neural/src/layers/cmp/kinds.rs +++ b/ml/neural/src/layers/cmp/kinds.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; +use strum::{Display, EnumIs, EnumIter, EnumString, VariantNames}; #[derive( Clone, @@ -15,13 +15,13 @@ use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] diff --git a/ml/neural/src/layers/exp/layer.rs b/ml/neural/src/layers/exp/layer.rs deleted file mode 100644 index b2724850..00000000 --- a/ml/neural/src/layers/exp/layer.rs +++ /dev/null @@ -1,180 +0,0 @@ -/* - Appellation: model - Contrib: FL03 -*/ -use super::LayerConfig; -use crate::func::activate::{Activate, Activator}; -use crate::layers::LayerShape; -use crate::prelude::{Forward, ParamGroup, Parameterized, Params}; -use ndarray::prelude::{Array2, Dimension, Ix1, Ix2, NdFloat}; -use ndarray_rand::rand_distr::uniform::SampleUniform; -use ndarray_rand::rand_distr::{Distribution, StandardNormal}; -use num::Float; - -pub struct Layer -where - D: Dimension, - T: Float, -{ - activator: Activator, - config: LayerConfig, - params: ParamGroup, -} - -impl Layer -where - T: Float, -{ - pub fn new(activator: impl Activate + 'static, config: LayerConfig) -> Self { - let params = ParamGroup::new(*config.features()); - Self { - activator: Activator::new(Box::new(activator)), - config, - params, - } - } - - pub fn activator(&self) -> &Activator { - &self.activator - } - - pub fn config(&self) -> &LayerConfig { - &self.config - } - - pub fn config_mut(&mut self) -> &mut LayerConfig { - &mut self.config - } - - // pub fn set_node(&mut self, idx: usize, node: &Node) { - // self.params - // .weights_mut() - // .slice_mut(s![idx, ..]) - // .assign(&node.weights()); - // } - - // pub fn validate_layer(&self, other: &Self, next: bool) -> bool { - // if next { - // return self.features().inputs() == other.features().outputs(); - // } - // self.features().outputs() == other.features().inputs() - // } -} - -impl Layer -where - T: Float + 'static, -{ - pub fn apply_gradient(&mut self, gamma: T, gradient: F) - where - F: Fn(&Array2) -> Array2, - { - let grad = gradient(&self.params.weights()); - self.params.weights_mut().scaled_add(-gamma, &grad); - } - - pub fn update_with_gradient(&mut self, gamma: T, grad: &Array2) { - self.params.weights_mut().scaled_add(-gamma, grad); - } -} - -impl Layer -where - T: NdFloat, -{ - pub fn linear(&self, args: &Array2) 
-> Array2 { - args.dot(&self.params.weights().t()) + self.params.bias() - } -} - -impl Layer -where - T: Float + SampleUniform, - StandardNormal: Distribution, -{ - pub fn init(mut self, biased: bool) -> Self { - self.params = self.params.init(biased); - self - } -} - -impl Forward> for Layer -where - T: NdFloat, -{ - type Output = Array2; - - fn forward(&self, args: &Array2) -> Self::Output { - self.activator.activate(&self.linear(args)) - } -} - -impl Parameterized for Layer -where - D: Dimension, - T: Float, -{ - type Features = D; - type Params = ParamGroup; - - fn features(&self) -> &D { - self.params().features() - } - - fn features_mut(&mut self) -> &mut D { - self.params_mut().features_mut() - } - - fn params(&self) -> &Self::Params { - &self.params - } - - fn params_mut(&mut self) -> &mut Self::Params { - &mut self.params - } -} - -// impl PartialOrd for Layer -// where -// A: Activate + PartialEq, -// T: Float, -// { -// fn partial_cmp(&self, other: &Self) -> Option { -// self.position.partial_cmp(&other.position) -// } -// } - -impl From for Layer -where - T: Float + 'static, -{ - fn from(features: LayerShape) -> Self { - Self::new(Activator::linear(), features.into()) - } -} - -// impl IntoIterator for Layer -// where -// T: Float, -// { -// type Item = Node; -// type IntoIter = std::vec::IntoIter; - -// fn into_iter(self) -> Self::IntoIter { -// self.params.into_iter() -// } -// } - -// impl FromIterator> for Layer -// where -// T: Float, -// { -// fn from_iter>>(nodes: I) -> Self { -// let params = LayerParams::from_iter(nodes); -// Self { -// activator: Activator::linear(), -// config: LayerConfig::from(*params.features()), -// params, -// } -// } -// } diff --git a/ml/neural/src/layers/exp/mod.rs b/ml/neural/src/layers/exp/mod.rs index 89669e8b..444368fc 100644 --- a/ml/neural/src/layers/exp/mod.rs +++ b/ml/neural/src/layers/exp/mod.rs @@ -3,10 +3,9 @@ Contrib: FL03 */ //! 
# Experimental Layers -pub use self::{config::*, layer::*, sublayer::*, wrapper::*}; +pub use self::{config::*, sublayer::*, wrapper::*}; pub(crate) mod config; -pub(crate) mod layer; pub(crate) mod sublayer; pub(crate) mod wrapper; diff --git a/ml/neural/src/layers/exp/sublayer.rs b/ml/neural/src/layers/exp/sublayer.rs index 4dc297c5..49f4df8b 100644 --- a/ml/neural/src/layers/exp/sublayer.rs +++ b/ml/neural/src/layers/exp/sublayer.rs @@ -3,14 +3,14 @@ Contrib: FL03 */ use crate::layers::Layer; -use crate::prelude::{Activate, Forward, LayerNorm, Linear}; +use crate::prelude::{Activate, Forward, LayerNorm, LinearActivation}; use ndarray::prelude::{Array2, NdFloat}; use num::{Float, FromPrimitive}; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] -pub struct Sublayer +pub struct Sublayer where A: Activate, T: Float, diff --git a/ml/neural/src/layers/layer.rs b/ml/neural/src/layers/layer.rs index 17c8996e..92002e80 100644 --- a/ml/neural/src/layers/layer.rs +++ b/ml/neural/src/layers/layer.rs @@ -3,8 +3,8 @@ Contrib: FL03 */ use super::{LayerParams, LayerShape}; -use crate::func::activate::{Activate, Gradient, Linear}; -use crate::prelude::{Features, Forward, Node, Parameterized, Params, Perceptron}; +use crate::func::activate::{Activate, Gradient, LinearActivation}; +use crate::prelude::{Features, Forward, Node, Perceptron}; use ndarray::prelude::{Array2, Ix1, NdFloat}; use ndarray_rand::rand_distr::uniform::SampleUniform; use ndarray_rand::rand_distr::{Distribution, StandardNormal}; @@ -13,7 +13,7 @@ use num::{Float, Signed}; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] -pub struct Layer +pub struct Layer where A: Activate, T: Float, @@ -58,10 +58,26 @@ where &self.activator } + pub fn features(&self) -> &LayerShape { + &self.features + } + + pub fn features_mut(&mut self) -> &mut LayerShape { + &mut self.features + } + pub fn name(&self) -> &str { &self.name } + pub fn params(&self) -> &LayerParams { + &self.params + } + + pub fn params_mut(&mut self) -> &mut LayerParams { + &mut self.params + } + pub fn set_name(&mut self, name: impl ToString) { self.name = name.to_string(); } @@ -205,31 +221,6 @@ where } } -impl Parameterized for Layer -where - A: Activate, - T: Float, -{ - type Features = LayerShape; - type Params = LayerParams; - - fn features(&self) -> &LayerShape { - &self.features - } - - fn features_mut(&mut self) -> &mut LayerShape { - &mut self.features - } - - fn params(&self) -> &LayerParams { - &self.params - } - - fn params_mut(&mut self) -> &mut LayerParams { - &mut self.params - } -} - // impl PartialOrd for Layer // where // A: Activate + PartialEq, diff --git a/ml/neural/src/layers/mod.rs b/ml/neural/src/layers/mod.rs index cf28b291..5a43db19 100644 --- a/ml/neural/src/layers/mod.rs +++ b/ml/neural/src/layers/mod.rs @@ -11,6 +11,7 @@ pub(crate) mod params; pub(crate) mod stack; pub mod exp; +pub mod seq; use crate::prelude::{Activate, ActivateDyn, Forward, Node}; use ndarray::prelude::{Array2, Ix2}; @@ -74,8 +75,7 @@ pub(crate) mod utils { mod tests { use super::*; use crate::core::prelude::linarr; - use crate::func::activate::Softmax; - use crate::prelude::{Biased, Forward, Node, Parameterized}; + use crate::prelude::{Forward, Node, Softmax}; use ndarray::prelude::Ix2; #[test] @@ -106,8 +106,9 @@ mod tests { let layer = Layer::::from(features).init(true); for node in layer.into_iter() { - assert_eq!(node.features(), &inputs); - assert_eq!(node.bias().dim(), ()); + 
assert!(node.is_biased()); + assert_eq!(node.features(), inputs); + assert_eq!(node.bias().as_ref().unwrap().dim(), ()); } } } diff --git a/ml/neural/src/layers/params.rs b/ml/neural/src/layers/params.rs index d50917b3..e42757b5 100644 --- a/ml/neural/src/layers/params.rs +++ b/ml/neural/src/layers/params.rs @@ -4,9 +4,9 @@ */ use super::LayerShape; use crate::core::prelude::GenerateRandom; -use crate::prelude::{Biased, Features, Forward, Node, Weighted}; +use crate::prelude::{Features, Forward, Node}; use ndarray::linalg::Dot; -use ndarray::prelude::{Array, Array1, Array2, Axis, Dimension, Ix2, NdFloat}; +use ndarray::prelude::{Array, Array1, Array2, Axis, Dimension, NdFloat}; use ndarray_rand::rand_distr::uniform::SampleUniform; use ndarray_rand::rand_distr::{Distribution, StandardNormal}; use num::Float; @@ -15,7 +15,7 @@ use std::ops; #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct LayerParams { - bias: Array1, + bias: Option>, pub features: LayerShape, weights: Array2, } @@ -25,13 +25,34 @@ where T: Float, { pub fn new(features: LayerShape) -> Self { + Self::create(false, features) + } + + pub fn biased(features: LayerShape) -> Self { + Self::create(true, features) + } + + pub fn create(biased: bool, features: LayerShape) -> Self { + let bias = if biased { + Some(Array1::zeros(features.outputs())) + } else { + None + }; Self { - bias: Array1::zeros(features.outputs()), + bias, features, weights: Array2::zeros(features.out_by_in()), } } + pub fn bias(&self) -> &Option> { + &self.bias + } + + pub fn bias_mut(&mut self) -> &mut Option> { + &mut self.bias + } + pub fn features(&self) -> &LayerShape { &self.features } @@ -40,17 +61,46 @@ where &mut self.features } + pub fn is_biased(&self) -> bool { + self.bias.is_some() + } + + pub fn set_bias(&mut self, bias: Option>) { + self.bias = bias; + } + pub fn set_node(&mut self, idx: usize, node: Node) { - self.bias_mut() - .index_axis_mut(Axis(0), idx) - .assign(&node.bias()); + if let Some(bias) = node.bias() { + if !self.is_biased() { + let mut tmp = Array1::zeros(self.features().outputs()); + tmp.index_axis_mut(Axis(0), idx).assign(bias); + self.bias = Some(tmp); + } + self.bias + .as_mut() + .unwrap() + .index_axis_mut(Axis(0), idx) + .assign(bias); + } self.weights_mut() .index_axis_mut(Axis(0), idx) .assign(&node.weights()); } - pub fn with_bias(mut self, bias: Array1) -> Self { + pub fn set_weights(&mut self, weights: Array2) { + self.weights = weights; + } + + pub fn weights(&self) -> &Array2 { + &self.weights + } + + pub fn weights_mut(&mut self) -> &mut Array2 { + &mut self.weights + } + + pub fn with_bias(mut self, bias: Option>) -> Self { self.bias = bias; self } @@ -75,7 +125,9 @@ where T: NdFloat, { pub fn reset(&mut self) { - self.bias *= T::zero(); + if let Some(bias) = self.bias() { + self.bias = Some(Array1::zeros(bias.dim())); + } self.weights *= T::zero(); } } @@ -94,7 +146,7 @@ where pub fn init_bias(mut self) -> Self { let dk = (T::one() / T::from(self.features().inputs()).unwrap()).sqrt(); - self.bias = Array1::uniform_between(dk, self.features().outputs()); + self.bias = Some(Array1::uniform_between(dk, self.features().outputs())); self } @@ -105,40 +157,6 @@ where } } -impl Biased for LayerParams -where - T: Float, -{ - fn bias(&self) -> &Array1 { - &self.bias - } - - fn bias_mut(&mut self) -> &mut Array1 { - &mut self.bias - } - - fn set_bias(&mut self, bias: Array1) { - self.bias = bias; - } -} - -impl Weighted for LayerParams -where - T: Float, -{ - fn set_weights(&mut self, weights: 
Array2) { - self.weights = weights; - } - - fn weights(&self) -> &Array2 { - &self.weights - } - - fn weights_mut(&mut self) -> &mut Array2 { - &mut self.weights - } -} - impl Features for LayerParams where T: Float, @@ -161,7 +179,11 @@ where type Output = Array; fn forward(&self, input: &Array) -> Self::Output { - input.dot(&self.weights().t().to_owned()) + self.bias().clone() + let w = self.weights().t().to_owned(); + if let Some(bias) = self.bias() { + return input.dot(&w) + bias.clone(); + } + input.dot(&w) } } @@ -173,10 +195,18 @@ where type IntoIter = std::vec::IntoIter; fn into_iter(self) -> Self::IntoIter { + if let Some(bias) = self.bias() { + return self + .weights() + .axis_iter(Axis(0)) + .zip(bias.axis_iter(Axis(0))) + .map(|(w, b)| (w.to_owned(), b.to_owned()).into()) + .collect::>() + .into_iter(); + } self.weights() .axis_iter(Axis(0)) - .zip(self.bias().axis_iter(Axis(0))) - .map(|(w, b)| (w.to_owned(), b.to_owned()).into()) + .map(|w| (w.to_owned(), None).into()) .collect::>() .into_iter() } @@ -190,8 +220,8 @@ where let nodes = nodes.into_iter().collect::>(); let mut iter = nodes.iter(); let node = iter.next().unwrap(); - let shape = LayerShape::new(*node.features(), nodes.len()); - let mut params = LayerParams::new(shape); + let shape = LayerShape::new(node.features(), nodes.len()); + let mut params = LayerParams::create(true, shape); params.set_node(0, node.clone()); for (i, node) in iter.into_iter().enumerate() { params.set_node(i + 1, node.clone()); diff --git a/ml/neural/src/layers/seq/mod.rs b/ml/neural/src/layers/seq/mod.rs new file mode 100644 index 00000000..74a592d6 --- /dev/null +++ b/ml/neural/src/layers/seq/mod.rs @@ -0,0 +1,11 @@ +/* + Appellation: seq + Contrib: FL03 +*/ + +pub use self::sequential::*; + +pub(crate) mod sequential; + +#[cfg(test)] +mod tests {} diff --git a/ml/neural/src/layers/seq/sequential.rs b/ml/neural/src/layers/seq/sequential.rs new file mode 100644 index 00000000..e876a330 --- /dev/null +++ b/ml/neural/src/layers/seq/sequential.rs @@ -0,0 +1,172 @@ +/* + Appellation: sequential + Contrib: FL03 +*/ +use crate::prelude::Forward; +use serde::{Deserialize, Serialize}; + +pub struct Sequential { + layers: Vec>>, +} + +impl Sequential { + pub fn new() -> Self { + Self { layers: Vec::new() } + } + + pub fn include(mut self, layer: L) -> Self + where + L: Forward + 'static, + { + self.layers.push(Box::new(layer)); + self + } + + pub fn push(&mut self, layer: L) + where + L: Forward + 'static, + { + self.layers.push(Box::new(layer)); + } +} + +impl AsRef<[Box>]> for Sequential { + fn as_ref(&self) -> &[Box>] { + &self.layers + } +} + +impl AsMut<[Box>]> for Sequential { + fn as_mut(&mut self) -> &mut [Box>] { + &mut self.layers + } +} + +impl Extend>> for Sequential { + fn extend>>>(&mut self, iter: I) { + self.layers.extend(iter); + } +} + +impl Forward for Sequential +where + T: Clone, +{ + type Output = T; + + fn forward(&self, input: &T) -> Self::Output { + let mut output = input.clone(); + for layer in &self.layers { + output = layer.forward(&output); + } + output + } +} + +impl FromIterator>> for Sequential { + fn from_iter>>>(iter: I) -> Self { + Self { + layers: Vec::from_iter(iter), + } + } +} + +impl IntoIterator for Sequential { + type Item = Box>; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.layers.into_iter() + } +} + +impl Clone for Sequential +where + Box>: Clone, +{ + fn clone(&self) -> Self { + Self { + layers: self.layers.clone(), + } + } +} + +impl Default for Sequential { + fn 
default() -> Self { + Self::new() + } +} + +impl std::fmt::Debug for Sequential +where + Box>: std::fmt::Debug, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Sequential") + .field("layers", &self.layers) + .finish() + } +} + +impl PartialEq for Sequential +where + Box>: PartialEq, +{ + fn eq(&self, other: &Self) -> bool { + self.layers == other.layers + } +} + +impl Eq for Sequential where Box>: Eq {} + +impl std::hash::Hash for Sequential +where + Box>: std::hash::Hash, +{ + fn hash(&self, state: &mut H) { + self.layers.hash(state); + } +} + +impl PartialOrd for Sequential +where + Box>: PartialOrd, +{ + fn partial_cmp(&self, other: &Self) -> Option { + self.layers.partial_cmp(&other.layers) + } +} + +impl Ord for Sequential +where + Box>: Ord, +{ + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.layers.cmp(&other.layers) + } +} + +impl<'a, T> Deserialize<'a> for Sequential +where + Box>: Deserialize<'a>, +{ + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'a>, + { + let layers = Vec::>>::deserialize(deserializer)?; + Ok(Self { layers }) + } +} + +impl Serialize for Sequential +where + Box>: Serialize, +{ + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.layers.serialize(serializer) + } +} diff --git a/ml/neural/src/layers/stack.rs b/ml/neural/src/layers/stack.rs index 60eb1a41..e5d18792 100644 --- a/ml/neural/src/layers/stack.rs +++ b/ml/neural/src/layers/stack.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use crate::layers::{Layer, LayerShape}; -use crate::prelude::{Activate, Features, Linear, Parameterized}; +use crate::prelude::{Activate, Features, LinearActivation}; use ndarray_rand::rand_distr::uniform::SampleUniform; use ndarray_rand::rand_distr::{Distribution, StandardNormal}; use num::Float; @@ -30,7 +30,7 @@ where /// A [Stack] is a collection of [Layer]s, typically used to construct the hidden /// layers of a deep neural network. 
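/// Unlike the type-erased [Sequential] container introduced above, a [Stack] stores
/// concrete [Layer]s, which is what lets it derive `Clone`, `Serialize`, and `Deserialize`
/// directly instead of conditionally on the boxed trait object.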
#[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)] -pub struct Stack +pub struct Stack where A: Activate, T: Float, diff --git a/ml/neural/src/lib.rs b/ml/neural/src/lib.rs index 805da18e..b3372643 100644 --- a/ml/neural/src/lib.rs +++ b/ml/neural/src/lib.rs @@ -30,7 +30,6 @@ pub mod prelude { pub use crate::specs::*; pub use crate::utils::*; - pub use crate::cmp::*; pub use crate::errors::*; pub use crate::func::{activate::*, loss::*, prop::*, rms::*}; pub use crate::layers::*; diff --git a/ml/neural/src/models/exp/mod.rs b/ml/neural/src/models/exp/mod.rs index e60e5f74..c73329b1 100644 --- a/ml/neural/src/models/exp/mod.rs +++ b/ml/neural/src/models/exp/mod.rs @@ -8,31 +8,31 @@ pub use self::{modules::*, store::*}; pub(crate) mod modules; pub(crate) mod store; -use crate::prelude::Forward; -use ndarray::prelude::Array2; -use num::Float; +// use crate::prelude::Predict; +// use ndarray::prelude::Array2; +// use num::Float; -pub trait Model: Forward> -where - T: Float, -{ - type Config; +// pub trait Model: Predict> +// where +// T: Float, +// { +// type Config; - fn name(&self) -> &str; +// fn name(&self) -> &str; - fn modules(&self) -> &Vec>>; +// fn modules(&self) -> &Vec>>; - fn modules_mut(&mut self) -> &mut Vec>>; +// fn modules_mut(&mut self) -> &mut Vec>>; - fn register_module(&mut self, module: Box>) -> &mut Self { - self.modules_mut().push(module); - self - } +// fn register_module(&mut self, module: Box>) -> &mut Self { +// self.modules_mut().push(module); +// self +// } - fn get_module(&self, name: &str) -> Option<&Box>> { - self.modules().iter().find(|m| m.name() == name) - } -} +// fn get_module(&self, name: &str) -> Option<&Box>> { +// self.modules().iter().find(|m| m.name() == name) +// } +// } #[cfg(test)] mod tests {} diff --git a/ml/neural/src/models/exp/modules.rs b/ml/neural/src/models/exp/modules.rs index 64cddbea..21071fc1 100644 --- a/ml/neural/src/models/exp/modules.rs +++ b/ml/neural/src/models/exp/modules.rs @@ -4,32 +4,29 @@ */ //! # Model //! 
-use crate::prelude::Forward; +use crate::prelude::Predict; use ndarray::prelude::Array2; -use num::Float; use std::collections::HashMap; pub type ModuleParams = HashMap>; -pub struct M(Box>>); +// pub struct M(Box>); -pub trait Module: Forward> -where - T: Float, -{ +pub trait Store { + fn get(&self, name: &str) -> Option<&V>; + fn get_mut(&mut self, name: &str) -> Option<&mut V>; + fn insert(&mut self, name: K, value: V) -> Option; + fn remove(&mut self, name: &str) -> Option; +} + +pub trait Module: Predict> { fn get_param(&self, name: &str) -> Option<&Array2> { self.parameters().get(name) } fn name(&self) -> &str; - fn parameters(&self) -> &ModuleParams; - - fn parameters_mut(&mut self) -> &mut ModuleParams; -} + fn parameters(&self) -> &HashMap>; -pub trait ModuleExt: Module -where - T: Float, -{ + fn parameters_mut(&mut self) -> &mut HashMap>; } diff --git a/ml/neural/src/models/model.rs b/ml/neural/src/models/model.rs index edc49384..025e11e6 100644 --- a/ml/neural/src/models/model.rs +++ b/ml/neural/src/models/model.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use super::{ModelConfig, ModelParams}; -use crate::prelude::{Forward, Gradient, LayerParams, Weighted}; +use crate::prelude::{Forward, Gradient, LayerParams}; use ndarray::linalg::Dot; use ndarray::prelude::{Array, Array1, Array2, Dimension, NdFloat}; use ndarray_rand::rand_distr::uniform::SampleUniform; diff --git a/ml/neural/src/models/modes.rs b/ml/neural/src/models/modes.rs index 08192212..964e9ab7 100644 --- a/ml/neural/src/models/modes.rs +++ b/ml/neural/src/models/modes.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames, VariantNames}; +use strum::{Display, EnumIs, EnumIter, EnumString, VariantNames}; #[derive( Clone, @@ -15,13 +15,13 @@ use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames, VariantName EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] diff --git a/ml/neural/src/neurons/node.rs b/ml/neural/src/neurons/node.rs index 1f9dd4bc..c8d0be59 100644 --- a/ml/neural/src/neurons/node.rs +++ b/ml/neural/src/neurons/node.rs @@ -4,50 +4,88 @@ */ use crate::core::prelude::GenerateRandom; -use crate::prelude::{Biased, Forward, Weighted}; +use crate::prelude::Forward; use ndarray::linalg::Dot; -use ndarray::prelude::{Array, Array0, Array1, Array2, Dimension, Ix1, NdFloat}; -use ndarray::RemoveAxis; +use ndarray::prelude::{Array, Array0, Array1, Array2, Dimension, NdFloat}; +use ndarray::{RemoveAxis, ScalarOperand}; use ndarray_rand::rand_distr::uniform::SampleUniform; use ndarray_rand::rand_distr::{Distribution, StandardNormal}; -use num::{Float, FromPrimitive}; +use num::{Float, Num}; use std::ops; #[derive(Clone, Debug, PartialEq)] -pub struct Node -where - T: Float, -{ - bias: Array0, +pub struct Node { + bias: Option>, features: usize, weights: Array1, } impl Node where - T: Float, + T: Clone + Num, { - pub fn new(features: usize) -> Self { + pub fn create(biased: bool, features: usize) -> Self { + let bias = if biased { + Some(Array0::zeros(())) + } else { + None + }; Self { - bias: Array0::zeros(()), + bias, features, weights: Array1::zeros(features), } } - pub fn features(&self) -> &usize { - &self.features + pub fn biased(features: usize) -> Self { + Self::create(true, features) } - pub fn features_mut(&mut self) -> &mut usize { - &mut self.features + pub fn new(features: usize) -> Self { + 
Self::create(false, features) + } +} +impl Node +where + T: Num, +{ + pub fn bias(&self) -> Option<&Array0> { + self.bias.as_ref() + } + + pub fn bias_mut(&mut self) -> Option<&mut Array0> { + self.bias.as_mut() + } + + pub fn features(&self) -> usize { + self.features + } + + pub fn is_biased(&self) -> bool { + self.bias.is_some() + } + + pub fn set_bias(&mut self, bias: Option>) { + self.bias = bias; } pub fn set_features(&mut self, features: usize) { self.features = features; } - pub fn with_bias(mut self, bias: Array0) -> Self { + pub fn set_weights(&mut self, weights: Array1) { + self.weights = weights; + } + + pub fn weights(&self) -> &Array1 { + &self.weights + } + + pub fn weights_mut(&mut self) -> &mut Array1 { + &mut self.weights + } + + pub fn with_bias(mut self, bias: Option>) -> Self { self.bias = bias; self } @@ -63,6 +101,20 @@ where } } +impl Node +where + T: Num + ScalarOperand + 'static, + Array2: Dot, Output = Array1>, +{ + pub fn linear(&self, data: &Array2) -> Array1 { + let w = self.weights().t().to_owned(); + if let Some(bias) = self.bias() { + data.dot(&w) + bias + } else { + data.dot(&w) + } + } +} impl Node where T: Float + SampleUniform, @@ -77,7 +129,7 @@ where pub fn init_bias(mut self) -> Self { let dk = (T::one() / T::from(self.features).unwrap()).sqrt(); - self.bias = Array0::uniform_between(dk, ()); + self.bias = Some(Array0::uniform_between(dk, ())); self } @@ -91,8 +143,7 @@ where impl Node where - T: FromPrimitive + NdFloat, - Self: Weighted, + T: NdFloat, { pub fn apply_gradient(&mut self, gamma: T, gradient: G) where @@ -106,89 +157,25 @@ where where A: Fn(&Array1) -> Array1, { - activator(&self.linear(data)) - } -} -impl Node -where - T: FromPrimitive + NdFloat, - Self: Biased + Weighted, -{ - pub fn linear(&self, data: &Array2) -> Array1 { - data.dot(&self.weights().t()) + self.bias() + activator(&self.forward(data)) } } impl Forward> for Node where - Self: Biased + Weighted, D: Dimension + RemoveAxis, - T: FromPrimitive + NdFloat, + T: NdFloat, Array: Dot, Output = Array>, Array: ops::Add, Output = Array>, { type Output = Array; fn forward(&self, data: &Array) -> Self::Output { - data.dot(&self.weights().t().to_owned()) + self.bias().clone() - } -} - -// impl Forward> for Node -// where -// Self: Biased + Weighted, -// T: FromPrimitive + NdFloat, -// { -// type Output = T; - -// fn forward(&self, data: &Array1) -> Self::Output { -// data.dot(&self.weights().t()) + self.bias().first().unwrap().clone() -// } -// } - -// impl Forward> for Node -// where -// Self: Biased + Weighted, -// T: FromPrimitive + NdFloat, -// { -// type Output = Array1; - -// fn forward(&self, data: &Array2) -> Self::Output { -// data.dot(&self.weights().t()) + self.bias().clone() -// } -// } - -impl Biased for Node -where - T: Float, -{ - fn bias(&self) -> &Array0 { - &self.bias - } - - fn bias_mut(&mut self) -> &mut Array0 { - &mut self.bias - } - - fn set_bias(&mut self, bias: Array0) { - self.bias = bias; - } -} - -impl Weighted for Node -where - T: Float, -{ - fn set_weights(&mut self, weights: Array1) { - self.weights = weights; - } - - fn weights(&self) -> &Array1 { - &self.weights - } - - fn weights_mut(&mut self) -> &mut Array1 { - &mut self.weights + let w = self.weights().t().to_owned(); + if let Some(bias) = self.bias() { + return data.dot(&w) + bias.clone(); + } + data.dot(&w) } } @@ -202,7 +189,7 @@ where { let weights = Array1::::from_iter(iter); Self { - bias: Array0::zeros(()), + bias: None, features: weights.len(), weights, } @@ -215,7 +202,7 @@ where { fn 
from((weights, bias): (Array1, Array0)) -> Self { Self { - bias, + bias: Some(bias), features: weights.len(), weights, } @@ -228,14 +215,45 @@ where { fn from((weights, bias): (Array1, T)) -> Self { Self { - bias: Array0::ones(()) * bias, + bias: Some(Array0::ones(()) * bias), + features: weights.len(), + weights, + } + } +} + +impl From<(Array1, Option)> for Node +where + T: Float + ScalarOperand, +{ + fn from((weights, bias): (Array1, Option)) -> Self { + let bias = if let Some(b) = bias { + Some(Array0::ones(()) * b) + } else { + None + }; + Self { + bias, + features: weights.len(), + weights, + } + } +} + +impl From<(Array1, Option>)> for Node +where + T: Float, +{ + fn from((weights, bias): (Array1, Option>)) -> Self { + Self { + bias, features: weights.len(), weights, } } } -impl From> for (Array1, Array0) +impl From> for (Array1, Option>) where T: Float, { diff --git a/ml/neural/src/neurons/perceptron.rs b/ml/neural/src/neurons/perceptron.rs index f636988b..8626eee3 100644 --- a/ml/neural/src/neurons/perceptron.rs +++ b/ml/neural/src/neurons/perceptron.rs @@ -3,8 +3,7 @@ Contrib: FL03 */ use super::Node; -use crate::func::activate::{Activate, Linear}; -use crate::prelude::{Forward, Parameterized, ParameterizedExt, Weighted}; +use crate::prelude::{Activate, Forward, LinearActivation}; use ndarray::prelude::{Array0, Array1, Array2, Ix1, NdFloat}; use ndarray_rand::rand_distr::uniform::SampleUniform; use ndarray_rand::rand_distr::{Distribution, StandardNormal}; @@ -12,7 +11,7 @@ use num::Float; /// Artificial Neuron #[derive(Clone, Debug, PartialEq)] -pub struct Perceptron +pub struct Perceptron where A: Activate, T: Float, @@ -26,6 +25,16 @@ where A: Activate, T: Float, { + pub fn new(features: usize) -> Self + where + A: Default, + { + Self { + activation: A::default(), + node: Node::create(false, features), + } + } + pub fn node(&self) -> &Node { &self.node } @@ -34,11 +43,23 @@ where &mut self.node } + pub fn features(&self) -> usize { + self.node.features() + } + + pub fn params(&self) -> &Node { + &self.node + } + + pub fn params_mut(&mut self) -> &mut Node { + &mut self.node + } + pub fn rho(&self) -> &A { &self.activation } - pub fn with_bias(mut self, bias: Array0) -> Self { + pub fn with_bias(mut self, bias: Option>) -> Self { self.node = self.node.with_bias(bias); self } @@ -59,18 +80,17 @@ where self.node = self.node.with_weights(weights); self } -} -impl Perceptron -where - T: NdFloat, - A: Activate + Default, -{ - pub fn new(features: usize) -> Self { - Self { - activation: A::default(), - node: Node::new(features), - } + pub fn weights(&self) -> &Array1 { + self.node.weights() + } + + pub fn weights_mut(&mut self) -> &mut Array1 { + self.node.weights_mut() + } + + pub fn set_weights(&mut self, weights: Array1) { + self.node.set_weights(weights); } } @@ -116,77 +136,6 @@ where } } -// impl Biased for Neuron -// where -// T: Float, -// A: Activate, -// { -// fn bias(&self) -> &Array0 { -// self.node.bias() -// } - -// fn bias_mut(&mut self) -> &mut Array0 { -// self.node.bias_mut() -// } - -// fn set_bias(&mut self, bias: Array0) { -// self.node.set_bias(bias); -// } -// } - -impl Weighted for Perceptron -where - T: Float, - A: Activate, -{ - fn weights(&self) -> &Array1 { - self.node.weights() - } - - fn weights_mut(&mut self) -> &mut Array1 { - self.node.weights_mut() - } - - fn set_weights(&mut self, weights: Array1) { - self.node.set_weights(weights); - } -} - -impl Parameterized for Perceptron -where - A: Activate, - T: Float, -{ - type Features = usize; - - type 
Params = Node; - - fn features(&self) -> &Self::Features { - self.node.features() - } - - fn features_mut(&mut self) -> &mut Self::Features { - self.node.features_mut() - } - - fn params(&self) -> &Self::Params { - &self.node - } - - fn params_mut(&mut self) -> &mut Self::Params { - &mut self.node - } -} - -// impl Forward> for Neuron { -// type Output = f64; - -// fn forward(&self, args: &Array1) -> Self::Output { -// self.rho().activate(args.dot(&self.weights().t().to_owned()) + self.bias) -// } - -// } - impl Forward> for Perceptron where T: NdFloat, @@ -195,7 +144,7 @@ where type Output = Array1; fn forward(&self, args: &Array2) -> Self::Output { - let linstep = args.dot(&self.node().weights().t()) + self.bias(); + let linstep = self.params().forward(args); self.rho().activate(&linstep) } } @@ -252,7 +201,7 @@ where } } -impl From> for (Array1, Array0) +impl From> for (Array1, Option>) where T: Float, A: Activate, diff --git a/ml/neural/src/nn/cnn/model.rs b/ml/neural/src/nn/cnn/model.rs index 0c92921e..4dd61560 100644 --- a/ml/neural/src/nn/cnn/model.rs +++ b/ml/neural/src/nn/cnn/model.rs @@ -2,7 +2,5 @@ Appellation: model Contrib: FL03 */ -use ndarray::prelude::Array2; -use num::Float; pub struct CNN {} diff --git a/ml/neural/src/nn/ffn/mlp.rs b/ml/neural/src/nn/ffn/mlp.rs index ca6f8d07..0fa16c6c 100644 --- a/ml/neural/src/nn/ffn/mlp.rs +++ b/ml/neural/src/nn/ffn/mlp.rs @@ -5,15 +5,16 @@ //! # Multi-Layer Perceptron //! -use crate::func::activate::{Activate, Linear, ReLU, Softmax}; +use crate::func::activate::{Activate, LinearActivation, ReLU, Softmax}; use crate::layers::{Layer, LayerShape, Stack}; -use crate::prelude::{Features, Forward, Parameterized}; + +use crate::prelude::{Features, Forward}; use ndarray::prelude::{Array2, Ix2, NdFloat}; use ndarray::IntoDimension; use num::Float; -pub struct MLP +pub struct MLP where T: Float, I: Activate, diff --git a/ml/neural/src/nn/ffn/mod.rs b/ml/neural/src/nn/ffn/mod.rs index f0a6d506..018b086c 100644 --- a/ml/neural/src/nn/ffn/mod.rs +++ b/ml/neural/src/nn/ffn/mod.rs @@ -4,13 +4,11 @@ */ //! # Feed Forward Neural Network //! -pub use self::{mlp::*, model::*, utils::*}; +pub use self::{mlp::*, model::*}; pub(crate) mod mlp; pub(crate) mod model; -pub(crate) mod utils {} - #[cfg(tets)] mod tests { use super::*; diff --git a/ml/neural/src/nn/ffn/model.rs b/ml/neural/src/nn/ffn/model.rs index 6daec964..51467898 100644 --- a/ml/neural/src/nn/ffn/model.rs +++ b/ml/neural/src/nn/ffn/model.rs @@ -4,7 +4,7 @@ */ use crate::func::activate::Activator; -use crate::prelude::{Features, Forward, Layer, Parameterized}; +use crate::prelude::{Features, Forward, Layer}; use ndarray::prelude::{Array2, NdFloat}; use num::Float; diff --git a/ml/neural/src/nn/gnn/mod.rs b/ml/neural/src/nn/gnn/mod.rs index 2b8f4900..321efbe5 100644 --- a/ml/neural/src/nn/gnn/mod.rs +++ b/ml/neural/src/nn/gnn/mod.rs @@ -4,7 +4,7 @@ */ //! # Graph Neural Network //! 
-pub use self::{model::*, tasks::*, utils::*}; +pub use self::{model::*, tasks::*}; pub(crate) mod model; pub(crate) mod tasks; diff --git a/ml/neural/src/nn/gnn/model.rs b/ml/neural/src/nn/gnn/model.rs index 147a580e..0cea9291 100644 --- a/ml/neural/src/nn/gnn/model.rs +++ b/ml/neural/src/nn/gnn/model.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use crate::prelude::Node; -use ndarray::prelude::Array; + use num::Float; use petgraph::prelude::{Directed, Graph}; diff --git a/ml/neural/src/nn/kinds.rs b/ml/neural/src/nn/kinds.rs index 600f0c00..e79b5655 100644 --- a/ml/neural/src/nn/kinds.rs +++ b/ml/neural/src/nn/kinds.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; +use strum::{Display, EnumIs, EnumIter, EnumString, VariantNames}; #[derive( Clone, @@ -15,13 +15,13 @@ use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "snake_case")] @@ -66,13 +66,13 @@ impl NetworkKind { EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] @@ -108,13 +108,13 @@ impl Learning { EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "snake_case")] @@ -139,13 +139,13 @@ pub enum NetworkType { EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "snake_case")] diff --git a/ml/neural/src/nn/mod.rs b/ml/neural/src/nn/mod.rs index b1366c57..912ea228 100644 --- a/ml/neural/src/nn/mod.rs +++ b/ml/neural/src/nn/mod.rs @@ -3,15 +3,14 @@ Contrib: FL03 */ //! # Neural Network -pub use self::{kinds::*, position::*, sequential::*, utils::*}; +pub use self::{kinds::*, position::*}; pub(crate) mod kinds; pub(crate) mod position; -pub(crate) mod sequential; pub mod cnn; pub mod ffn; pub mod gnn; pub mod rnn; -pub(crate) mod utils {} +pub trait NeuralNetwork {} diff --git a/ml/neural/src/nn/rnn/mod.rs b/ml/neural/src/nn/rnn/mod.rs index 5f8a8d7f..d2cb74d5 100644 --- a/ml/neural/src/nn/rnn/mod.rs +++ b/ml/neural/src/nn/rnn/mod.rs @@ -5,7 +5,7 @@ //! # Reccurrant Neural Network (RNN) //! //! 
-pub use self::{model::*, utils::*}; +pub use self::model::*; pub(crate) mod model; diff --git a/ml/neural/src/nn/rnn/model.rs b/ml/neural/src/nn/rnn/model.rs index 08a2b48f..b3e3fd21 100644 --- a/ml/neural/src/nn/rnn/model.rs +++ b/ml/neural/src/nn/rnn/model.rs @@ -2,7 +2,5 @@ Appellation: model Contrib: FL03 */ -use ndarray::prelude::Array2; -use num::Float; pub struct RNN {} diff --git a/ml/neural/src/nn/sequential.rs b/ml/neural/src/nn/sequential.rs deleted file mode 100644 index b3884a9e..00000000 --- a/ml/neural/src/nn/sequential.rs +++ /dev/null @@ -1,12 +0,0 @@ -/* - Appellation: sequential - Contrib: FL03 -*/ - -pub struct Sequential {} - -impl Sequential { - pub fn new() -> Self { - Self {} - } -} diff --git a/ml/neural/src/ops/mod.rs b/ml/neural/src/ops/mod.rs index e8b3d4cc..ad9f5fee 100644 --- a/ml/neural/src/ops/mod.rs +++ b/ml/neural/src/ops/mod.rs @@ -2,18 +2,16 @@ Appellation: ops Contrib: FL03 */ -pub use self::{dropout::*, norm::*, utils::*}; +pub use self::{dropout::*, norm::*}; pub(crate) mod dropout; pub(crate) mod norm; -pub(crate) mod utils {} - #[cfg(test)] mod tests { use super::*; + use crate::core::prelude::RoundTo; use crate::prelude::Forward; - use concision_core::prelude::RoundTo; use ndarray::prelude::{array, Array, Ix2}; #[test] diff --git a/ml/neural/src/ops/norm.rs b/ml/neural/src/ops/norm.rs index 436346ef..167017be 100644 --- a/ml/neural/src/ops/norm.rs +++ b/ml/neural/src/ops/norm.rs @@ -2,19 +2,29 @@ Appellation: norm Contrib: FL03 */ -use crate::core::MatrixOps; use crate::prelude::Forward; use ndarray::prelude::{Array, Axis, Dimension, Ix2, NdFloat}; use ndarray::{IntoDimension, RemoveAxis}; -use num::{Float, FromPrimitive}; +use num::{FromPrimitive, Num}; use serde::{Deserialize, Serialize}; -use std::ops::{Add, Mul}; -pub fn norm(x: &Array, axis: usize) -> Array +pub fn norm(x: &Array) -> Array where - D: Dimension + RemoveAxis, + D: Dimension, + T: FromPrimitive + NdFloat, +{ + let epsilon = T::from(1e-6).unwrap(); + // Calculate the mean and standard deviation of the activations along the feature axis. + let mean = x.mean().expect("mean_axis failed"); + + let std = x.std(T::one()); + (x.clone() - mean) / (std + epsilon) +} + +pub fn norma(x: &Array, axis: usize) -> Array +where + D: RemoveAxis, T: FromPrimitive + NdFloat, - Array: MatrixOps, { let axis = Axis(axis); let epsilon = T::from(1e-6).unwrap(); @@ -33,8 +43,6 @@ pub fn norm_and_scale( where D: Dimension, T: FromPrimitive + NdFloat, - Array: - Add, Output = Array> + Mul, Output = Array>, { let epsilon = T::from(1e-6).unwrap(); // Calculate the mean and standard deviation of the activations along the feature axis. @@ -49,8 +57,8 @@ where #[derive(Clone, Debug, Default, Deserialize, PartialEq, Serialize)] pub struct LayerNorm where - T: Float, D: Dimension, + T: Num, { alpha: Array, beta: Array, @@ -58,8 +66,8 @@ where impl LayerNorm where - T: Float, - D: Dimension + RemoveAxis, + D: Dimension, + T: Clone + Num, { pub fn new(dim: impl IntoDimension) -> Self { let dim = dim.into_dimension(); @@ -90,7 +98,6 @@ impl Forward> for LayerNorm where D: Dimension, T: FromPrimitive + NdFloat, - Array: Add, Output = Array> + Mul, Output = Array>, { type Output = Array; @@ -102,6 +109,6 @@ where let norm = (data - mean) / (data.std(T::one()) + epsilon); // Scale and shift the normalized activations with learnable parameters alpha and beta. 
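// In symbols, with the mean μ and standard deviation σ taken over the whole input array
// here and ε = 1e-6 for numerical stability:
//     y = α ⊙ (x − μ) / (σ + ε) + β
// where α (alpha) and β (beta) are the layer's learnable scale and shift parameters.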
- norm * self.alpha.clone() + self.beta.clone() + norm * self.alpha() + self.beta() } } diff --git a/ml/neural/src/params/mod.rs b/ml/neural/src/params/mod.rs index 78f0a632..2b4c9429 100644 --- a/ml/neural/src/params/mod.rs +++ b/ml/neural/src/params/mod.rs @@ -12,12 +12,9 @@ pub(crate) mod group; pub(crate) mod param; pub(crate) mod shapes; -use ndarray::linalg::Dot; use ndarray::prelude::{Array, Dimension, Ix2}; -use ndarray::IntoDimension; -use num::Float; -pub type BoxedParams = Box>; +use num::Float; pub trait Biased where @@ -45,173 +42,7 @@ where fn set_weights(&mut self, weights: Array); } -pub trait Params -where - D: Dimension, - T: Float, -{ - /// Returns an owned reference to the bias of the layer. - fn bias(&self) -> &Array; - /// Returns a mutable reference to the bias of the layer. - fn bias_mut(&mut self) -> &mut Array; - /// Returns an owned reference to the weights of the layer. - fn weights(&self) -> &Array; - /// Returns a mutable reference to the weights of the layer. - fn weights_mut(&mut self) -> &mut Array; - /// Sets the bias of the layer. - fn set_bias(&mut self, bias: Array); - /// Sets the weights of the layer. - fn set_weights(&mut self, weights: Array); -} - -pub trait ParamsExt: Biased + Weighted -where - Array: Dot, Output = Array>, - D: Dimension, - T: Float, -{ - fn linear(&self, args: &Array) -> Array { - args.dot(self.weights()) + self.bias() - } -} - -pub trait Parameterized -where - D: Dimension, - T: Float, -{ - type Features: IntoDimension; - type Params; - - fn features(&self) -> &Self::Features; - - fn features_mut(&mut self) -> &mut Self::Features; - - fn params(&self) -> &Self::Params; - - fn params_mut(&mut self) -> &mut Self::Params; -} - -pub trait ParameterizedExt: Parameterized -where - D: Dimension, - T: Float, - >::Params: Params + 'static, -{ - fn bias(&self) -> &Array { - Params::bias(self.params()) - } - - fn bias_mut(&mut self) -> &mut Array { - Params::bias_mut(self.params_mut()) - } - - fn weights(&self) -> &Array { - Params::weights(self.params()) - } - - fn weights_mut(&mut self) -> &mut Array { - Params::weights_mut(self.params_mut()) - } - - fn set_bias(&mut self, bias: Array) { - Params::set_bias(self.params_mut(), bias) - } - - fn set_weights(&mut self, weights: Array) { - Params::set_weights(self.params_mut(), weights) - } -} - -impl ParameterizedExt for P -where - D: Dimension, - P: Parameterized, - T: Float, -
>::Params: Params + 'static, -{ -} - -// impl Params for S -// where -// S: Parameterized, -// D: Dimension, -// P: Biased, -// T: Float, -// ::Smaller: Dimension, -// { -// fn bias(&self) -> &Array { -// self.params().bias() -// } - -// fn bias_mut(&mut self) -> &mut Array { -// self.params_mut().bias_mut() -// } - -// fn weights(&self) -> &Array { -// self.params().weights() -// } - -// fn weights_mut(&mut self) -> &mut Array { -// self.params_mut().weights_mut() -// } - -// fn set_bias(&mut self, bias: Array) { -// self.params_mut().set_bias(bias) -// } - -// fn set_weights(&mut self, weights: Array) { -// self.params_mut().set_weights(weights) -// } -// } - -impl Params for P -where - D: Dimension, - T: Float, - Self: Biased + Weighted + Sized, -{ - fn bias(&self) -> &Array { - Biased::bias(self) - } - - fn bias_mut(&mut self) -> &mut Array { - Biased::bias_mut(self) - } - - fn weights(&self) -> &Array { - Weighted::weights(self) - } - - fn weights_mut(&mut self) -> &mut Array { - Weighted::weights_mut(self) - } - - fn set_bias(&mut self, bias: Array) { - Biased::set_bias(self, bias) - } - - fn set_weights(&mut self, weights: Array) { - Weighted::set_weights(self, weights) - } -} - -// impl Biased for P -// where -// D: Dimension, -// P: Parameterized, -// T: Float, -// ::Smaller: Dimension, -//
>::Params: 'static, -// { -// fn bias(&self) -> &Array { -// self.params().bias() -// } - -// fn bias_mut(&mut self) -> &mut Array { -// self.params_mut().bias_mut() -// } -// } +pub trait Params {} #[cfg(test)] mod tests {} diff --git a/ml/neural/src/params/shapes.rs b/ml/neural/src/params/shapes.rs index bd67e16c..e7634373 100644 --- a/ml/neural/src/params/shapes.rs +++ b/ml/neural/src/params/shapes.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; +use strum::{Display, EnumIs, EnumIter, EnumString, VariantNames}; pub trait LayerFeatures { fn inputs(&self) -> usize; @@ -33,13 +33,13 @@ impl LayerFeatures for ParameterShapes { Deserialize, EnumIs, EnumIter, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] pub enum ParameterShapes { Layer { inputs: usize, outputs: usize }, @@ -56,13 +56,13 @@ pub enum ParameterShapes { EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] diff --git a/ml/neural/src/primitives.rs b/ml/neural/src/primitives.rs index 120654da..4fabd493 100644 --- a/ml/neural/src/primitives.rs +++ b/ml/neural/src/primitives.rs @@ -2,7 +2,7 @@ Appellation: primitives Contrib: FL03 */ -pub use self::{constants::*, statics::*, types::*}; +pub use self::{constants::*, types::*}; pub(crate) mod constants { pub const DEFAULT_BUFFER: usize = 1024; diff --git a/ml/neural/src/specs.rs b/ml/neural/src/specs.rs index e48fdfd5..6f2f149d 100644 --- a/ml/neural/src/specs.rs +++ b/ml/neural/src/specs.rs @@ -4,7 +4,7 @@ */ use crate::core::BoxResult; use crate::func::loss::Loss; -use ndarray::prelude::{Array, Array1, Axis, Dimension, Ix2}; +use ndarray::prelude::{Array, Axis, Ix2}; use num::Float; pub trait Backward: Forward { @@ -36,40 +36,32 @@ where // { // } -pub trait Compile -where - D: Dimension, - T: Float, -{ +pub trait Batched { + type Output; + + fn batch(&self, batch_size: usize) -> Self::Output; +} + +pub trait Compile { type Opt; - fn compile(&mut self, loss: impl Loss>, optimizer: Self::Opt) -> BoxResult<()>; + fn compile(&mut self, loss: impl Loss, optimizer: Self::Opt) -> BoxResult<()>; } -pub trait Predict -where - D: Dimension, - T: Float, -{ +pub trait Predict { type Output; - fn predict(&self, input: &Array) -> BoxResult; - - fn predict_batch(&self, input: &[Array]) -> BoxResult> { - let res = input.iter().map(|x| self.predict(x).expect("")).collect(); - Ok(res) - } + fn predict(&self, input: &T) -> BoxResult; } -impl Predict for Box> +impl Predict for S where - D: Dimension, - T: Float, + S: Forward, { type Output = O; - fn predict(&self, input: &Array) -> BoxResult { - self.as_ref().predict(input) + fn predict(&self, input: &T) -> BoxResult { + Ok(self.forward(input)) } } @@ -97,10 +89,6 @@ where } } -pub trait Module: Forward, Output = Array> -where - D: Dimension, - T: Float, -{ +pub trait Module: Compile + Predict { type Config; } diff --git a/ml/nlp/src/lib.rs b/ml/nlp/src/lib.rs index 8ddedbe2..75150baa 100644 --- a/ml/nlp/src/lib.rs +++ b/ml/nlp/src/lib.rs @@ -3,7 +3,6 @@ Contrib: FL03 */ //! 
# Natural Language Processing -pub use self::{primitives::*, specs::*, utils::*}; pub(crate) mod primitives; pub(crate) mod specs; @@ -18,8 +17,4 @@ pub use concision_core as core; pub mod prelude { pub use crate::embed::*; pub use crate::encode::*; - - pub use crate::primitives::*; - pub use crate::specs::*; - pub use crate::utils::*; } diff --git a/ml/nlp/src/primitives.rs b/ml/nlp/src/primitives.rs index 859023bb..51d14049 100644 --- a/ml/nlp/src/primitives.rs +++ b/ml/nlp/src/primitives.rs @@ -2,7 +2,6 @@ Appellation: primitives Contrib: FL03 */ -pub use self::{constants::*, statics::*, types::*}; mod constants {} diff --git a/ml/optim/src/cost/mod.rs b/ml/optim/src/cost/mod.rs index 7dac866b..75ba92e1 100644 --- a/ml/optim/src/cost/mod.rs +++ b/ml/optim/src/cost/mod.rs @@ -4,7 +4,7 @@ */ //! # cost //! -pub use self::{kinds::*, utils::*}; +pub use self::kinds::*; pub(crate) mod kinds; diff --git a/ml/optim/src/grad/gradient.rs b/ml/optim/src/grad/gradient.rs index 967ba7e0..61cc8230 100644 --- a/ml/optim/src/grad/gradient.rs +++ b/ml/optim/src/grad/gradient.rs @@ -4,7 +4,7 @@ */ use crate::neural::func::activate::Sigmoid; use crate::neural::models::ModelParams; -use crate::neural::prelude::{Forward, Gradient, Weighted}; +use crate::neural::prelude::{Forward, Gradient}; use ndarray::prelude::{Array2, Axis, NdFloat}; use ndarray_stats::DeviationExt; use num::{Float, Signed}; diff --git a/ml/optim/src/grad/mod.rs b/ml/optim/src/grad/mod.rs index 671365b2..d9ebf164 100644 --- a/ml/optim/src/grad/mod.rs +++ b/ml/optim/src/grad/mod.rs @@ -28,25 +28,27 @@ pub struct DescentParams { } pub(crate) mod utils { + use crate::core::prelude::BoxResult; use crate::neural::func::activate::Gradient; - use crate::neural::params::{Biased, Weighted}; - use crate::neural::prelude::{Forward, ForwardIter, Parameterized, Params}; + use crate::neural::models::exp::Module; + use crate::neural::prelude::{Forward, ForwardIter}; use ndarray::linalg::Dot; use ndarray::prelude::{Array, Array2, Dimension, NdFloat}; use ndarray_stats::DeviationExt; use num::{FromPrimitive, Signed}; + use std::ops::Sub; pub fn gradient_descent( - gamma: T, + _gamma: T, model: &mut M, - objective: impl Gradient, + _objective: impl Gradient, data: &Array2, targets: &Array, ) -> anyhow::Result where D: Dimension, M: Clone + ForwardIter, I, Output = Array>, - I: Forward, Output = Array> + Biased + Weighted, + I: Forward, Output = Array>, T: FromPrimitive + NdFloat + Signed, Array2: Dot, Output = Array>, { @@ -54,22 +56,22 @@ pub(crate) mod utils { Ok(loss) } - pub fn gradient( + pub fn gradient<'a, T, D, A>( gamma: T, model: &mut A, data: &Array2, targets: &Array, grad: impl Gradient, - ) -> f64 + ) -> BoxResult where - A: Forward, Output = Array> + Parameterized, - D: Dimension, + A: Module>, + D: Dimension + 'a, T: FromPrimitive + NdFloat + Signed, - >::Params: Params + 'static, Array2: Dot, Output = Array>, + &'a Array2: Sub<&'a Array, Output = Array>, { let (_samples, _inputs) = data.dim(); - let pred = model.forward(data); + let pred = model.predict(data)?; let ns = T::from(data.len()).unwrap(); @@ -83,13 +85,14 @@ pub(crate) mod utils { // let db = dz.sum_axis(Axis(0)) / ns; // // Apply the gradients to the model's learnable parameters // model.params_mut().bias_mut().scaled_add(-gamma, &db.t()); - - model.params_mut().weights_mut().scaled_add(-gamma, &dw.t()); + for p in model.parameters_mut().values_mut() { + p.scaled_add(-gamma, &dw.t()); + } let loss = targets - .mean_sq_err(&model.forward(data)) + 
.mean_sq_err(&model.predict(data)?) .expect("Error when calculating the MSE of the model"); - loss + Ok(loss) } } @@ -98,9 +101,9 @@ mod tests { use super::*; use crate::core::prelude::linarr; - use crate::neural::func::activate::{Linear, Sigmoid}; + use crate::neural::func::activate::{LinearActivation, Sigmoid}; use crate::neural::models::ModelParams; - use crate::neural::prelude::{Features, Layer, LayerShape}; + use crate::neural::prelude::{Features, Forward, Layer, LayerShape}; use ndarray::prelude::{Array1, Ix2}; #[test] @@ -134,22 +137,24 @@ mod tests { fn test_gradient() { let (samples, inputs, outputs) = (20, 5, 1); - let (epochs, gamma) = (10, 0.001); + let (_epochs, _gamma) = (10, 0.001); let features = LayerShape::new(inputs, outputs); // Generate some example data - let x = linarr((samples, features.inputs())).unwrap(); - let y = linarr((samples, features.outputs())).unwrap(); + let x = linarr::((samples, features.inputs())).unwrap(); + let _y = linarr::((samples, features.outputs())).unwrap(); - let mut model = Layer::::from(features).init(true); + let model = Layer::::from(features).init(true); - let mut losses = Array1::zeros(epochs); - for e in 0..epochs { - let cost = gradient(gamma, &mut model, &x, &y, Sigmoid); - losses[e] = cost; - } - assert_eq!(losses.len(), epochs); - assert!(losses.first() > losses.last()); + let _pred = model.forward(&x); + + // let mut losses = Array1::zeros(epochs); + // for e in 0..epochs { + // let cost = gradient(gamma, &mut model, &x, &y, Sigmoid).unwrap(); + // losses[e] = cost; + // } + // assert_eq!(losses.len(), epochs); + // assert!(losses.first() > losses.last()); } } diff --git a/ml/optim/src/grad/sgd.rs b/ml/optim/src/grad/sgd.rs index 12473391..cb0171ca 100644 --- a/ml/optim/src/grad/sgd.rs +++ b/ml/optim/src/grad/sgd.rs @@ -6,7 +6,7 @@ //! //! 
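//! Each step follows the usual update rule w ← w − γ · ∂L/∂w. For the squared-error
//! objective used below, the per-batch weight gradient is ∂L/∂W = −(2/m) · Xᵀ(y − ŷ),
//! which the `sgd` helper accumulates over every mini-batch of size m.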
-use crate::neural::prelude::{Activate, Features, Forward, Layer, Parameterized, Weighted}; +use crate::neural::prelude::{Activate, Features, Forward, Layer}; // use crate::prelude::ObjectiveFn; use ndarray::prelude::{s, Array1, Array2, Axis, Ix2, NdFloat}; use ndarray_stats::DeviationExt; @@ -38,7 +38,7 @@ where let ys = y.select(Axis(0), pos); let pred = model.forward(&xs); - let error = &pred - &ys; + let _error = &pred - &ys; for batch in (0..samples).step_by(batch_size) { let mut gradient = Array2::zeros((features.outputs(), features.inputs())); @@ -54,7 +54,7 @@ where let inner = y - &prediction; let partial_w = (-2.0 / batch_size as f64) * input.dot(&inner); - let partial_b = (-2.0 / batch_size as f64) * inner; + let _partial_b = (-2.0 / batch_size as f64) * inner; gradient -= partial_w.sum(); // let mut weights = model.weights_mut().slice_mut(s![]) // model.set_weights(weights) @@ -83,7 +83,7 @@ pub fn sgd_step( x: &Array2, y: &Array1, model: &mut Layer, - learning_rate: f64, + _learning_rate: f64, batch_size: usize, ) -> anyhow::Result where @@ -91,17 +91,17 @@ where { let layer = model.clone(); let features = layer.features(); - let (samples, _inputs) = x.dim(); + let (_samples, _inputs) = x.dim(); let mut indices: Vec = (0..features.outputs()).collect(); - let mut losses = 0.0; + let losses = 0.0; indices.shuffle(&mut rand::thread_rng()); let pos = &indices[..batch_size]; let xs = x.select(Axis(0), pos); - let ys = y.select(Axis(0), pos); + let _ys = y.select(Axis(0), pos); - let pred = model.forward(&xs); + let _pred = model.forward(&xs); Ok(losses) } @@ -114,7 +114,7 @@ pub struct Sgd { impl Sgd { pub fn step(&mut self) -> f64 { - let mut loss = 0.0; + let loss = 0.0; loss } diff --git a/ml/optim/src/primitives.rs b/ml/optim/src/primitives.rs index 01b073a1..d80ec4b6 100644 --- a/ml/optim/src/primitives.rs +++ b/ml/optim/src/primitives.rs @@ -2,7 +2,7 @@ Appellation: primitives Contrib: FL03 */ -pub use self::{constants::*, statics::*, types::*}; +pub use self::{constants::*, types::*}; mod constants { diff --git a/ml/s4/Cargo.toml b/ml/s4/Cargo.toml index 8c67013a..45daa666 100644 --- a/ml/s4/Cargo.toml +++ b/ml/s4/Cargo.toml @@ -15,9 +15,10 @@ version.workspace = true default = ["blas"] blas = [ - # "ndarray/blas", - # "concision-core/blas", - # "concision-neural/blas", + "ndarray/blas", + "concision-core/blas", + "concision-data/blas", + "concision-neural/blas", ] intel-mkl-system = [ @@ -62,16 +63,19 @@ test = true [dependencies] concision-core = { features = ["blas"], path = "../../core", version = "0.1.12" } +concision-data = { features = ["blas"], path = "../../data", version = "0.1.12" } concision-neural = { features = ["blas"], path = "../neural" } anyhow.workspace = true lazy_static.workspace = true -ndarray = { features = ["blas", "serde-1"], version = "0.15" } +ndarray = { features = ["approx", "blas", "serde-1"], version = "0.15" } ndarray-conv = "0.2" ndarray-linalg = { features = [], version = "0.16" } ndarray-rand.workspace = true ndarray-stats.workspace = true num.workspace = true +rand = "0.8" +realfft = "3" rustfft = { features = [], version = "6" } serde.workspace = true serde_json.workspace = true diff --git a/ml/s4/examples/sand.rs b/ml/s4/examples/sand.rs index 06e70af3..bf8d58df 100644 --- a/ml/s4/examples/sand.rs +++ b/ml/s4/examples/sand.rs @@ -1,14 +1,23 @@ // use concision_core as cnc; -use concision_s4 as s4; +extern crate concision_s4; -use s4::randcomplex; +use concision_core as core; +use concision_s4 as s4; -use ndarray::prelude::Ix2; +use 
ndarray::prelude::*; fn main() -> anyhow::Result<()> { - let c = randcomplex::([2, 2]); + let (features, samples) = (4, 16); + + let u = Array::range(0.0, features as f64, 1.0).insert_axis(Axis(1)); + let x0 = Array1::::zeros(features); - println!("{:?}", &c); + // let step = | st, u | { + // let x1 = st; + // let yk = u; + // Some(x1) + // }; + // println!("{:?}", scan(step, u, x0.to_vec())); Ok(()) } diff --git a/ml/s4/src/cmp/cache.rs b/ml/s4/src/cmp/cache.rs index 3d0fa7e9..32202017 100644 --- a/ml/s4/src/cmp/cache.rs +++ b/ml/s4/src/cmp/cache.rs @@ -2,12 +2,94 @@ Appellation: cache Contrib: FL03 */ +use crate::data::prelude::Store; use ndarray::prelude::{Array, Dimension, Ix2}; // use num::{Complex, Float}; +use std::collections::HashMap; pub struct Cache where D: Dimension, { - cache: Array, + cache: HashMap>, +} + +impl Cache +where + D: Dimension, +{ + pub fn new() -> Self { + Self { + cache: HashMap::new(), + } + } +} + +impl Store> for Cache +where + D: Dimension, +{ + fn get(&self, key: &String) -> Option<&Array> { + self.cache.get(key) + } + + fn get_mut(&mut self, key: &String) -> Option<&mut Array> { + self.cache.get_mut(key) + } + + fn insert(&mut self, key: String, value: Array) -> Option> { + self.cache.insert(key, value) + } + + fn remove(&mut self, key: &String) -> Option> { + self.cache.remove(key) + } +} + +impl Extend<(String, Array)> for Cache +where + D: Dimension, +{ + fn extend)>>(&mut self, iter: I) { + for (key, value) in iter { + self.insert(key, value); + } + } +} + +impl FromIterator<(String, Array)> for Cache +where + D: Dimension, +{ + fn from_iter)>>(iter: I) -> Self { + let mut cache = Self::new(); + for (key, value) in iter { + cache.insert(key, value); + } + cache + } +} + +impl IntoIterator for Cache +where + D: Dimension, +{ + type Item = (String, Array); + type IntoIter = std::collections::hash_map::IntoIter>; + + fn into_iter(self) -> Self::IntoIter { + self.cache.into_iter() + } +} + +impl<'a, T, D> IntoIterator for &'a mut Cache +where + D: Dimension, +{ + type Item = (&'a String, &'a mut Array); + type IntoIter = std::collections::hash_map::IterMut<'a, String, Array>; + + fn into_iter(self) -> Self::IntoIter { + self.cache.iter_mut() + } } diff --git a/ml/s4/src/cmp/kernel.rs b/ml/s4/src/cmp/kernel.rs index 073f331e..d4cb1bcb 100644 --- a/ml/s4/src/cmp/kernel.rs +++ b/ml/s4/src/cmp/kernel.rs @@ -2,30 +2,140 @@ Appellation: kernel Contrib: FL03 */ -use ndarray::prelude::Array2; -use num::Float; +use crate::core::ops::fft::{ifft, FftPlan}; +use crate::core::prelude::Conjugate; +use crate::params::DPLRParams; +use crate::prelude::cauchy; +use ndarray::prelude::{Array, Array1}; +use ndarray::ScalarOperand; +use ndarray_linalg::Scalar; +use num::complex::{Complex, ComplexFloat}; +use num::traits::{Float, FloatConst, NumOps}; +use rustfft::FftNum; -pub struct Kernel { - kernal: Array2, +pub fn omega_l(l: usize) -> Array1<::Complex> +where + T: Scalar>, + ::Real: FloatConst + NumOps<::Complex, ::Complex>, + ::Complex: + ComplexFloat::Real> + NumOps<::Real> + ScalarOperand, +{ + let f = |i: usize| -> ::Complex { + let im = T::PI().mul_complex(Complex::i() * T::from(2).unwrap()); // .neg() + T::from(i) + .unwrap() + .div_real(T::from(l).unwrap()) + .mul_complex(im) + .exp() + }; + Array::from_iter((0..l).map(f)) } -impl Kernel +pub struct Omega where - T: Float, + T: Scalar, { - pub fn new(kernal: Array2) -> Self { - Self { kernal } + omega: Array1<::Complex>, +} + +impl Omega +where + T: Scalar>, + ::Real: FloatConst + NumOps<::Complex, ::Complex>, + 
::Complex: + ComplexFloat::Real> + NumOps<::Real> + ScalarOperand, +{ + pub fn new(l: usize) -> Self { + let f = |i: usize| -> ::Complex { + let im = T::PI().mul_complex(Complex::i() * T::from(2).unwrap()); // .neg() + T::from(i) + .unwrap() + .div_real(T::from(l).unwrap()) + .mul_complex(im) + .exp() + }; + let omega = Array::from_iter((0..l).map(f)); + Self { omega } } +} - pub fn square(features: usize) -> Self - where - T: Default, - { - let kernal = Array2::::default((features, features)); - Self::new(kernal) +impl Omega +where + T: Scalar, +{ + pub fn omega(&self) -> &Array1<::Complex> { + &self.omega } +} + +pub fn kernel_dplr( + dplr: &DPLRParams<::Complex>, + step: ::Real, + l: usize, +) -> Array1<::Real> +where + T: Conjugate + FftNum + Float + Scalar>, + ::Real: + FloatConst + NumOps<::Complex, ::Complex> + ScalarOperand, + ::Complex: Conjugate + ScalarOperand, +{ + // initialize some constants + let two = T::from(2).unwrap(); + // get the lambda matrix + let lambda = dplr.lambda.clone(); + // collect the relevant terms for A + let aterm = (dplr.c.conj(), dplr.q.conj()); + // collect the relevant terms for B + let bterm = (dplr.b.clone(), dplr.p.clone()); - pub fn kernal(&self) -> &Array2 { + // generate omega + let omega_l = omega_l::(l); + + let g = omega_l.mapv(|i| (T::one() - i) * (T::one() + i).recip()) * (two * step.recip()); + let c = omega_l.mapv(|i| two * (T::one() + i).recip()); + // compute the cauchy matrix + let k00 = cauchy(&(&aterm.0 * &bterm.0), &g, &lambda); + let k01 = cauchy(&(&aterm.0 * &bterm.1), &g, &lambda); + let k10 = cauchy(&(&aterm.1 * &bterm.0), &g, &lambda); + let k11 = cauchy(&(&aterm.1 * &bterm.1), &g, &lambda); + // compute the roots of unity + let at_roots = &c * (&k00 - k01 * &k11.mapv(|i| (i + T::one()).recip()) * &k10); + let plan = FftPlan::new(l); + let res = ifft(at_roots.into_raw_vec().as_slice(), &plan); + Array::from_vec(res).mapv(|i| i.re()) +} + +pub struct Kernel { + kernal: Array1, +} + +impl Kernel { + pub fn new(kernal: Array1) -> Self { + Self { kernal } + } + + pub fn kernal(&self) -> &Array1 { &self.kernal } } + +impl Kernel +where + T: Scalar>, + ::Real: Conjugate + + FftNum + + Float + + FloatConst + + NumOps<::Complex, ::Complex> + + ScalarOperand, + ::Complex: Conjugate + ScalarOperand, +{ + pub fn dplr( + dplr: &DPLRParams<::Complex>, + step: ::Real, + l: usize, + ) -> Self { + let kernal = kernel_dplr::(dplr, step, l); + Self::new(kernal) + } +} diff --git a/ml/s4/src/cmp/mod.rs b/ml/s4/src/cmp/mod.rs index 4f09370f..f27d829a 100644 --- a/ml/s4/src/cmp/mod.rs +++ b/ml/s4/src/cmp/mod.rs @@ -5,7 +5,7 @@ //! # Components //! //! 
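// A concrete, non-generic sketch of the frequency grid that kernel_dplr (defined in
// cmp/kernel.rs above) evaluates the Cauchy kernel on, using plain num::Complex instead of
// the Scalar-associated types. It mirrors omega_l and the two mapv calls in kernel_dplr,
// keeping the sign as written there (the negation is left commented out). Purely
// illustrative; the names below are not crate APIs.
use ndarray::prelude::*;
use num::complex::Complex;
use std::f64::consts::PI;

fn omega_grid_sketch(l: usize, step: f64) -> (Array1<Complex<f64>>, Array1<Complex<f64>>) {
    let one = Complex::new(1.0, 0.0);
    let two_pi_i = Complex::new(0.0, 2.0 * PI);
    // the l-th roots of unity: omega_j = exp(2*pi*i * j / l)
    let omega = Array::from_iter((0..l).map(|j| (two_pi_i * (j as f64 / l as f64)).exp()));
    // bilinear-transform frequencies g_j = (2 / step) * (1 - omega_j) / (1 + omega_j),
    // the points at which the Cauchy products k00..k11 are evaluated
    let g = omega.mapv(|w| (one - w) / (one + w) * (2.0 / step));
    // per-frequency scaling c_j = 2 / (1 + omega_j)
    let c = omega.mapv(|w| one * 2.0 / (one + w));
    (g, c)
}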
-pub use self::{cache::*, kernel::*}; +pub use self::cache::*; pub(crate) mod cache; -pub(crate) mod kernel; +pub mod kernel; diff --git a/ml/s4/src/dplr/hippo.rs b/ml/s4/src/dplr/hippo.rs deleted file mode 100644 index d364c2ad..00000000 --- a/ml/s4/src/dplr/hippo.rs +++ /dev/null @@ -1,58 +0,0 @@ -/* - Appellation: hippo - Contrib: FL03 -*/ -use super::utils::*; -use ndarray::prelude::Array2; -use ndarray::ScalarOperand; -use num::complex::ComplexFloat; -use num::Float; - -pub enum HiPPOs { - HiPPO(Array2), - NPLR { - a: Array2, - p: Array2, - b: Array2, - }, - DPLR { - lambda: Array2, - p: Array2, - q: Array2, - b: Array2, - c: Array2, - }, -} - -pub struct HiPPO(Array2); - -impl HiPPO -where - T: Float, -{ - pub fn new(hippo: Array2) -> Self { - Self(hippo) - } - - pub fn hippo(&self) -> &Array2 { - &self.0 - } - - pub fn hippo_mut(&mut self) -> &mut Array2 { - &mut self.0 - } -} - -impl HiPPO -where - T: ComplexFloat + ScalarOperand, -{ - pub fn square(features: usize) -> Self { - Self(make_hippo(features)) - } - - pub fn nplr(features: usize) -> Self { - let (hippo, p, b) = make_nplr_hippo(features); - Self(hippo) - } -} diff --git a/ml/s4/src/dplr/mod.rs b/ml/s4/src/dplr/mod.rs deleted file mode 100644 index cd06181e..00000000 --- a/ml/s4/src/dplr/mod.rs +++ /dev/null @@ -1,76 +0,0 @@ -/* - Appellation: dplr - Contrib: FL03 -*/ -//! # Diagonal Plus Low Rank (DPLR) -//! -//! -pub use self::{kinds::*, utils::*}; - -pub(crate) mod kinds; - -pub mod hippo; - -pub struct LowRank { - pub mode: Mode, -} - -pub(crate) mod utils { - use crate::core::prelude::{rangespace, AsComplex, Conjugate}; - - use ndarray::prelude::{Array1, Array2, Axis}; - use ndarray::ScalarOperand; - use ndarray_linalg::{Eigh, IntoTriangular, Lapack, UPLO}; - use num::complex::{Complex, ComplexFloat}; - use num::FromPrimitive; - - pub fn make_hippo(features: usize) -> Array2 - where - T: ComplexFloat + ScalarOperand, - { - let base = rangespace((features, 1)); - let p = (&base * T::from(2).unwrap() + T::one()).mapv(T::sqrt); - let mut a = &p * &p.t(); - a = &a.into_triangular(UPLO::Lower) - &base.diag(); - -a - } - - pub fn make_nplr_hippo(features: usize) -> (Array2, Array1, Array1) - where - T: ComplexFloat + ScalarOperand, - { - let hippo = make_hippo(features); - - let base = rangespace((features,)); - let p = (&base + T::one() / T::from(2).unwrap()).mapv(T::sqrt); - let b = (&base * T::from(2).unwrap() + T::one()).mapv(T::sqrt); - (hippo, p, b) - } - - pub fn make_dplr_hippo(features: usize) -> (Array2, Array2, Array2, Array2) - where - T: AsComplex + ComplexFloat + Conjugate + FromPrimitive + Lapack + ScalarOperand, - { - let (a, p, b) = make_nplr_hippo(features); - let p = p.insert_axis(Axis(1)); - let b = b.insert_axis(Axis(1)); - - // - let s = &a + p.dot(&p.t()); - // - let sd = s.diag(); - - let a = Array2::ones(s.dim()) * sd.mean().expect("Average of diagonal is NaN"); - - // TODO: replace with eigh - let (e, v) = &(&s * T::from(T::one().neg().as_imag()).unwrap()) - .eigh(UPLO::Lower) - .expect(""); - let e = e.mapv(|x| T::from(x).unwrap()); - - let a = a + &e * T::from(T::one().as_imag()).unwrap(); - let p = v.conj().t().dot(&p); - let b = v.conj().t().dot(&b); - (a, p, b, v.clone()) - } -} diff --git a/ml/s4/src/hippo/dplr.rs b/ml/s4/src/hippo/dplr.rs new file mode 100644 index 00000000..5ca05234 --- /dev/null +++ b/ml/s4/src/hippo/dplr.rs @@ -0,0 +1,167 @@ +/* + Appellation: dplr + Contrib: FL03 +*/ +//! # Diagonal Plus Low Rank (DPLR) +//! +//! 
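// The identity this module relies on, restated as a hedged sketch: starting from the NPLR
// form A = S - p p^T with S normal, diagonalising S = V Lambda V* gives the diagonal-plus-
// low-rank form A = V (Lambda - (V*p)(V*p)*) V*. The helper below replays that
// reconstruction from a DPLR triple (lambda, the V*-rotated p, and V); compare
// test_low_rank in hippo/mod.rs. It is illustrative only and not part of the crate.
use ndarray::prelude::*;
use num::complex::Complex;

fn reconstruct_a_sketch(
    lambda: &Array1<Complex<f64>>,
    p: &Array1<Complex<f64>>, // already rotated by V*, as stored in DPLR
    v: &Array2<Complex<f64>>,
) -> Array2<Complex<f64>> {
    // conjugate transpose of the eigenvector matrix
    let vc = v.t().mapv(|z| z.conj());
    // rank-one correction (V*p)(V*p)* as an outer product
    let p2 = p.clone().insert_axis(Axis(1));
    let low_rank = p2.dot(&p2.t().mapv(|z| z.conj()));
    // A ~ V (diag(lambda) - low_rank) V*
    v.dot(&(Array2::from_diag(lambda) - low_rank)).dot(&vc)
}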
+use super::nplr::NPLR; +use crate::core::prelude::{AsComplex, Conjugate, SquareRoot}; +use ndarray::prelude::{Array, Array1, Array2, Axis}; +use ndarray::ScalarOperand; +use ndarray_linalg::{Eigh, Lapack, Scalar, UPLO}; +use num::traits::NumOps; +use num::{Complex, Num, Signed}; +use serde::{Deserialize, Serialize}; +use std::ops::{Mul, Neg}; + +pub(crate) trait DPLRScalar: + AsComplex + + Conjugate + + Scalar + + ScalarOperand + + Signed + + SquareRoot + + NumOps + + NumOps, Complex> +where + Complex: Lapack, + ::Real: Mul, Output = Complex>, +{ +} + +impl DPLRScalar for T +where + T: AsComplex + + Conjugate + + NumOps + + NumOps, Complex> + + Scalar + + ScalarOperand + + Signed + + SquareRoot, + Complex: Lapack, + ::Real: Mul, Output = Complex>, +{ +} + +pub(crate) fn dplr(features: usize) -> DPLR +where + T: DPLRScalar, + + Complex: Lapack, + ::Real: Mul, Output = Complex>, +{ + let (a, p, b) = NPLR::::new(features).into(); + + // + let s = { + // reshape the p-array from NPLR into a two-dimensional matrix + let p2 = p.clone().insert_axis(Axis(1)); + // compute s + &a + p2.dot(&p2.t()) + }; + // find the diagonal of s + let sd = s.diag(); + // create a matrix from the diagonals of s + let lambda_re = Array::ones(sd.dim()) * sd.mean().expect(""); + + let (e, v) = s + .mapv(|i: T| i * Complex::i().neg()) + .eigh(UPLO::Lower) + .expect(""); + + let lambda = { + // let lambda_im = e.mapv(|i| i * Complex::i()); + let iter = lambda_re + .into_iter() + .zip(e.into_iter()) + .map(|(i, j)| Complex::new(i, T::zero()) + T::from(j).unwrap() * Complex::i()); + Array::from_iter(iter) + }; + let p = p.mapv(AsComplex::as_re); + let b = b.mapv(AsComplex::as_re); + DPLR { + lambda, + p: v.conj().t().dot(&p), + b: v.conj().t().dot(&b), + v, + } +} + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +pub struct DPLR +where + T: Clone + Num, +{ + pub lambda: Array1>, + pub p: Array1>, + pub b: Array1>, + pub v: Array2>, +} + +impl DPLR +where + T: AsComplex + + Conjugate + + NumOps + + NumOps, Complex> + + Scalar + + ScalarOperand + + Signed + + SquareRoot, + Complex: Lapack, + ::Real: NumOps, Complex>, +{ + pub fn new(features: usize) -> Self { + dplr(features) + } +} + +// impl DPLR +// where +// T: S4Float, +// ::Real: FromPrimitive, +// Complex<::Real>: LinalgScalar + ScalarOperand, +// { +// pub fn new(features: usize) -> Self { +// make_dplr_hippo(features) +// } +// } + +impl + From<( + Array1>, + Array1>, + Array1>, + Array2>, + )> for DPLR +where + T: Clone + Num, +{ + fn from( + (lambda, p, b, v): ( + Array1>, + Array1>, + Array1>, + Array2>, + ), + ) -> Self { + DPLR { lambda, p, b, v } + } +} + +impl From> + for ( + Array1>, + Array1>, + Array1>, + Array2>, + ) +where + T: Clone + Num, +{ + fn from(dplr: DPLR) -> Self { + (dplr.lambda, dplr.p, dplr.b, dplr.v) + } +} diff --git a/ml/s4/src/hippo/hippo.rs b/ml/s4/src/hippo/hippo.rs new file mode 100644 index 00000000..9ffaff30 --- /dev/null +++ b/ml/s4/src/hippo/hippo.rs @@ -0,0 +1,111 @@ +/* + Appellation: hippo + Contrib: FL03 +*/ +// use super::dplr::DPLR; +use super::nplr::NPLR; +use crate::core::prelude::{genspace, SquareRoot}; +use ndarray::prelude::Array2; +use ndarray::ScalarOperand; +use num::traits::{Num, NumCast, Signed}; +use serde::{Deserialize, Serialize}; + +pub enum HiPPOs { + HiPPO(HiPPO), + // DPLR(DPLR), + NPLR(NPLR), +} + +impl HiPPOs +where + T: Num + NumCast + ScalarOperand + Signed + SquareRoot, +{ + pub fn new(features: usize) -> Self { + Self::HiPPO(HiPPO::new(features)) + } + + pub fn 
nplr(features: usize) -> Self { + Self::NPLR(NPLR::new(features)) + } +} + +// impl HiPPOs +// where +// T: S4Float, +// Complex<::Real>: LinalgScalar + ScalarOperand, +// { +// pub fn dplr(features: usize) -> Self { +// Self::DPLR(DPLR::new(features)) +// } +// } + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +pub struct HiPPO { + features: usize, + data: Array2, +} + +impl HiPPO { + pub fn features(&self) -> usize { + self.features + } +} + +impl HiPPO +where + T: Num + NumCast + ScalarOperand + Signed + SquareRoot, +{ + pub fn new(features: usize) -> Self { + Self { + features, + data: super::hippo(features), + } + } + + pub fn nplr(&self) -> NPLR { + let base = genspace(self.features()); + let p = (&base + T::one() / T::from(2).unwrap()).mapv(T::sqrt); + let b = (&base * T::from(2).unwrap() + T::one()).mapv(T::sqrt); + NPLR { + a: self.as_ref().clone(), + p, + b, + } + } +} + +// impl HiPPO +// where +// T: S4Float, +// { +// pub fn dplr(&self) -> DPLR { + +// } +// } + +impl AsRef> for HiPPO { + fn as_ref(&self) -> &Array2 { + &self.data + } +} + +impl AsMut> for HiPPO { + fn as_mut(&mut self) -> &mut Array2 { + &mut self.data + } +} + +impl From> for HiPPO { + fn from(a: Array2) -> Self { + Self { + features: a.dim().0, + data: a, + } + } +} + +impl From> for Array2 { + fn from(hippo: HiPPO) -> Self { + hippo.data + } +} diff --git a/ml/s4/src/dplr/kinds.rs b/ml/s4/src/hippo/kinds.rs similarity index 87% rename from ml/s4/src/dplr/kinds.rs rename to ml/s4/src/hippo/kinds.rs index decab8d7..be7dd63a 100644 --- a/ml/s4/src/dplr/kinds.rs +++ b/ml/s4/src/hippo/kinds.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, EnumVariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; #[derive( Clone, @@ -16,13 +16,13 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, EnumVariantNames}; EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] @@ -43,13 +43,13 @@ pub enum Rank { EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] diff --git a/ml/s4/src/hippo/mod.rs b/ml/s4/src/hippo/mod.rs new file mode 100644 index 00000000..8a0a2bbf --- /dev/null +++ b/ml/s4/src/hippo/mod.rs @@ -0,0 +1,119 @@ +/* + Appellation: hippo + Contrib: FL03 +*/ +//! # HiPPO +//! +//! 
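// An elementwise sketch of the matrix utils::hippo builds below (the negated HiPPO-LegS
// operator): A[i][j] = -sqrt(2i + 1) * sqrt(2j + 1) for i > j, -(i + 1) on the diagonal,
// and 0 above it. For features = 3 this is
//   [[-1,        0,         0],
//    [-sqrt(3),  -2,        0],
//    [-sqrt(5),  -sqrt(15), -3]].
// The helper is illustrative only; the crate builds the same matrix via genspace and
// into_triangular.
use ndarray::prelude::*;

fn hippo_sketch(n: usize) -> Array2<f64> {
    Array2::from_shape_fn((n, n), |(i, j)| {
        if i > j {
            -(((2 * i + 1) as f64).sqrt() * ((2 * j + 1) as f64).sqrt())
        } else if i == j {
            -((i + 1) as f64)
        } else {
            0.0
        }
    })
}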
+pub(crate) use self::utils::*; +pub use self::{hippo::*, kinds::*}; + +pub(crate) mod hippo; +pub(crate) mod kinds; + +pub mod dplr; +pub mod nplr; + +pub struct LowRank { + pub mode: Mode, +} + +pub(crate) mod utils { + use crate::core::prelude::{genspace, SquareRoot}; + use ndarray::prelude::{Array2, Axis}; + use ndarray::ScalarOperand; + use ndarray_linalg::{IntoTriangular, UPLO}; + use num::traits::{Num, NumCast, Signed}; + + pub(crate) fn hippo(features: usize) -> Array2 + where + T: Num + NumCast + ScalarOperand + Signed + SquareRoot, + { + let base = genspace::(features).insert_axis(Axis(1)); + let p = (&base * T::from(2).unwrap() + T::one()).mapv(T::sqrt); + let mut a = &p * &p.t(); + a = &a.into_triangular(UPLO::Lower) - &Array2::from_diag(&genspace::(features)); + -a + } +} + +#[cfg(test)] +mod tests { + use super::*; + use dplr::DPLR; + use nplr::NPLR; + + use crate::core::prelude::Conjugate; + use ndarray::prelude::{Array, Axis}; + use num::complex::ComplexFloat; + + #[test] + fn test_hippo() { + let features = 10; + + let a = hippo::(features); + let b = HiPPO::::new(features); + assert_eq!(&a, b.as_ref()); + } + + #[test] + fn test_low_rank() { + let features = 8; + + let nplr = NPLR::::new(features); + let dplr = DPLR::::new(features); + + let hippo = nplr.a.clone(); + + let v = dplr.v.clone(); + // compute the conjugate transpose of the eigenvectors + let vc = v.conj().t().to_owned(); + // create a two-dimensional array from the diagonal of the lambda matrix + let lambda = { + let ld = dplr.lambda.diag().to_owned(); + Array::from_diag(&ld) + }; + // reshape the p values + let p = nplr.p.clone().insert_axis(Axis(1)); + let pc = dplr.p.clone().insert_axis(Axis(1)); + // compute the expected values for NPLR + let a = v.dot(&lambda).dot(&vc) - &p.dot(&p.t()); + // compute the expected values for DPLR + let b = { + let tmp = lambda - pc.dot(&pc.conj().t()); + v.dot(&tmp).dot(&vc) + }; + + let err_nplr = { + let tmp = (&a - &hippo).mapv(|i| i.abs()); + tmp.mean().unwrap() + }; + assert!( + err_nplr <= 1e-4, + "Actual: {:?}\nTolerance: {:?}", + err_nplr, + 1e-4 + ); + let err_dplr = { + let tmp = (&b - &hippo).mapv(|i| i.abs()); + println!("{:?}", &tmp); + tmp.mean().unwrap() + // tmp + }; + assert!( + err_dplr <= 1e-4, + "Actual: {:?}\nTolerance: {:?}", + err_dplr, + 1e-4 + ); + } + + // #[test] + // fn test_nplr() { + // let features = 10; + + // let a = HiPPO::::new(features).nplr(); + // let b = NPLR::::new(features); + // assert_eq!(&a, &b); + // } +} diff --git a/ml/s4/src/hippo/nplr.rs b/ml/s4/src/hippo/nplr.rs new file mode 100644 index 00000000..771c88e2 --- /dev/null +++ b/ml/s4/src/hippo/nplr.rs @@ -0,0 +1,54 @@ +/* + Appellation: nplr + Contrib: FL03 +*/ +//! # Normal Plus Low Rank (NPLR) +//! +//! 
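// A hedged sketch of the two rank-one vectors the nplr helper below produces alongside the
// HiPPO matrix: p_i = sqrt(i + 1/2) and b_i = sqrt(2i + 1). Adding the outer product p p^T
// back onto A yields a normal matrix (a skew-symmetric matrix shifted by -1/2 on the
// diagonal), which is exactly what the dplr module diagonalises. Names are illustrative
// only.
use ndarray::prelude::*;

fn nplr_vectors_sketch(n: usize) -> (Array1<f64>, Array1<f64>) {
    let p = Array1::from_shape_fn(n, |i| (i as f64 + 0.5).sqrt());
    let b = Array1::from_shape_fn(n, |i| (2.0 * i as f64 + 1.0).sqrt());
    (p, b)
}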
+use super::HiPPO; + +use crate::core::prelude::{genspace, SquareRoot}; +use ndarray::prelude::{Array1, Array2}; +use ndarray::ScalarOperand; +use num::traits::{Num, NumCast, Signed}; +use serde::{Deserialize, Serialize}; + +fn nplr(features: usize) -> (Array2, Array1, Array1) +where + T: Num + NumCast + ScalarOperand + Signed + SquareRoot, +{ + let hippo = HiPPO::::new(features); + + let base = genspace::(features); + let p = (&base + (T::one() / T::from(2).unwrap())).mapv(T::sqrt); + let b = (&base * T::from(2).unwrap() + T::one()).mapv(T::sqrt); + (hippo.into(), p, b) +} + +#[derive(Clone, Debug, Default, Deserialize, Eq, PartialEq, Serialize)] +pub struct NPLR { + pub a: Array2, + pub p: Array1, + pub b: Array1, +} + +impl NPLR +where + T: NumCast + ScalarOperand + Signed + SquareRoot, +{ + pub fn new(features: usize) -> Self { + nplr(features).into() + } +} + +impl From> for (Array2, Array1, Array1) { + fn from(nplr: NPLR) -> Self { + (nplr.a, nplr.p, nplr.b) + } +} + +impl From<(Array2, Array1, Array1)> for NPLR { + fn from((a, p, b): (Array2, Array1, Array1)) -> Self { + Self { a, p, b } + } +} diff --git a/ml/s4/src/lib.rs b/ml/s4/src/lib.rs index 349dd913..0142d999 100644 --- a/ml/s4/src/lib.rs +++ b/ml/s4/src/lib.rs @@ -13,12 +13,13 @@ pub(crate) mod specs; pub(crate) mod utils; pub mod cmp; -pub mod dplr; +pub mod hippo; pub mod ops; pub mod params; pub mod ssm; pub(crate) use concision_core as core; +pub(crate) use concision_data as data; pub(crate) use concision_neural as neural; pub mod prelude { diff --git a/ml/s4/src/model/model.rs b/ml/s4/src/model/model.rs index 2c043593..1f97adc9 100644 --- a/ml/s4/src/model/model.rs +++ b/ml/s4/src/model/model.rs @@ -8,12 +8,19 @@ use crate::prelude::SSMStore; use ndarray::prelude::{Array1, Array2, NdFloat}; use ndarray_conv::{Conv2DFftExt, PaddingMode, PaddingSize}; use ndarray_linalg::Scalar; +use num::complex::ComplexFloat; use num::Float; -// use num::complex::{Complex, ComplexFloat}; use rustfft::FftNum; - +// use std::collections::HashMap; use crate::prelude::SSMParams::*; +pub struct S4State +where + T: ComplexFloat, +{ + cache: Array1, +} + pub struct S4 where T: Float, @@ -61,7 +68,7 @@ impl S4 where T: Float, { - pub fn setup(mut self) -> Self { + pub fn setup(self) -> Self { self } } diff --git a/ml/s4/src/ops/convolve.rs b/ml/s4/src/ops/convolve.rs index 2ab15635..87f80a25 100644 --- a/ml/s4/src/ops/convolve.rs +++ b/ml/s4/src/ops/convolve.rs @@ -2,21 +2,108 @@ Appellation: convolve Contrib: FL03 */ -use crate::prelude::powmat; -use ndarray::prelude::{s, Array2, Axis, NdFloat}; +// use crate::core::ops::fft::{rfft, irfft, FftPlan}; +use crate::core::prelude::{floor_div, pad, Power}; +use crate::prelude::{irfft, rfft}; +use ndarray::linalg::Dot; +use ndarray::prelude::{array, s, Array, Array1, Array2, Axis}; +use ndarray::ScalarOperand; +use num::complex::{Complex, ComplexFloat}; +use num::traits::{Float, FloatConst, Num, NumAssignOps}; +use rustfft::FftNum; -pub fn convolve() {} - -pub fn k_convolve(a: &Array2, b: &Array2, c: &Array2, l: usize) -> Array2 +/// Generates a large convolution kernal +pub fn k_conv(a: &Array2, b: &Array2, c: &Array2, l: usize) -> Array1 where - T: NdFloat, + T: Num + ScalarOperand, + Array2: Dot, Output = Array2>, { - let b = b.clone().remove_axis(Axis(1)); - let mut res = Array2::::zeros((l, a.shape()[0])); + let f = |i: usize| c.dot(&a.pow(i).dot(b)); + + let mut store = Vec::new(); for i in 0..l { - let tmp = powmat(a, i); - let out = c.dot(&tmp.dot(&b)); - res.slice_mut(s![i, 
..]).assign(&out); + store.extend(f(i)); + } + Array::from_vec(store) +} + +// pub fn casual_convolution(u: &Array1, k: &Array1) -> Array1 +// where +// T: Float + FloatConst, +// Complex: ComplexFloat + NumAssignOps, +// { +// assert!(u.shape()[0] == k.shape()[0]); +// let l = u.shape()[0]; +// let plan = FftPlan::new(l); +// let ud = rfft::(u.clone().into_raw_vec(), &plan); +// let kd = rfft::(k.clone().into_raw_vec(), &plan); + +// let ud = Array::from_vec(ud); +// let kd = Array::from_vec(kd); + +// let tmp = ud * kd; +// let res = irfft(tmp.into_raw_vec().as_slice(), &plan); +// Array::from_vec(res) +// } + +pub fn casual_convolution(u: &Array1, k: &Array1) -> Array1 +where + T: Default + FftNum + Float + FloatConst, + Complex: ComplexFloat + NumAssignOps, +{ + assert!(u.shape()[0] == k.shape()[0]); + let l = u.shape()[0]; + println!("{:?}", l); + let l2 = l * 2; + let inv_size = floor_div(l, 2) + 1; + let ud = { + let padded = pad(u.clone(), k.len(), Some(T::zero())); + Array::from_vec(rfft::(padded)) + }; + + let kd = { + let padded = pad(k.clone(), l, Some(T::zero())); + Array::from_vec(rfft::(padded)) + }; + + let mut tmp = &ud * kd; + // let a = array![Complex::new(T::zero(), T::zero())]; + // tmp.append(Axis(0), a.view()).expect(""); + let res = irfft(tmp, l2); + // let res = irfft(tmp.slice(s![0..l]).to_vec(), l2); + Array::from_vec(res[0..l].to_vec()) +} + +pub struct Filter {} + +#[cfg(test)] +mod tests { + use super::*; + use crate::core::ops::fft::*; + use crate::core::prelude::{arange, assert_approx}; + + use lazy_static::lazy_static; + use ndarray::prelude::*; + + const _FEATURES: usize = 4; + const SAMPLES: usize = 8; + + lazy_static! { + static ref EXP: Array1 = + array![-7.10542736e-15, 0.0, 1.0, 4.0, 1.0e1, 2.0e1, 3.5e1, 5.6e1]; + static ref EXP2: Array1 = + array![0.0, -7.10542736e-15, 1.0, 4.0, 1.0e1, 2.0e1, 3.5e1, 5.6e1]; + } + + // #[ignore] + #[test] + fn test_casual_convolution() { + let u = Array::range(0.0, SAMPLES as f64, 1.0); + let k = Array::range(0.0, SAMPLES as f64, 1.0); + + let res = casual_convolution(&u, &k); + for (i, j) in res.into_iter().zip(EXP2.clone().into_iter()) { + assert_approx(i, j, 1e-8); + } } - res } diff --git a/ml/s4/src/ops/discretize.rs b/ml/s4/src/ops/discretize.rs index 922b5d24..5f7e9663 100644 --- a/ml/s4/src/ops/discretize.rs +++ b/ml/s4/src/ops/discretize.rs @@ -2,81 +2,81 @@ Appellation: discretize Contrib: FL03 */ -use crate::core::prelude::Conjugate; -use crate::prelude::powmat; +use crate::core::prelude::{Conjugate, Power}; -use ndarray::{Array2, ScalarOperand}; +use ndarray::{Array, Array1, Array2, Axis, ScalarOperand}; use ndarray_linalg::{Inverse, Lapack, Scalar}; -use num::Float; +use num::complex::ComplexFloat; +use num::traits::{Float, NumOps}; -pub fn discretize( +pub fn discretize( a: &Array2, b: &Array2, c: &Array2, - step: T, -) -> anyhow::Result<(Array2, Array2, Array2)> + step: S, +) -> anyhow::Result> where - T: Lapack + Scalar + ScalarOperand, + S: Scalar + ScalarOperand + NumOps, + T: ComplexFloat + Lapack + NumOps, { - let ss = step / T::from(2).unwrap(); // half step - let eye = Array2::::eye(a.shape()[0]); + let (n, ..) 
= a.dim(); + let hs = step / S::from(2).unwrap(); // half step + let eye = Array2::::eye(n); - let be = (&eye - a * ss).inv().expect("Could not invert matrix"); + let bl = (&eye - a * hs).inv()?; - let ab = be.dot(&(&eye + a * ss)); - let bb = (b * ss).dot(&b.t()); + let ab = bl.dot(&(&eye + a * hs)); + let bb = (bl * step).dot(b); - Ok((ab, bb, c.clone())) + Ok((ab, bb, c.clone()).into()) } -pub fn discretize_dplr( - lambda: &Array2, - p: &Array2, - q: &Array2, - b: &Array2, - c: &Array2, +pub fn discretize_dplr( + lambda: &Array1, + p: &Array1, + q: &Array1, + b: &Array1, + c: &Array1, step: T, l: usize, -) -> anyhow::Result<(Array2, Array2, Array2)> +) -> anyhow::Result> where - T: Conjugate + Float + Lapack + Scalar + ScalarOperand, + T: Float + Conjugate + Lapack + NumOps + Scalar + ScalarOperand, + S: ComplexFloat + Conjugate + Lapack + NumOps, { - let (n, _m) = lambda.dim(); - - let eye = Array2::::eye(n); - let ss = T::from(2).unwrap() * step.recip(); - - let a = { - let tmp = Array2::from_diag(&lambda.diag()); - tmp - &p.dot(&q.conj().t()) - }; - - let a0 = &eye * ss + &a; - - let d = { - let tmp = lambda.mapv(|i| (ss - i).recip()); - Array2::from_diag(&tmp.diag()) - }; - - let qc = { - let tmp = q.conj(); - tmp.t().to_owned() - }; - let p2 = p.clone(); - + let n = lambda.dim(); + // create an identity matrix; (n, n) + let eye = Array2::::eye(n); + // compute the step size + let hs = T::from(2).unwrap() / step; + // turn the parameters into two-dimensional matricies + let b2 = b.clone().insert_axis(Axis(1)); + + let c2 = c.clone().insert_axis(Axis(0)); + + let p2 = p.clone().insert_axis(Axis(1)); + // compute the conjugate transpose of q + let qct = q.clone().conj().t().to_owned().insert_axis(Axis(0)); + // create a diagonal matrix D from the scaled eigenvalues: Dim(n, n) :: 1 / (step_size - value) + let d = Array::from_diag(&lambda.mapv(|i| (hs - i).recip())); + + // create a diagonal matrix from the eigenvalues + let a = Array::from_diag(&lambda) - &p2.dot(&q.clone().insert_axis(Axis(1)).conj().t()); + // compute A0 + let a0 = &eye * hs + &a; + // compute A1 let a1 = { - let tmp = qc.dot(&d.dot(&p2)).mapv(|i| (T::one() + i).recip()); - &d - &d.dot(&p2) * &tmp * &qc.dot(&d) + let tmp = qct.dot(&d.dot(&p2)).mapv(|i| (T::one() + i).recip()); + &d - (&d.dot(&p2) * tmp * &qct.dot(&d)) }; - - let ab = a0.dot(&a1); - let bb = a1.dot(b) * T::from(2).unwrap(); - let cb = { - let tmp = (&eye - powmat(&ab, l)).inv()?.conj(); - c.dot(&tmp) - }; - - Ok((ab, bb, cb.conj())) + // compute a-bar + let ab = a1.dot(&a0); + // compute b-bar + let bb = a1.dot(&b2) * T::from(2).unwrap(); + // compute c-bar + let cb = c2.dot(&(&eye - &ab.pow(l)).inv()?.conj()); + // return the discretized system + Ok((ab, bb, cb.conj()).into()) } pub trait Discretize @@ -88,7 +88,60 @@ where fn discretize(&self, step: T) -> Self::Output; } -pub enum DiscretizeArgs {} +#[derive(Clone, Debug)] +pub struct Discrete { + pub a: Array2, + pub b: Array2, + pub c: Array2, +} + +impl Discrete { + pub fn new(a: Array2, b: Array2, c: Array2) -> Self { + Self { a, b, c } + } + + pub fn from_features(features: usize) -> Self + where + T: Default, + { + let a = Array2::::default((features, features)); + let b = Array2::::default((features, 1)); + let c = Array2::::default((1, features)); + Self::new(a, b, c) + } +} + +impl Discrete { + pub fn discretize(&self, step: S) -> anyhow::Result + where + S: Scalar + ScalarOperand + NumOps, + T: ComplexFloat + Lapack + NumOps, + { + discretize(&self.a, &self.b, &self.c, step) + } +} + +impl 
From<(Array2, Array2, Array2)> for Discrete { + fn from((a, b, c): (Array2, Array2, Array2)) -> Self { + Self::new(a, b, c) + } +} + +impl From> for (Array2, Array2, Array2) { + fn from(discrete: Discrete) -> Self { + (discrete.a, discrete.b, discrete.c) + } +} + +pub enum DiscretizeArgs { + DPLR { + lambda: Array1, + p: Array1, + q: Array1, + b: Array1, + c: Array1, + }, +} pub struct Discretizer { pub step: T, diff --git a/ml/s4/src/ops/mod.rs b/ml/s4/src/ops/mod.rs index 3e9ca368..09a6e2eb 100644 --- a/ml/s4/src/ops/mod.rs +++ b/ml/s4/src/ops/mod.rs @@ -10,4 +10,29 @@ pub(crate) mod gen; pub(crate) mod scan; #[cfg(test)] -mod tests {} +mod tests { + use super::*; + use crate::core::prelude::randc_normal; + use crate::hippo::dplr::DPLR; + + const FEATURES: usize = 8; + const RNGKEY: u64 = 1; + const SAMPLES: usize = 16; + + + #[test] + fn test_discretize() { + let step = (SAMPLES as f64).recip(); + + let c = randc_normal(RNGKEY, FEATURES); + + let dplr = DPLR::::new(FEATURES); + let (lambda, p, b, _) = dplr.clone().into(); + + let _discrete = { + let tmp = discretize_dplr(&lambda, &p, &p, &b, &c, step, SAMPLES); + assert!(tmp.is_ok(), "Error: {:?}", tmp.err()); + tmp.unwrap() + }; + } +} diff --git a/ml/s4/src/ops/scan.rs b/ml/s4/src/ops/scan.rs index 9454044a..601dc810 100644 --- a/ml/s4/src/ops/scan.rs +++ b/ml/s4/src/ops/scan.rs @@ -3,17 +3,42 @@ Contrib: FL03 */ use crate::params::SSMStore; - +use ndarray::prelude::{Array1, Array2, ArrayView1,}; +use ndarray_linalg::error::LinalgError; +use ndarray_linalg::{vstack, Scalar}; use num::Float; -pub struct Scan<'a, T = f64> +pub fn scan_ssm( + a: &Array2, + b: &Array2, + c: &Array2, + u: &Array2, + x0: &Array1, +) -> Result, LinalgError> +where + T: Scalar, +{ + let step = |xs: &mut Array1, us: ArrayView1| { + let x1 = a.dot(xs) + b.dot(&us); + let y1 = c.dot(&x1); + *xs = x1; + Some(y1) + }; + let scan: Vec> = u + .outer_iter() + .scan(x0.clone(), step) + .collect(); + vstack(scan.as_slice()) +} + +pub struct Scanner<'a, T = f64> where T: Float, { model: &'a mut SSMStore, } -impl<'a, T> Scan<'a, T> +impl<'a, T> Scanner<'a, T> where T: Float, { @@ -29,3 +54,33 @@ where self.model } } + +#[cfg(test)] +mod tests { + use super::*; + + use ndarray::prelude::*; + + const FEATURES: usize = 3; + + + #[test] + fn test_scan() { + let exp = array![[0.0], [5.0], [70.0]]; + + let u = Array::range(0.0, FEATURES as f64, 1.0).insert_axis(Axis(1)); + let x0 = Array1::zeros(FEATURES); // Array1::>::zeros(FEATURES) + + let a = Array::range(0.0, (FEATURES * FEATURES) as f64, 1.0) + .into_shape((FEATURES, FEATURES)) + .unwrap(); + let b = Array::range(0.0, FEATURES as f64, 1.0).insert_axis(Axis(1)); + let c = Array::range(0.0, FEATURES as f64, 1.0).insert_axis(Axis(0)); + + + let scan = scan_ssm(&a, &b, &c, &u, &x0).expect(""); + + assert_eq!(&scan, &exp); + } + +} \ No newline at end of file diff --git a/ml/s4/src/params/dplr.rs b/ml/s4/src/params/dplr.rs new file mode 100644 index 00000000..3f8c8db2 --- /dev/null +++ b/ml/s4/src/params/dplr.rs @@ -0,0 +1,42 @@ +/* + Appellation: kernel + Contrib: FL03 +*/ +use ndarray::prelude::Array1; + +pub struct DPLRParams { + pub lambda: Array1, + pub p: Array1, + pub q: Array1, + pub b: Array1, + pub c: Array1, +} + +impl DPLRParams { + pub fn new(lambda: Array1, p: Array1, q: Array1, b: Array1, c: Array1) -> Self { + Self { lambda, p, q, b, c } + } +} + +// impl DPLRParams +// where +// T: ComplexFloat, +// ::Real: NumOps + NumOps::Real>, Complex<::Real>>, +// Complex<::Real>: NumOps + NumOps<::Real, 
Complex<::Real>> +// { +// pub fn kernel(&self, step: T, l: usize) -> Array1<::Real> { +// let lt = T::from(l).unwrap(); +// let omega_l = { +// let f = | i: usize | -> Complex<::Real> { +// Complex::<::Real>::i().neg() * ::Real::from(i).unwrap() * ::Real::PI() / lt +// }; +// Array::from_iter((0..l).map(f)) +// }; +// } +// } + +impl From<(Array1, Array1, Array1, Array1, Array1)> for DPLRParams { + fn from((lambda, p, q, b, c): (Array1, Array1, Array1, Array1, Array1)) -> Self { + Self::new(lambda, p, q, b, c) + } +} diff --git a/ml/s4/src/params/kinds.rs b/ml/s4/src/params/kinds.rs index 490e71d8..300a192c 100644 --- a/ml/s4/src/params/kinds.rs +++ b/ml/s4/src/params/kinds.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, EnumVariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; #[derive( Clone, @@ -16,13 +16,13 @@ use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, EnumVariantNames}; EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] @@ -75,18 +75,18 @@ impl From for SSMParams { EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] #[strum(serialize_all = "lowercase")] -pub enum DPLRParams { +pub enum DPLRParam { #[default] Lambda = 0, P = 1, diff --git a/ml/s4/src/params/mod.rs b/ml/s4/src/params/mod.rs index 249db5f1..13ed430d 100644 --- a/ml/s4/src/params/mod.rs +++ b/ml/s4/src/params/mod.rs @@ -1,13 +1,14 @@ /* - Appellation: store + Appellation: params Contrib: FL03 */ -pub use self::{kinds::*, store::*}; +pub use self::{dplr::*, kinds::*, store::*}; +pub(crate) mod dplr; pub(crate) mod kinds; pub(crate) mod store; -use ndarray::prelude::{Array, Array1, Array2, Ix2}; +use ndarray::prelude::{Array, Array2, Ix2}; use num::Num; use std::collections::HashMap; diff --git a/ml/s4/src/params/store.rs b/ml/s4/src/params/store.rs index 25d21608..a4380f75 100644 --- a/ml/s4/src/params/store.rs +++ b/ml/s4/src/params/store.rs @@ -27,21 +27,6 @@ impl SSMStore where T: Clone + Num, { - pub fn new(a: Array2, b: Array2, c: Array2, d: Array2) -> Self { - Self { a, b, c, d } - } - - pub fn from_features(features: usize) -> Self - where - T: Default, - { - let a = Array2::::default((features, features)); - let b = Array2::::default((features, 1)); - let c = Array2::::default((1, features)); - let d = Array2::::default((1, 1)); - Self::new(a, b, c, d) - } - pub fn ones(features: usize) -> Self { let a = Array2::::ones((features, features)); let b = Array2::::ones((features, 1)); @@ -57,6 +42,23 @@ where let d = Array2::::zeros((1, 1)); Self::new(a, b, c, d) } +} + +impl SSMStore { + pub fn new(a: Array2, b: Array2, c: Array2, d: Array2) -> Self { + Self { a, b, c, d } + } + + pub fn from_features(features: usize) -> Self + where + T: Default, + { + let a = Array2::::default((features, features)); + let b = Array2::::default((features, 1)); + let c = Array2::::default((1, features)); + let d = Array2::::default((1, 1)); + Self::new(a, b, c, d) + } pub fn a(&self) -> &Array2 { &self.a @@ -97,8 +99,9 @@ where { pub fn scan(&self, u: &Array2, x0: &Array1) -> Result, LinalgError> { let step = |xs: &mut Array1, us: ArrayView1| { - let x1 = self.a().dot(xs) + self.b().t().dot(&us); - let y1 = self.c().dot(&x1.t()); + let x1 = self.a().dot(xs) + 
self.b().dot(&us); + let y1 = self.c().dot(&x1); + *xs = x1; Some(y1) }; vstack( @@ -132,10 +135,7 @@ where } } -impl ops::Index for SSMStore -where - T: Float, -{ +impl ops::Index for SSMStore { type Output = Array2; fn index(&self, index: SSMParams) -> &Self::Output { @@ -149,10 +149,7 @@ where } } -impl ops::IndexMut for SSMStore -where - T: Float, -{ +impl ops::IndexMut for SSMStore { fn index_mut(&mut self, index: SSMParams) -> &mut Self::Output { use SSMParams::*; match index { @@ -164,51 +161,33 @@ where } } -impl From> for (Array2, Array2, Array2, Array2) -where - T: Float, -{ +impl From> for (Array2, Array2, Array2, Array2) { fn from(store: SSMStore) -> Self { (store.a, store.b, store.c, store.d) } } -impl<'a, T> From<&'a SSMStore> for (&'a Array2, &'a Array2, &'a Array2, &'a Array2) -where - T: Float, -{ +impl<'a, T> From<&'a SSMStore> for (&'a Array2, &'a Array2, &'a Array2, &'a Array2) { fn from(store: &'a SSMStore) -> Self { (&store.a, &store.b, &store.c, &store.d) } } -impl From<(Array2, Array2, Array2, Array2)> for SSMStore -where - T: Float, -{ +impl From<(Array2, Array2, Array2, Array2)> for SSMStore { fn from((a, b, c, d): (Array2, Array2, Array2, Array2)) -> Self { Self::new(a, b, c, d) } } -impl From> for HashMap> -where - T: Float, -{ +impl From> for HashMap> { fn from(store: SSMStore) -> Self { - let mut map = HashMap::new(); - - map.insert(SSMParams::A, store.a); - map.insert(SSMParams::B, store.b); - map.insert(SSMParams::C, store.c); - map.insert(SSMParams::D, store.d); - map + HashMap::from_iter(store.into_iter()) } } impl FromIterator<(SSMParams, Array2)> for SSMStore where - T: Default + Float, + T: Clone + Default, { fn from_iter)>>(iter: I) -> Self { let tmp = HashMap::>::from_iter(iter); @@ -236,10 +215,7 @@ where } } -impl IntoIterator for SSMStore -where - T: Float, -{ +impl IntoIterator for SSMStore { type Item = (SSMParams, Array2); type IntoIter = std::collections::hash_map::IntoIter>; diff --git a/ml/s4/src/specs.rs b/ml/s4/src/specs.rs index 0c990b8d..e3a5e10c 100644 --- a/ml/s4/src/specs.rs +++ b/ml/s4/src/specs.rs @@ -2,6 +2,10 @@ Appellation: specs Contrib: FL03 */ +use crate::core::prelude::AsComplex; +use ndarray::prelude::{Array, Dimension}; +use num::complex::ComplexFloat; +use rustfft::{FftNum, FftPlanner}; pub trait Scan { type Output; @@ -9,8 +13,42 @@ pub trait Scan { fn scan(&self, args: &T, initial_state: &S) -> Self::Output; } -pub trait StateSpace { - type Config; +pub trait NdFft { + type Output; + + fn fft(&self, args: &Self) -> Self::Output; + + fn ifft(&self, args: &Self) -> Self::Output; +} + +impl NdFft for Array +where + D: Dimension, + T: AsComplex + ComplexFloat + FftNum, +{ + type Output = Self; + + fn fft(&self, args: &Self) -> Self::Output { + let mut buffer = vec![T::zero().as_re(); args.len()]; + let mut planner = FftPlanner::new(); + let fft = planner.plan_fft_forward(args.len()); + fft.process(buffer.as_mut_slice()); + let buffer = buffer + .into_iter() + .map(|i| T::from(i).unwrap()) + .collect::>(); + Self::from_shape_vec(args.dim(), buffer).expect("") + } - fn config(&self) -> &Self::Config; + fn ifft(&self, args: &Self) -> Self::Output { + let mut buffer = vec![T::zero().as_re(); args.len()]; + let mut planner = FftPlanner::new(); + let fft = planner.plan_fft_inverse(args.len()); + fft.process(buffer.as_mut_slice()); + let buffer = buffer + .into_iter() + .map(|i| T::from(i).unwrap()) + .collect::>(); + Self::from_shape_vec(args.dim(), buffer).expect("") + } } diff --git a/ml/s4/src/ssm/mod.rs b/ml/s4/src/ssm/mod.rs 
index 7a2dec3b..c94a7353 100644 --- a/ml/s4/src/ssm/mod.rs +++ b/ml/s4/src/ssm/mod.rs @@ -25,7 +25,7 @@ mod tests { let step = 0.001; let config = SSMConfig::new(true, 9, 2); - let model = SSM::::create(config).setup(); - assert!(model.discretize(step).is_ok()); + // let model = SSM::::create(config).setup(); + // assert!(model.discretize(step).is_ok()); } } diff --git a/ml/s4/src/ssm/model.rs b/ml/s4/src/ssm/model.rs index 094e6cbc..84108292 100644 --- a/ml/s4/src/ssm/model.rs +++ b/ml/s4/src/ssm/model.rs @@ -4,81 +4,34 @@ */ use super::SSMConfig; use crate::neural::Forward; +use crate::ops::Discrete; use crate::params::{SSMParams::*, SSMStore}; -use crate::prelude::{discretize, k_convolve}; -use ndarray::prelude::{Array1, Array2, NdFloat}; +use crate::prelude::{discretize, k_conv}; +use ndarray::prelude::{Array1, Array2, Axis, NdFloat}; +use ndarray::ScalarOperand; use ndarray_conv::{Conv2DFftExt, PaddingMode, PaddingSize}; use ndarray_linalg::{Lapack, Scalar}; -use num::Float; +use num::complex::ComplexFloat; +use num::traits::{Float, FloatConst, Num, NumOps}; use rustfft::FftNum; -#[derive(Clone, Debug)] -pub struct Discrete { - pub a: Array2, - pub b: Array2, - pub c: Array2, -} - -impl Discrete -where - T: Float, -{ - pub fn new(a: Array2, b: Array2, c: Array2) -> Self { - Self { a, b, c } - } - - pub fn from_features(features: usize) -> Self - where - T: Default, - { - let a = Array2::::eye(features); - let b = Array2::::zeros((features, 1)); - let c = Array2::::zeros((features, features)); - Self { a, b, c } - } -} - -impl From<(Array2, Array2, Array2)> for Discrete -where - T: Float, -{ - fn from((a, b, c): (Array2, Array2, Array2)) -> Self { - Self { a, b, c } - } -} - -impl From> for (Array2, Array2, Array2) -where - T: Float, -{ - fn from(discrete: Discrete) -> Self { - (discrete.a, discrete.b, discrete.c) - } -} - -pub struct SSM -where - T: Float, -{ +pub struct SSM { cache: Array1, config: SSMConfig, - kernel: Array2, + kernel: Array1, params: SSMStore, ssm: Discrete, } -impl SSM -where - T: Float, -{ +impl SSM { pub fn create(config: SSMConfig) -> Self where - T: Default, + T: Clone + Default, { let features = config.features(); - let cache = Array1::::zeros(features); - let kernel = Array2::::zeros((features, features)); + let cache = Array1::::default(features); + let kernel = Array1::::default(features); let params = SSMStore::from_features(features); Self { cache, @@ -97,11 +50,20 @@ where &mut self.config } - pub fn kernel(&self) -> &Array2 { + pub fn discretize(&self, step: S) -> anyhow::Result> + where + S: Scalar + ScalarOperand + NumOps, + T: ComplexFloat + Lapack + NumOps, + { + let discrete = discretize(&self.params[A], &self.params[B], &self.params[C], step)?; + Ok(discrete.into()) + } + + pub fn kernel(&self) -> &Array1 { &self.kernel } - pub fn kernel_mut(&mut self) -> &mut Array2 { + pub fn kernel_mut(&mut self) -> &mut Array1 { &mut self.kernel } @@ -114,71 +76,62 @@ where } } -impl SSM -where - T: Lapack + NdFloat + Scalar, -{ - pub fn setup(mut self) -> Self { - self.kernel = self.gen_filter(); - - self.ssm = self.discretize(self.config().step_size()).expect(""); - self - } -} - -impl SSM -where - T: NdFloat + Lapack + Scalar, -{ - pub fn scan( - &self, - u: &Array2, - x0: &Array1, - ) -> Result, ndarray_linalg::error::LinalgError> { - self.params.scan(u, x0) - } - - pub fn conv(&self, u: &Array2) -> anyhow::Result> - where - T: FftNum, - { - let mode = PaddingMode::<2, T>::Const(T::zero()); - let size = PaddingSize::Full; - if let Some(res) = 
u.conv_2d_fft(&self.kernel, size, mode) { - Ok(res) - } else { - Err(anyhow::anyhow!("convolution failed")) - } - } - - pub fn discretize(&self, step: T) -> anyhow::Result> { - let discrete = discretize(&self.params[A], &self.params[B], &self.params[C], step)?; - Ok(discrete.into()) - } - - pub fn gen_filter(&self) -> Array2 { - k_convolve( - &self.params[A], - &self.params[B], - &self.params[C], - self.config().samples(), - ) - } -} - -impl Forward> for SSM -where - T: FftNum + Lapack + NdFloat + Scalar, -{ - type Output = anyhow::Result>; - - fn forward(&self, args: &Array2) -> Self::Output { - let res = if !self.config().decode() { - self.conv(args)? - } else { - self.scan(args, &self.cache)? - }; - let pred = res + args * &self.params[D]; - Ok(pred) - } -} +// impl SSM +// where +// T: ComplexFloat + Lapack + NumOps<::Real> + Scalar, +// ::Real: ScalarOperand + NumOps + NumOps, +// { +// pub fn setup(mut self) -> Self { +// self.kernel = self.gen_filter(); + +// self.ssm = self.discretize(self.config().step_size()).expect(""); +// self +// } + +// pub fn scan( +// &self, +// u: &Array2, +// x0: &Array1, +// ) -> Result, ndarray_linalg::error::LinalgError> { +// self.params.scan(u, x0) +// } + +// pub fn conv(&self, u: &Array2) -> anyhow::Result> +// where +// T: FftNum, +// { +// let mode = PaddingMode::<2, T>::Const(T::zero()); +// let size = PaddingSize::Full; +// if let Some(res) = u.conv_2d_fft(&self.kernel.clone().insert_axis(Axis(1)), size, mode) { +// Ok(res) +// } else { +// Err(anyhow::anyhow!("convolution failed")) +// } +// } + +// pub fn gen_filter(&self) -> Array1 { +// k_conv( +// &self.params[A], +// &self.params[B], +// &self.params[C], +// self.config().samples(), +// ) +// } +// } + +// impl Forward> for SSM +// where +// T: FftNum + Lapack + NdFloat + Scalar, +// { +// type Output = anyhow::Result>; + +// fn forward(&self, args: &Array2) -> Self::Output { +// let res = if !self.config().decode() { +// self.conv(args)? +// } else { +// self.scan(args, &self.cache)? 
+// }; +// let pred = res + args * &self.params[D]; +// Ok(pred) +// } +// } diff --git a/ml/s4/src/utils.rs b/ml/s4/src/utils.rs index 09ae480b..0930f85f 100644 --- a/ml/s4/src/utils.rs +++ b/ml/s4/src/utils.rs @@ -2,112 +2,28 @@ Appellation: utils Contrib: FL03 */ -use crate::core::prelude::{AsComplex, Conjugate}; +pub use self::fft::*; + use ndarray::prelude::*; use ndarray::{IntoDimension, ScalarOperand}; -use ndarray_linalg::Scalar; use ndarray_rand::rand_distr::uniform::SampleUniform; -use ndarray_rand::rand_distr::{Distribution, StandardNormal, Uniform}; +use ndarray_rand::rand_distr::{Distribution, Uniform}; use ndarray_rand::RandomExt; -use num::complex::{Complex, ComplexFloat}; -use num::traits::float::{Float, FloatConst, FloatCore}; -use num::{Num, Signed}; -use rustfft::{FftNum, FftPlanner}; - -pub fn stdnorm(shape: impl IntoDimension) -> Array -where - D: Dimension, - StandardNormal: Distribution, -{ - Array::random(shape, StandardNormal) -} +use num::complex::{Complex, ComplexDistribution,}; +use num::traits::Num; +use std::ops::Neg; -pub fn randcomplex(shape: impl IntoDimension) -> Array, D> +/// +pub fn cauchy(a: &Array, b: &Array, c: &Array) -> Array where - D: Dimension, - T: ComplexFloat, - StandardNormal: Distribution, -{ - let dim = shape.into_dimension(); - let re = Array::random(dim.clone(), StandardNormal); - let im = Array::random(dim.clone(), StandardNormal); - let mut res = Array::zeros(dim); - ndarray::azip!((re in &re, im in &im, res in &mut res) { - *res = Complex::new(*re, *im); - }); - res -} - -pub fn cauchy(v: &Array, omega: &Array, lambda: &Array) -> Array -where - D: Dimension, - T: Clone + Num + ScalarOperand + Signed, + A: Dimension, + B: Dimension, + T: Num + Neg + ScalarOperand, { - let cdot = |b: T| (v / (lambda * T::one().neg() + b)).sum(); - omega.mapv(cdot) + let cdot = |b: T| (a / (c * T::one().neg() + b)).sum(); + b.mapv(cdot) } - -pub fn cauchy_complex( - v: &Array, - omega: &Array, - lambda: &Array, -) -> Array -where - D: Dimension, - S: Dimension, - T: ComplexFloat + ScalarOperand, -{ - let cdot = |b: T| (v / (lambda * T::one().neg() + b)).sum(); - omega.mapv(cdot) -} - -// pub fn eig_sym(args: &Array2) -> (Array1, Array2) { -// let sym = args.clone().into_nalgebra().symmetric_eigen(); -// ( -// sym.eigenvalues.into_ndarray1(), -// sym.eigenvectors.into_ndarray2(), -// ) -// } - -// pub fn eig_csym(args: &Array2>) -> (Array1, Array2>) { -// let sym = args.clone().into_nalgebra().symmetric_eigen(); -// let (eig, v) = (sym.eigenvalues, sym.eigenvectors); -// (eig.into_ndarray1(), v.into_ndarray2()) -// } - -// pub fn eigh(args: &Array2) -> (Array1, Array2) { -// let na = args.clone().into_nalgebra(); -// let sym = na.symmetric_eigen(); -// let v = sym.eigenvectors; -// let eig = sym.eigenvalues.into_ndarray1(); -// let eigval = v.into_ndarray2(); -// (eig, eigval) -// } - -pub fn powmat(a: &Array2, n: usize) -> Array2 -where - T: Float + 'static, -{ - if !a.is_square() { - panic!("Matrix must be square"); - } - let mut res = a.clone(); - for _ in 1..n { - res = res.dot(a); - } - res -} - -pub fn casual_colvolution(a: &Array2, b: &Array2) -> Array2 -where - T: FftNum, -{ - let mut planner = FftPlanner::::new(); - let fft = planner.plan_fft_forward(a.len()); - - a.clone() -} - +/// pub fn logstep(a: T, b: T, shape: impl IntoDimension) -> Array where D: Dimension, @@ -115,80 +31,52 @@ where { Array::random(shape, Uniform::new(a, b)) * (b.ln() - a.ln()) + a.ln() } - -pub fn logstep_init(a: T, b: T) -> impl Fn(D) -> Array +/// Generate a random 
array of complex numbers with real and imaginary parts in the range [0, 1) +pub fn randc(shape: impl IntoDimension) -> Array, D> where D: Dimension, - T: NdFloat + SampleUniform, + T: Distribution + Num, + ComplexDistribution: Distribution>, { - move |shape| logstep(a, b, shape) + let distr = ComplexDistribution::::new(T::one(), T::one()); + Array::random(shape, distr) } -pub fn scanner( - a: &Array2, - b: &Array2, - c: &Array2, - u: &Array2, - x0: &Array1, -) -> Array2 -where - T: NdFloat, -{ - let step = |xs: &mut Array1, us: ArrayView1| { - let x1 = a.dot(xs) + b.t().dot(&us); - let y1 = c.dot(&x1.t()); - Some(y1) - }; - let scan = u.outer_iter().scan(x0.clone(), step).collect::>(); - let shape = [scan.len(), scan[0].len()]; - let mut res = Array2::::zeros(shape.into_dimension()); - for (i, s) in scan.iter().enumerate() { - res.slice_mut(s![i, ..]).assign(s); +pub(crate) mod fft { + use num::{Complex, NumCast}; + use realfft::RealFftPlanner; + use rustfft::FftNum; + + pub fn rfft(args: impl IntoIterator) -> Vec> + where + T: FftNum, + { + let mut buffer = Vec::from_iter(args); + // make a planner + let mut real_planner = RealFftPlanner::::new(); + // create a FFT + let r2c = real_planner.plan_fft_forward(buffer.len()); + // make a vector for storing the spectrum + let mut spectrum = r2c.make_output_vec(); + // forward transform the signal + r2c.process(&mut buffer, &mut spectrum).unwrap(); + spectrum } - res -} - -pub fn kernel_dplr( - lambda: &Array2, - p: &Array2, - q: &Array2, - b: &Array2, - c: &Array2, - step: T, - l: usize, -) -> Array1::Real>> -where - T: AsComplex + ComplexFloat + Conjugate + FloatConst + Scalar + ScalarOperand, - ::Real: NdFloat + Num + Signed + num::FromPrimitive + num::Zero, - ::Complex: ComplexFloat, -{ - let omega_l = { - let f = |i: usize| { - T::from(i).unwrap() - * T::from(Complex::new(T::one(), -T::PI() / T::from(l).unwrap())).unwrap() - }; - Array::from_iter((0..l).map(f)) - }; - let aterm = (c.conj(), q.conj()); - let bterm = (b, p); - let two = T::from(2).unwrap(); - - let g = ((&omega_l * T::one().neg() + T::one()) / (&omega_l + T::one())) * (two / step); - let c = (&omega_l + T::one()).mapv(|i| two / i); - - let k00 = cauchy_complex(&(&aterm.0 * bterm.0), &g, lambda); - let k01 = cauchy_complex(&(&aterm.0 * bterm.1), &g, lambda); - let k10 = cauchy_complex(&(&aterm.1 * bterm.0), &g, lambda); - let k11 = cauchy_complex(&(&aterm.1 * bterm.1), &g, lambda); - - let at_roots = &c * (&k00 - k01 * (&k11 + T::one()).mapv(|i| T::one() / i) * &k10); - - let mut fft_planner = FftPlanner::new(); - let fft = fft_planner.plan_fft_inverse(l); - let mut buffer = at_roots - .mapv(|i| Complex::new(i.re(), i.im())) - .into_raw_vec(); - fft.process(buffer.as_mut_slice()); - Array::from_vec(buffer) + pub fn irfft(args: impl IntoIterator>, len: usize) -> Vec + where + T: FftNum + NumCast, + { + let mut buffer = Vec::from_iter(args); + // make a planner + let mut real_planner = RealFftPlanner::::new(); + // create a FFT + let r2c = real_planner.plan_fft_inverse(len); + // make a vector for storing the spectrum + let mut spectrum = r2c.make_output_vec(); + // forward transform the signal + r2c.process(&mut buffer, &mut spectrum).unwrap(); + let scale = T::one() / T::from(len).unwrap(); + spectrum.iter().cloned().map(|i| i * scale).collect() + } } diff --git a/ml/s4/tests/conversion.rs b/ml/s4/tests/conversion.rs new file mode 100644 index 00000000..2900fef8 --- /dev/null +++ b/ml/s4/tests/conversion.rs @@ -0,0 +1,59 @@ +#[cfg(test)] +extern crate concision_s4; + +use 
concision_core as core; +use concision_s4 as s4; +use s4::ops::scan_ssm; + + +use core::prelude::{assert_atol, randc_normal}; +use s4::prelude::{casual_convolution, discretize_dplr, k_conv, DPLRParams}; +use s4::cmp::kernel::kernel_dplr; +use s4::hippo::dplr::DPLR; + +use ndarray::prelude::*; +use ndarray_linalg::flatten; +use num::complex::{Complex, ComplexFloat}; + +const EPSILON: f64 = 1e-4; +const FEATURES: usize = 8; +const RNGKEY: u64 = 1; +const SAMPLES: usize = 16; + +#[test] +fn test_conversion() { + let step = (SAMPLES as f64).recip(); + // Initialize a new DPLR Matrix + let dplr = DPLR::::new(FEATURES); + let (lambda, p, b, _) = dplr.clone().into(); + + // let c = randcomplex(features); + let c = randc_normal(RNGKEY, FEATURES); + // CNN Form + let kernel = { + let params = + DPLRParams::new(lambda.clone(), p.clone(), p.clone(), b.clone(), c.clone()); + kernel_dplr::(¶ms, step, SAMPLES) + }; + // RNN Form + let discrete = discretize_dplr(&lambda, &p, &p, &b, &c, step, SAMPLES).expect(""); + let (ab, bb, cb) = discrete.into(); + + let k2 = k_conv(&ab, &bb, &cb, SAMPLES); + let k2r = k2.mapv(|i| i.re()); + + assert_atol(&kernel, &k2r, EPSILON); + + let u = Array::range(0.0, SAMPLES as f64, 1.0); + let u2 = u.mapv(|i| Complex::new(i, 0.0)).insert_axis(Axis(1)); + // Apply the CNN + let y1 = casual_convolution(&u, &kernel); + + // Apply the RNN + let x0 = Array::zeros(FEATURES); + let y2 = scan_ssm(&ab, &bb, &cb, &u2, &x0).expect("Failed to scan the SSM"); + let y2r = flatten(y2.mapv(|i| i.re())); + + assert_atol(&y1, &y2r, EPSILON) + +} \ No newline at end of file diff --git a/ml/s4/tests/dplr.rs b/ml/s4/tests/dplr.rs new file mode 100644 index 00000000..f6c9ddd4 --- /dev/null +++ b/ml/s4/tests/dplr.rs @@ -0,0 +1,84 @@ +#[cfg(test)] +extern crate concision_core; +extern crate concision_s4; + +use concision_core as core; +use concision_s4 as s4; + +use lazy_static::lazy_static; +use ndarray::prelude::*; +use ndarray_linalg::flatten; +use num::complex::{Complex, ComplexFloat}; + +use core::prelude::{seeded_uniform, AsComplex, Conjugate, Power}; +use s4::cmp::kernel::kernel_dplr; +use s4::hippo::dplr::DPLR; +use s4::ops::{discretize, k_conv}; +use s4::params::DPLRParams; + +const FEATURES: usize = 4; +const RNGKEY: u64 = 1; +const SAMPLES: usize = 16; + +lazy_static! 
{ + static ref SEEDED_C: Array2 = seeded_uniform(RNGKEY, 0.0, 1.0, (1, FEATURES)); + static ref SAMPLE_C: Array2 = array![[0.02185547, 0.20907068, 0.23742378, 0.3723395]]; + static ref SAMPLE_IM: Array2> = SAMPLE_C.clone().mapv(AsComplex::as_re); +} + +#[test] +// #[ignore = "TODO: fix this test"] +fn test_gen_dplr() { + let eye = Array2::::eye(FEATURES); + + let step = (SAMPLES as f64).recip(); + + let dplr = DPLR::::new(FEATURES); + let (lambda, p, b, _v) = dplr.into(); + + println!("{:?}", &p); + + let b2 = b.clone().insert_axis(Axis(1)); + + let p2 = p.clone().insert_axis(Axis(1)); + + let a = Array::from_diag(&lambda) - p2.dot(&p2.conj().t()); + + // let c = { + // let tmp = seeded_uniform(RNGKEY, 0.0, 1.0, (1, features)); + // println!("C:\n\n{:#?}\n", &tmp); + // tmp.mapv(AsComplex::as_re) + // }; + let c = { + let tmp = array![[0.02185547, 0.20907068, 0.23742378, 0.3723395]]; + println!("C:\n\n{:#?}\n", &tmp); + tmp.mapv(AsComplex::as_re) + }; + + // TODO: figure out why several of the signs are wrong + let discrete = { + let tmp = discretize(&a, &b2, &c, step); + assert!(tmp.is_ok(), "discretize failed: {:?}", tmp.err().unwrap()); + tmp.unwrap() + }; + + let (ab, bb, cb) = discrete.into(); + // + let ak = k_conv(&ab, &bb, &cb.conj(), SAMPLES); + // + let cc = (&eye - ab.pow(SAMPLES)).conj().t().dot(&flatten(cb)); + // + let params = DPLRParams::new(lambda, p.clone(), p.clone(), b.clone(), cc); + // + let kernal = kernel_dplr::(¶ms, step, SAMPLES); + println!("Kernal: {:?}", kernal.shape()); + + let a_real = ak.mapv(|i| i.re()); + let err = (&a_real - &kernal).mapv(|i| i.abs()); + assert!( + err.mean().unwrap() <= 1e-4, + "Error: {:?}\nTolerance: {:?}", + err.mean().unwrap(), + 1e-4 + ); +} diff --git a/ml/s4/tests/utils.rs b/ml/s4/tests/utils.rs deleted file mode 100644 index 8876b845..00000000 --- a/ml/s4/tests/utils.rs +++ /dev/null @@ -1,22 +0,0 @@ -#[cfg(test)] -extern crate concision_s4; -use concision_s4 as s4; - -use ndarray::prelude::*; -use s4::prelude::{scanner, SSMStore}; - -#[test] -fn test_scan() { - let features = 2; - - let u = Array2::ones((10, features)); - let x0 = Array1::ones(features); - - let ssm = SSMStore::::from_features(features); - let (a, b, c, _d) = ssm.clone().into(); - let scan1 = scanner(&a, &b, &c, &u, &x0); - - let scan2 = ssm.scan(&u, &x0).unwrap(); - - assert_eq!(scan1, scan2); -} diff --git a/ml/transformers/src/attention/multi/mod.rs b/ml/transformers/src/attention/multi/mod.rs index 420df30a..cf68c00e 100644 --- a/ml/transformers/src/attention/multi/mod.rs +++ b/ml/transformers/src/attention/multi/mod.rs @@ -89,7 +89,7 @@ mod tests { let (heads, seq, model) = (8, 10, 512); let data = Array2::::zeros((seq, model)); - let mask = Mask::::masked(seq).into(); + let mask = Mask::::uniform(seq).into(); let attention = MultiHeadAttention::new(heads, model); let score = attention .attention(&data, &mask) diff --git a/ml/transformers/src/attention/params/mod.rs b/ml/transformers/src/attention/params/mod.rs index 6917d2a2..a629ba23 100644 --- a/ml/transformers/src/attention/params/mod.rs +++ b/ml/transformers/src/attention/params/mod.rs @@ -15,7 +15,7 @@ //! - samples: The number of samples to draw from the attention distribution. //! //! 
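// A small sketch of how the hyperparameters listed above usually relate in multi-head
// attention (the conventional split, assumed here rather than read from this crate's dim
// module): each of the `heads` heads attends over query/key/value slices of width
// d_model / heads, e.g. 512 / 8 = 64 for the (heads, model) = (8, 512) used in the tests.
fn head_dim_sketch(d_model: usize, heads: usize) -> usize {
    assert!(d_model % heads == 0, "d_model must divide evenly across the heads");
    d_model / heads
}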
-pub use self::{dim::*, hyperparams::*, qkv::*, utils::*}; +pub use self::{dim::*, hyperparams::*, qkv::*}; pub(crate) mod dim; pub(crate) mod hyperparams; diff --git a/ml/transformers/src/attention/params/qkv.rs b/ml/transformers/src/attention/params/qkv.rs index 9969424b..1a538909 100644 --- a/ml/transformers/src/attention/params/qkv.rs +++ b/ml/transformers/src/attention/params/qkv.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ use serde::{Deserialize, Serialize}; -use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; +use strum::{Display, EnumCount, EnumIs, EnumIter, EnumString, VariantNames}; #[derive( Clone, @@ -12,16 +12,17 @@ use strum::{Display, EnumIs, EnumIter, EnumString, EnumVariantNames}; Default, Deserialize, Display, + EnumCount, EnumIs, EnumIter, EnumString, - EnumVariantNames, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, + VariantNames, )] #[repr(usize)] #[serde(rename_all = "lowercase")] diff --git a/ml/transformers/src/codec/decode/mod.rs b/ml/transformers/src/codec/decode/mod.rs index f9114f9b..716e4454 100644 --- a/ml/transformers/src/codec/decode/mod.rs +++ b/ml/transformers/src/codec/decode/mod.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ //! # Decode -pub use self::{decoder::*, params::*, utils::*}; +pub use self::decoder::*; pub(crate) mod decoder; pub(crate) mod params; diff --git a/ml/transformers/src/codec/encode/mod.rs b/ml/transformers/src/codec/encode/mod.rs index 046a6444..49e8003f 100644 --- a/ml/transformers/src/codec/encode/mod.rs +++ b/ml/transformers/src/codec/encode/mod.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ //! # Encode -pub use self::{encoder::*, params::*, stack::*, utils::*}; +pub use self::{encoder::*, params::*, stack::*}; pub(crate) mod encoder; pub(crate) mod params; @@ -23,7 +23,7 @@ mod tests { fn test_encoder() { let (heads, seq, model) = (8, 10, 512); let _data = Array2::::zeros((seq, model)); - let _mask = Mask::::masked(seq); + let _mask = Mask::::uniform(seq); let params = EncoderParams::new(heads, model); let encoder = Encoder::new(params); diff --git a/ml/transformers/src/codec/mod.rs b/ml/transformers/src/codec/mod.rs index 38f5114a..5a21f740 100644 --- a/ml/transformers/src/codec/mod.rs +++ b/ml/transformers/src/codec/mod.rs @@ -3,7 +3,6 @@ Contrib: FL03 */ //! # Codec -pub use self::utils::*; pub mod decode; pub mod encode; diff --git a/ml/transformers/src/ffn/mod.rs b/ml/transformers/src/ffn/mod.rs index 7dd10d33..09fb9ba4 100644 --- a/ml/transformers/src/ffn/mod.rs +++ b/ml/transformers/src/ffn/mod.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ //! # Decode -pub use self::{network::*, params::*, utils::*}; +pub use self::{network::*, params::*}; pub(crate) mod network; pub(crate) mod params; diff --git a/ml/transformers/src/lib.rs b/ml/transformers/src/lib.rs index aa1d8e61..1a47f4ac 100644 --- a/ml/transformers/src/lib.rs +++ b/ml/transformers/src/lib.rs @@ -3,7 +3,7 @@ Contrib: FL03 */ //! # Concision Transformers -pub use self::{primitives::*, specs::*, utils::*}; +pub use self::primitives::*; pub(crate) mod primitives; pub(crate) mod specs; @@ -26,6 +26,4 @@ pub mod prelude { pub use crate::transform::*; pub use crate::primitives::*; - pub use crate::specs::*; - pub use crate::utils::*; }