From 7297e98daf577510658bfa659582b4332e2aec06 Mon Sep 17 00:00:00 2001
From: Angel Ezquerra
Date: Wed, 4 Oct 2023 22:34:11 +0200
Subject: [PATCH 1/5] Add support for Complex to all the common error functions

---
 .../ml/metrics/common_error_functions.nim | 23 +++++++++++--------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/src/arraymancer/ml/metrics/common_error_functions.nim b/src/arraymancer/ml/metrics/common_error_functions.nim
index 224083163..acebed770 100644
--- a/src/arraymancer/ml/metrics/common_error_functions.nim
+++ b/src/arraymancer/ml/metrics/common_error_functions.nim
@@ -16,15 +16,19 @@ import ../../tensor
 
 # ####################################################
 
-proc squared_error*[T](y, y_true: T): T {.inline.} =
+proc squared_error*[T: SomeFloat](y, y_true: T): T {.inline.} =
   ## Squared error for a single value, |y_true - y| ^2
   result = square(y_true - y)
 
-proc squared_error*[T](y, y_true: Tensor[T]): Tensor[T] {.noinit.} =
+proc squared_error*[T: SomeFloat](y, y_true: Complex[T]): T {.inline.} =
+  ## Squared error for a single value, |y_true - y| ^2
+  result = abs(y_true - y) ^ 2
+
+proc squared_error*[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): Tensor[T] {.noinit.} =
   ## Element-wise squared error for a tensor, |y_true - y| ^2
   result = map2_inline(y_true, y, squared_error(x,y))
 
-proc mean_squared_error*[T](y, y_true: Tensor[T]): T =
+proc mean_squared_error*[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): T =
   ## Also known as MSE or L2 loss, mean squared error between elements:
   ## sum(|y_true - y| ^2)/m
   ## where m is the number of elements
@@ -32,7 +36,7 @@ proc mean_squared_error*[T](y, y_true: Tensor[T]): T =
 
 # ####################################################
 
-proc relative_error*[T: SomeFloat](y, y_true: T): T {.inline.} =
+proc relative_error*[T: SomeFloat](y, y_true: T | Complex[T]): T {.inline.} =
   ## Relative error, |y_true - y|/max(|y_true|, |y|)
   ## Normally the relative error is defined as |y_true - y| / |y_true|,
   ## but here max is used to make it symmetric and to prevent dividing by zero,
@@ -43,14 +47,14 @@ proc relative_error*[T: SomeFloat](y, y_true: T): T {.inline.} =
     return 0.T
   result = abs(y_true - y) / denom
 
-proc relative_error*[T](y, y_true: Tensor[T]): Tensor[T] {.noinit.} =
+proc relative_error*[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): Tensor[T] {.noinit.} =
   ## Relative error for Tensor, element-wise |y_true - x|/max(|y_true|, |x|)
   ## Normally the relative error is defined as |y_true - x| / |y_true|,
   ## but here max is used to make it symmetric and to prevent dividing by zero,
   ## guaranteed to return zero in the case when both values are zero.
   result = map2_inline(y, y_true, relative_error(x,y))
 
-proc mean_relative_error*[T](y, y_true: Tensor[T]): T =
+proc mean_relative_error*[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): T =
   ## Mean relative error for Tensor, mean of the element-wise
   ## |y_true - y|/max(|y_true|, |y|)
   ## Normally the relative error is defined as |y_true - y| / |y_true|,
@@ -60,16 +64,15 @@
 
 # ####################################################
 
-proc absolute_error*[T: SomeFloat](y, y_true: T): T {.inline.} =
+proc absolute_error*[T: SomeFloat](y, y_true: T | Complex[T]): T {.inline.} =
   ## Absolute error for a single value, |y_true - y|
-  # We require float (and not complex as complex.abs will cause issue)
   result = abs(y_true - y)
 
-proc absolute_error*[T](y, y_true: Tensor[T]): Tensor[T] {.noinit.} =
+proc absolute_error*[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): Tensor[T] {.noinit.} =
   ## Element-wise absolute error for a tensor
   result = map2_inline(y, y_true, y.absolute_error(x))
 
-proc mean_absolute_error*[T](y, y_true: Tensor[T]): T =
+proc mean_absolute_error*[T: SomeFloat](y, y_true: Tensor[T] | Tensor[Complex[T]]): T =
   ## Also known as L1 loss, absolute error between elements:
   ## sum(|y_true - y|)/m
   ## where m is the number of elements
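A minimal usage sketch of the new Complex overloads (illustration only, not
part of the patch; it assumes nothing beyond the procs above, arraymancer's
toTensor and Nim's std/complex):

    import arraymancer, std/complex

    let y      = [complex(1.0, 2.0), complex(3.0, -1.0)].toTensor
    let y_true = [complex(1.0, 1.0), complex(3.0,  0.0)].toTensor

    # Each pair of elements differs by a purely imaginary unit, so every
    # per-element squared error |y_true - y|^2 is 1.0.
    echo squared_error(y[0], y_true[0])  # 1.0
    echo mean_squared_error(y, y_true)   # 1.0

Note that both calls return a plain float rather than a complex number: the
error of two complex values is the magnitude of their difference, which keeps
the tensor-level reductions real-valued.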
From 6478af1b05490c0d3abd7023b1d53c789e157c53 Mon Sep 17 00:00:00 2001
From: Angel Ezquerra
Date: Wed, 4 Oct 2023 22:37:21 +0200
Subject: [PATCH 2/5] Add support for Complex tensors to the decomposition functions

These changes were actually made by Vindaar. I only completed the SVD test.
---
 .../linear_algebra/decomposition.nim         |  15 ++-
 .../helpers/decomposition_lapack.nim         | 127 ++++++++++++------
 tests/linear_algebra/test_linear_algebra.nim |  33 +++++
 3 files changed, 131 insertions(+), 44 deletions(-)

diff --git a/src/arraymancer/linear_algebra/decomposition.nim b/src/arraymancer/linear_algebra/decomposition.nim
index c2656223d..47a7e4e7b 100644
--- a/src/arraymancer/linear_algebra/decomposition.nim
+++ b/src/arraymancer/linear_algebra/decomposition.nim
@@ -16,7 +16,7 @@ template `^^`(s, i: untyped): untyped =
 # Full decompositions
 # -------------------------------------------
 
-proc symeig*[T: SomeFloat](a: Tensor[T], return_eigenvectors: static bool = false, uplo: static char = 'U'): tuple[eigenval, eigenvec: Tensor[T]] {.inline.}=
+proc symeig*[T: SupportedDecomposition](a: Tensor[T], return_eigenvectors: static bool = false, uplo: static char = 'U'): tuple[eigenval, eigenvec: Tensor[T]] {.inline.}=
   ## Compute the eigenvalues and eigen vectors of a symmetric matrix
   ## Input:
   ##   - A symmetric matrix of shape [n x n]
@@ -36,7 +36,7 @@ proc symeig*[T: SomeFloat](a: Tensor[T], return_eigenvectors: static bool = fals
   var a = a.clone(colMajor)
   syevr(a, uplo, return_eigenvectors, 0, a.shape[0] - 1, result.eigenval, result.eigenvec, scratchspace)
 
-proc symeig*[T: SomeFloat](a: Tensor[T], return_eigenvectors: static bool = false,
+proc symeig*[T: SupportedDecomposition](a: Tensor[T], return_eigenvectors: static bool = false,
                            uplo: static char = 'U',
                            slice: HSlice[int or BackwardsIndex, int or BackwardsIndex]): tuple[eigenval, eigenvec: Tensor[T]] {.inline.}=
   ## Compute the eigenvalues and eigen vectors of a symmetric matrix
@@ -60,7 +60,7 @@ proc symeig*[T: SomeFloat](a: Tensor[T], return_eigenvectors: static bool = fals
   var a = a.clone(colMajor)
   syevr(a, uplo, return_eigenvectors, a ^^ slice.a, a ^^ slice.b, result.eigenval, result.eigenvec, scratchspace)
 
-proc qr*[T: SomeFloat](a: Tensor[T]): tuple[Q, R: Tensor[T]] =
+proc qr*[T: SupportedDecomposition](a: Tensor[T]): tuple[Q, R: Tensor[T]] =
   ## Compute the QR decomposition of an input matrix ``a``
   ## Decomposition is done through the Householder method
   ## without pivoting.
@@ -90,7 +90,7 @@ proc qr*[T: SomeFloat](a: Tensor[T]): tuple[Q, R: Tensor[T]] =
   orgqr(result.Q, tau, scratchspace)
   result.Q = result.Q[_, 0..
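A minimal usage sketch of the relaxed symeig and qr generics on a real input
(illustration only; it assumes the SupportedDecomposition concept, presumably
defined in the decomposition_lapack.nim helper changed by this patch, covers
SomeFloat as well as Complex32/Complex64):

    import arraymancer

    let a = [[2.0, 1.0],
             [1.0, 2.0]].toTensor

    # LAPACK's syevr reports eigenvalues in ascending order: 1.0, then 3.0
    let (eigenval, eigenvec) = symeig(a, return_eigenvectors = true)
    echo eigenval

    # Householder QR without pivoting; q * r rebuilds a up to rounding
    let (q, r) = qr(a)
    echo q * r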
Date: Sat, 7 Oct 2023 00:29:20 +0200
Subject: [PATCH 5/5] Apply suggestions from code review (explicitly set result)

Co-authored-by: Vindaar
---
 src/arraymancer/linear_algebra/algebra.nim | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/arraymancer/linear_algebra/algebra.nim b/src/arraymancer/linear_algebra/algebra.nim
index 1c43a7007..67079f94d 100644
--- a/src/arraymancer/linear_algebra/algebra.nim
+++ b/src/arraymancer/linear_algebra/algebra.nim
@@ -30,7 +30,7 @@ proc pinv*[T: SomeFloat](A: Tensor[T], rcond = 1e-15): Tensor[T] =
   const unit = T(1.0)
   for n in 0..<S.shape[0]:
     S_inv[n, n] = if S[n] > epsilon: unit / S[n] else: 0.0
-  Vt.transpose * S_inv * U.transpose
+  result = Vt.transpose * S_inv * U.transpose
 
 proc pinv*[T: Complex32 | Complex64](A: Tensor[T], rcond = 1e-15): Tensor[T] =
   ## Compute the (Moore-Penrose) pseudo-inverse of a matrix.
@@ -51,4 +51,4 @@ proc pinv*[T: Complex32 | Complex64](A: Tensor[T], rcond = 1e-15): Tensor[T] =
   const unit = complex(1.0)
   for n in 0..<S.shape[0]:
     S_inv[n, n] = if S[n] > epsilon: unit / S[n] else: complex(0.0)
-  Vt.conjugate.transpose * S_inv * U.conjugate.transpose
+  result = Vt.conjugate.transpose * S_inv * U.conjugate.transpose
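The fix itself is stylistic: Nim implicitly returns a trailing expression
whose type matches the proc's return type, so both procs already returned
these products; assigning to result only makes the intent explicit. A minimal
sketch exercising the complex pinv (illustration only; the matrix values are
arbitrary and the check reuses mean_absolute_error from patch 1):

    import arraymancer, std/complex

    let A = [[complex(3.0, 1.0), complex(1.0,  0.0)],
             [complex(0.0, 0.0), complex(2.0, -1.0)]].toTensor

    # pinv builds A+ = V * S+ * U^H from the SVD, so the defining
    # Moore-Penrose property A * A+ * A == A holds up to rounding
    echo mean_absolute_error(A * A.pinv * A, A)  # ~0.0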