
Missing math #610

Merged · 16 commits · Feb 15, 2024
129 changes: 125 additions & 4 deletions src/arraymancer/tensor/aggregate.nim
@@ -16,13 +16,14 @@ import ./data_structure,
./init_cpu,
./higher_order_foldreduce,
./operators_broadcasted,
./operators_comparison,
./higher_order_applymap,
./math_functions,
./accessors,
./accessors_macros_syntax,
./algorithms,
./private/p_empty_tensors,
math

./private/p_empty_tensors
import std/[math, macros]
import complex except Complex64, Complex32

# ### Standard aggregate functions
@@ -287,7 +288,7 @@ proc percentile*[T](arg: Tensor[T], p: int, isSorted = false): float =
elif p <= 0: result = min(arg).float
elif p >= 100: result = max(arg).float
else:
let a = if not isSorted: sorted(arg) else: arg
let a = if not isSorted: sorted(arg.reshape([1, arg.size]).squeeze) else: arg
let f = (arg.size - 1) * p / 100
let i = floor(f).int
if f == i.float: result = a[i].float
@@ -296,6 +297,10 @@ proc percentile*[T](arg: Tensor[T], p: int, isSorted = false): float =
let frac = f - i.float
result = (a[i].float + (a[i+1] - a[i]).float * frac)

proc median*[T](arg: Tensor[T], isSorted = false): float {.inline.} =
## Compute the median of all elements (same as `arg.percentile(50)`)
percentile(arg, 50, isSorted)

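# Hypothetical usage sketch (not part of this diff); assumes `import arraymancer`
# and its `toTensor` constructor.
let v0 = [1.0, 3.0, 2.0, 5.0, 4.0].toTensor
echo v0.percentile(50)   # 3.0
echo v0.median           # 3.0, same as v0.percentile(50)
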
proc iqr*[T](arg: Tensor[T]): float =
## Returns the interquartile range of the 1D tensor `t`.
##
@@ -337,6 +342,122 @@ proc cumprod*[T](arg: Tensor[T], axis: int = 0): Tensor[T] = # from hugogranstro
else:
temp[_] = result.atAxisIndex(axis, i-1) *. tAxis

proc diff_discrete*[T](arg: Tensor[T], n=1, axis: int = -1): Tensor[T] =
## Calculate the n-th discrete difference along the given axis.
##
## The first difference is given by `out[i] = a[i+1] - a[i]` along the given axis.
## Higher differences are calculated by using diff recursively.
##
## Input:
## - A tensor
## - n: The number of times values are differenced.
## If zero, the input is returned as-is.
## - axis: The axis along which the difference is taken,
## default is the last axis.
## Returns:
## - A tensor with the n-th discrete difference along the given axis.
## Its size along that axis will be reduced by one.
## - The code in this function is heavily based upon and equivalent
## to numpy's `diff()` function.
mixin `_`
assert n >= 0, "diff order (" & $n & ") cannot be negative"
if n == 0 or arg.size == 0:
return arg
let axis = if axis == -1:
arg.shape.len + axis
else:
axis
assert axis < arg.shape.len,
"diff axis (" & $axis & ") cannot be greater than input shape length (" & $arg.shape.len & ")"
var result_shape = arg.shape
result_shape[axis] -= 1
result = zeros[T](result_shape)
for i, tAxis in enumerateAxis(arg, axis):
if unlikely(i == 0):
continue
var temp = result.atAxisIndex(axis, i-1)
when T is bool:
temp[_] = tAxis != arg.atAxisIndex(axis, i-1)
else:
temp[_] = tAxis -. arg.atAxisIndex(axis, i-1)
if n > 1:
result = diff_discrete(result, n=n-1, axis=axis)

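# Hypothetical usage sketch (not part of this diff), mirroring numpy's diff():
let d0 = [1, 2, 4, 7, 0].toTensor
echo d0.diff_discrete()        # first difference:  [1, 2, 3, -7]
echo d0.diff_discrete(n = 2)   # second difference: [1, 1, -10]
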
proc unwrap_period*[T: SomeNumber](t: Tensor[T], discont: T = -1, axis = -1, period: T = default(T)): Tensor[T] {.noinit.} =
# Unwrap a tensor by taking the complement of large deltas with respect to a period.
#
# This unwraps a tensor `t` by changing elements which have an absolute
# difference from their predecessor of more than ``max(discont, period/2)``
# to their `period`-complementary values.
#
# For the default case where `period` is `2*PI` and `discont` is
# `PI`, this unwraps a radian phase `t` such that adjacent differences
# are never greater than `PI` by adding `2*k*PI` for some integer `k`.
#
# Inputs:
# - t: Input Tensor.
# - discont: Maximum discontinuity between values. Default is `period/2`.
# Values below `period/2` are treated as if they were `period/2`.
# To have an effect different than the default, `discont` must be
# larger than `period/2`.
# - axis: Axis along which the unwrap will be done. Default is the last axis.
# - period: Size of the range over which the input wraps.
# By default, it is ``2*PI``.
Comment on lines +404 to +405
@Vindaar (Collaborator), Feb 11, 2024:

Except it is currently default(T) :)

Ahh, in the body!

# Return:
# - Output Tensor.
#
# Notes:
# - If the discontinuity in `t` is smaller than ``period/2``,
# but larger than `discont`, no unwrapping is done because taking
# the complement would only make the discontinuity larger.
# - The code in this function is heavily based upon and equivalent
# to numpy's `unwrap()` function.
mixin `_`
let axis = if axis == -1:
t.shape.len + axis
else:
axis
let td = t.diff_discrete(axis=axis)
let period: T = if period == default(T):
when T is int:
raise newException(ValueError, "unwrap period must be specified for integer types")
else:
2.0 * PI
Collaborator suggested change:
-  2.0 * PI
+  T(2.0 * PI)

possibly.

else:
period
let discont = if discont == -1:
T(period/2)
else:
discont
when T is int:
when (NimMajor, NimMinor, NimPatch) >= (2, 0, 0):
let (interval_high, rem) = divmod(period, 2)
else:
let interval_high = period div 2
let rem = period mod 2
let boundary_ambiguous = rem == 0
else:
let interval_high = period / 2
let boundary_ambiguous = true
let interval_low = -interval_high
var tdmod = (td -. interval_low).floorMod(period) +. interval_low
if boundary_ambiguous:
const zero: T = T(0)
tdmod[(tdmod ==. interval_low) and (td >. zero)] = interval_high
var ph_correct = tdmod - td
ph_correct[abs(td) <. discont] = 0
result = t.clone()
let ph_correct_cumsum = ph_correct.cumsum(axis)
if t.rank == 1:
result[1.._] = t[1.._] +. ph_correct_cumsum
else:
for i, tAxis in enumerateAxis(t, axis):
if unlikely(i < 1):
continue
let pAxis = ph_correct_cumsum.atAxisIndex(axis, i-1)
var temp = result.atAxisIndex(axis, i)
temp[_] = tAxis +. pAxis

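# Hypothetical usage sketch (not part of this diff): unwrapping a radian phase
# with the default 2*PI period, as in numpy's unwrap(); PI is from std/math.
let ph0 = [0.0, PI / 2.0, PI - 0.1, -PI + 0.1, -PI / 2.0].toTensor
echo ph0.unwrap_period()
# the jump of about -2*PI between elements 2 and 3 is complemented away,
# giving a monotonically increasing sequence
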
when (NimMajor, NimMinor, NimPatch) > (1, 6, 0):
import std/atomics
proc nonzero*[T](arg: Tensor[T]): Tensor[int] =
117 changes: 117 additions & 0 deletions src/arraymancer/tensor/math_functions.nim
@@ -18,6 +18,7 @@ import ./data_structure,
./higher_order_applymap,
./ufunc
import complex except Complex64, Complex32
import math

# Non-operator math functions

@@ -103,18 +104,134 @@ proc phase*(t: Tensor[Complex[float32]]): Tensor[float32] {.noinit.} =
## Return a Tensor with phase values of all elements
t.map_inline(phase(x))

proc sgn*[T: SomeNumber](t: Tensor[T]): Tensor[int] {.noinit.} =
## Element-wise sgn function (returns a tensor with the sign of each element)
##
## Returns:
## - -1 for negative numbers and NegInf,
## - 1 for positive numbers and Inf,
## - 0 for positive zero, negative zero and NaN
t.map_inline(sgn(x))

when (NimMajor, NimMinor, NimPatch) >= (1, 6, 0):
proc copySign*[T: SomeFloat](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
## Element-wise copySign function (combines 2 tensors, taking the magnitudes from t1 and the signs from t2)
##
## This uses Nim's copySign under the hood and thus has the same properties. That is, it works for values
## which are NaN, infinity or zero (all of which can carry a sign) but does not work for integers.
t1.map2_inline(t2, copySign(x, y))

proc mcopySign*[T: SomeFloat](t1: var Tensor[T], t2: Tensor[T]) =
## In-place element-wise copySign function (changes the signs of the elements of t1 to match those of t2)
##
## This uses Nim's copySign under the hood and thus has the same properties. That is, it works for values
## which are NaN, infinity or zero (all of which can carry a sign) but does not work for integers.
t1.apply2_inline(t2, copySign(x, y))

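# Hypothetical usage sketch (not part of this diff); copySign requires Nim >= 1.6:
let s0 = [-3.5, 0.0, 2.0].toTensor
echo s0.sgn                                   # [-1, 0, 1]
echo copySign([1.0, 2.0, 3.0].toTensor, s0)   # [-1.0, 2.0, 3.0]
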
proc floorMod*[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
## Broadcasted floorMod operation: floorMod(tensor, tensor).
result = t1.map2_inline(t2, floorMod(x, y))

proc floorMod*[T: SomeNumber](t: Tensor[T], val: T): Tensor[T] {.noinit.} =
## Broadcasted floorMod operation: floorMod(tensor, scalar).
result = t.map_inline(floorMod(x, val))

proc floorMod*[T: SomeNumber](val: T, t: Tensor[T]): Tensor[T] {.noinit.} =
## Broadcasted floorMod operation: floorMod(scalar, tensor).
result = t.map_inline(floorMod(val, x))

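# Hypothetical usage sketch (not part of this diff): floorMod follows the sign
# of the divisor, unlike the truncating `mod`.
let f0 = [-7, 7].toTensor
echo f0.floorMod(3)                  # [2, 1]
echo floorMod(7, [-3, 3].toTensor)   # [-2, 1]
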
proc clamp*[T](t: Tensor[T], min, max: T): Tensor[T] {.noinit.} =
## Return a Tensor with all elements clamped to the interval [min, max].
t.map_inline(clamp(x, min, max))

proc mclamp*[T](t: var Tensor[T], min, max: T) =
## Update the Tensor with all elements clamped to the interval [min, max].
t.apply_inline(clamp(x, min, max))

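# Hypothetical usage sketch (not part of this diff):
let c0 = [-2, 0, 7].toTensor
echo c0.clamp(-1, 5)     # [-1, 0, 5]
var c1 = [-2, 0, 7].toTensor
c1.mclamp(-1, 5)         # c1 is now [-1, 0, 5]
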
proc max*[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
## Compare two tensors and return a new tensor containing the element-wise maxima.
##
## As in Nim's built-in max procedure, if one of the elements being compared is NaN,
## the non-NaN element is returned.
t1.map2_inline(t2, max(x, y))

proc max*[T: SomeNumber](args: varargs[Tensor[T]]): Tensor[T] {.noinit.} =
## Compare any number of tensors and return a new tensor containing the element-wise maxima.
##
## As in Nim's built-in max procedure, if one of the elements being compared is NaN,
## the non-NaN element is returned.
result = max(args[0], args[1])
for n in countup(2, len(args)-1):
result = max(result, args[n])

proc mmax*[T: SomeNumber](t1: var Tensor[T], t2: Tensor[T]) =
## In-place element-wise maxima of two tensors.
##
## As in Nim's built-in max procedure, if one of the elements being compared is NaN,
## the non-NaN element is returned.
t1.apply2_inline(t2, max(x, y))

proc mmax*[T: SomeNumber](t1: var Tensor[T], args: varargs[Tensor[T]]) =
## In-place element-wise maxima of N tensors.
##
## As in Nim's built-in max procedure, if one of the elements being compared is NaN,
## the non-NaN element is returned.
t1.apply2_inline(args[0], max(x, y))
for n in countup(1, len(args)-1):
t1.apply2_inline(args[n], max(x, y))

proc min*[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] {.noinit.} =
## Compare two tensors and return a new tensor containing the element-wise minima.
##
## As in Nim's built-in min procedure, if one of the elements being compared is NaN,
## the non-NaN element is returned.
t1.map2_inline(t2, min(x, y))

proc min*[T: SomeNumber](args: varargs[Tensor[T]]): Tensor[T] {.noinit.} =
## Compare any number of tensors and return a new tensor containing the element-wise minima.
##
## As in Nim's built-in min procedure, if one of the elements being compared is NaN,
## the non-NaN element is returned.
result = min(args[0], args[1])
for n in countup(2, len(args)-1):
result = min(result, args[n])

proc mmin*[T: SomeNumber](t1: var Tensor[T], t2: Tensor[T]) =
## In-place element-wise minima of two tensors.
##
## As in Nim's built-in min procedure, if one of the elements being compared is NaN,
## the non-NaN element is returned.
t1.apply2_inline(t2, min(x, y))

proc mmin*[T: SomeNumber](t1: var Tensor[T], args: varargs[Tensor[T]]) =
## In-place element-wise minima of N tensors.
##
## As in Nim's built-in min procedure, if one of the elements being compared is NaN,
## the non-NaN element is returned.
t1.apply2_inline(args[0], min(x, y))
for n in countup(1, len(args)-1):
t1.apply2_inline(args[n], min(x, y))

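# Hypothetical usage sketch (not part of this diff):
let m0 = [1, 5, 3].toTensor
let m1 = [4, 2, 6].toTensor
echo max(m0, m1)         # [4, 5, 6]
echo min(m0, m1, m0)     # [1, 2, 3]
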
proc square*[T](x: T): T {.inline.} =
## Return `x*x`
x*x

makeUniversal(square)

proc classify*[T: SomeFloat](t: Tensor[T]): Tensor[FloatClass] {.noinit.} =
## Element-wise classify function (returns a tensor with the float class of each element).
##
## Returns:
## A FloatClass tensor where each value is one of the following:
## - fcNormal: value is an ordinary nonzero floating point value
## - fcSubnormal: value is a subnormal (a very small) floating point value
## - fcZero: value is zero
## - fcNegZero: value is the negative zero
## - fcNan: value is Not a Number (NaN)
## - fcInf: value is positive infinity
## - fcNegInf: value is negative infinity
t.map_inline(classify(x))

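# Hypothetical usage sketch (not part of this diff); Inf/NaN come from std/math:
let g0 = [1.0, 0.0, Inf, NaN].toTensor
echo g0.classify         # [fcNormal, fcZero, fcInf, fcNan]
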
type ConvolveMode* = enum full, same, valid

proc convolveImpl[T: SomeNumber | Complex32 | Complex64](
36 changes: 34 additions & 2 deletions src/arraymancer/tensor/operators_blas_l1.nim
@@ -17,7 +17,7 @@
./data_structure,
./accessors, ./higher_order_applymap,
nimblas
import complex except Complex32, Complex64

Check warning on line 20 in src/arraymancer/tensor/operators_blas_l1.nim — reported twice by each of the linux-amd64-c and macos-amd64-c GitHub Actions jobs (version-1-6, version-2-0, devel): imported and not used: 'complex' [UnusedImport]

# ####################################################################
# BLAS Level 1 (Vector dot product, Addition, Scalar to Vector/Matrix)
@@ -73,16 +73,48 @@
## Element-wise multiplication by a scalar
a * t

proc `/`*[T: SomeFloat|Complex[float32]|Complex[float64]](t: Tensor[T], a: T): Tensor[T] {.noinit.} =
proc `/`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], a: T): Tensor[T] {.noinit.} =
## Element-wise division by a scalar
returnEmptyIfEmpty(t)
t.map_inline(x / a)
when T is SomeInteger:
t.map_inline(x div a)
else:
t.map_inline(x / a)

proc `div`*[T: SomeInteger](t: Tensor[T], a: T): Tensor[T] {.noinit.} =
## Element-wise division by an integer
returnEmptyIfEmpty(t)
t.map_inline(x div a)

proc `mod`*[T: SomeNumber](t: Tensor[T], val: T): Tensor[T] {.noinit.} =
## Broadcasted modulo operation
returnEmptyIfEmpty(t)
result = t.map_inline(x mod val)

proc `mod`*[T: SomeNumber](val: T, t: Tensor[T]): Tensor[T] {.noinit.} =
## Broadcasted modulo operation
returnEmptyIfEmpty(t)
result = t.map_inline(val mod x)

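# Hypothetical usage sketch (not part of this diff): scalar division and modulo
# on integer tensors follow Nim's truncating `div`/`mod` semantics.
let i0 = [7, -7].toTensor
echo i0 / 2       # [3, -3]
echo i0 mod 3     # [1, -1]
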
# Unsupported operations (these must be done using the broadcasted operators)
proc `+`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: Tensor[T], val: T): Tensor[T] {.noinit,inline.} =
## Mathematical addition of tensors and scalars is undefined. Must use a broadcasted addition instead
{.error: "To add a scalar to a tensor you must use the `+.` operator (instead of a plain `+` operator)".}

# Unsupported operations
proc `+`*[T: SomeNumber|Complex[float32]|Complex[float64]](val: T, a: Tensor[T]): Tensor[T] {.noinit,inline.} =
## Mathematical addition of tensors and scalars is undefined. Must use a broadcasted addition instead
{.error: "To add a tensor to a scalar you must use the `+.` operator (instead of a plain `+` operator)".}

proc `-`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: Tensor[T], val: T): Tensor[T] {.noinit,inline.} =
## Mathematical subtraction of tensors and scalars is undefined. Must use a broadcasted subtraction instead
{.error: "To subtract a scalar from a tensor you must use the `-.` operator (instead of a plain `-` operator)".}

# Unsupported operations
proc `-`*[T: SomeNumber|Complex[float32]|Complex[float64]](val: T, a: Tensor[T]): Tensor[T] {.noinit,inline.} =
## Mathematical subtraction of tensors and scalars is undefined. Must use a broadcasted subtraction instead
{.error: "To subtract a tensor from a scalar you must use the `-.` operator (instead of a plain `-` operator)".}

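# Hypothetical usage sketch (not part of this diff): the broadcasted `+.` / `-.`
# operators are the supported spelling for tensor-scalar arithmetic.
let a0 = [1, 2, 3].toTensor
echo a0 +. 10     # [11, 12, 13]
# echo a0 + 10    # rejected at compile time with the error above
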
# #########################################################
# # Tensor-scalar in-place linear algebra

18 changes: 17 additions & 1 deletion src/arraymancer/tensor/operators_broadcasted.nim
@@ -19,7 +19,7 @@
./private/p_empty_tensors

import std/math
import complex except Complex64, Complex32

Check warning on line 22 in src/arraymancer/tensor/operators_broadcasted.nim — reported twice by each of the linux-amd64-c and macos-amd64-c GitHub Actions jobs (version-1-6, version-2-0, devel): imported and not used: 'complex' [UnusedImport]

# #########################################################
# # Broadcasting Tensor-Tensor
@@ -39,7 +39,6 @@
## Element-wise multiplication (Hadamard product).
##
## And broadcasted element-wise multiplication.

let (tmp_a, tmp_b) = broadcast2(a, b)
result = map2_inline(tmp_a, tmp_b, x * y)

@@ -53,6 +52,13 @@
else:
result = map2_inline(tmp_a, tmp_b, x / y )

proc `mod`*[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noinit.} =
## Tensor element-wise modulo operation
##
## And broadcasted element-wise modulo operation.
let (tmp_a, tmp_b) = broadcast2(a, b)
result = map2_inline(tmp_a, tmp_b, x mod y)

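# Hypothetical usage sketch (not part of this diff): element-wise modulo of two tensors.
let b0 = [10, 11, 12].toTensor
let b1 = [4, 4, 4].toTensor
echo b0 mod b1    # [2, 3, 0]
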
# ##############################################
# # Broadcasting in-place Tensor-Tensor

@@ -118,6 +124,16 @@
returnEmptyIfEmpty(t)
result = t.map_inline(x - val)

proc `*.`*[T: SomeNumber|Complex[float32]|Complex[float64]](val: T, t: Tensor[T]): Tensor[T] {.noinit.} =
## Broadcasted multiplication for scalar * tensor.
returnEmptyIfEmpty(t)
result = t.map_inline(x * val)

proc `*.`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], val: T): Tensor[T] {.noinit.} =
## Broadcasted multiplication for tensor * scalar.
returnEmptyIfEmpty(t)
result = t.map_inline(x * val)

proc `/.`*[T: SomeNumber|Complex[float32]|Complex[float64]](val: T, t: Tensor[T]): Tensor[T] {.noinit.} =
## Broadcasted division
returnEmptyIfEmpty(t)