diff --git a/src/arraymancer/tensor/aggregate.nim b/src/arraymancer/tensor/aggregate.nim index 76fed241..ae3d2dd6 100644 --- a/src/arraymancer/tensor/aggregate.nim +++ b/src/arraymancer/tensor/aggregate.nim @@ -16,13 +16,14 @@ import ./data_structure, ./init_cpu, ./higher_order_foldreduce, ./operators_broadcasted, + ./operators_comparison, ./higher_order_applymap, ./math_functions, ./accessors, + ./accessors_macros_syntax, ./algorithms, - ./private/p_empty_tensors, - math - + ./private/p_empty_tensors +import std/[math, macros] import complex except Complex64, Complex32 # ### Standard aggregate functions @@ -287,7 +288,7 @@ proc percentile*[T](arg: Tensor[T], p: int, isSorted = false): float = elif p <= 0: result = min(arg).float elif p >= 100: result = max(arg).float else: - let a = if not isSorted: sorted(arg) else: arg + let a = if not isSorted: sorted(arg.reshape([1, arg.size]).squeeze) else: arg let f = (arg.size - 1) * p / 100 let i = floor(f).int if f == i.float: result = a[i].float @@ -296,6 +297,10 @@ proc percentile*[T](arg: Tensor[T], p: int, isSorted = false): float = let frac = f - i.float result = (a[i].float + (a[i+1] - a[i]).float * frac) +proc median*[T](arg: Tensor[T], isSorted = false): float {.inline.} = + ## Compute the median of all elements (same as `arg.percentile(50)`) + percentile(arg, 50, isSorted) + proc iqr*[T](arg: Tensor[T]): float = ## Returns the interquartile range of the 1D tensor `t`. ## @@ -337,6 +342,122 @@ proc cumprod*[T](arg: Tensor[T], axis: int = 0): Tensor[T] = # from hugogranstro else: temp[_] = result.atAxisIndex(axis, i-1) *. tAxis +proc diff_discrete*[T](arg: Tensor[T], n=1, axis: int = -1): Tensor[T] = + ## Calculate the n-th discrete difference along the given axis. + ## + ## The first difference is given by `out[i] = a[i+1] - a[i]` along the given axis. + ## Higher differences are calculated by using diff recursively. + ## + ## Input: + ## - A tensor + ## - n: The number of times values are differenced. + ## If zero, the input is returned as-is. + ## - axis: The axis along which the difference is taken, + ## default is the last axis. + ## Returns: + ## - A tensor with the n-th discrete difference along the given axis. + ## It's size along that axis will be reduced by one. + ## - The code in this function is heavily based upon and equivalent + ## to numpy's `diff()` function. + mixin `_` + assert n >= 0, "diff order (" & $n & ") cannot be negative" + if n == 0 or arg.size == 0: + return arg + let axis = if axis == -1: + arg.shape.len + axis + else: + axis + assert axis < arg.shape.len, + "diff axis (" & $axis & ") cannot be greater than input shape length (" & $arg.shape.len & ")" + var result_shape = arg.shape + result_shape[axis] -= 1 + result = zeros[T](result_shape) + for i, tAxis in enumerateAxis(arg, axis): + if unlikely(i == 0): + continue + var temp = result.atAxisIndex(axis, i-1) + when T is bool: + temp[_] = tAxis != arg.atAxisIndex(axis, i-1) + else: + temp[_] = tAxis -. arg.atAxisIndex(axis, i-1) + if n > 1: + result = diff_discrete(result, n=n-1, axis=axis) + +proc unwrap_period*[T: SomeNumber](t: Tensor[T], discont: T = -1, axis = -1, period: T = default(T)): Tensor[T] {.noinit.} = + # Unwrap a tensor by taking the complement of large deltas with respect to a period. + # + # This unwraps a tensor `t` by changing elements which have an absolute + # difference from their predecessor of more than ``max(discont, period/2)`` + # to their `period`-complementary values. 
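+  #
+  # An illustrative example (an editor's sketch, not from the original patch):
+  # with the default period of ``2*PI``, the wrapped radian phase
+  # ``[0.0, 0.7*PI, -0.6*PI, 0.1*PI]`` jumps by ``-1.3*PI`` between its second
+  # and third elements; since that jump exceeds half a period in magnitude,
+  # the remaining elements are shifted by ``+2*PI``, giving
+  # ``[0.0, 0.7*PI, 1.4*PI, 2.1*PI]``.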
+ # + # For the default case where `period` is `2*PI` and `discont` is + # `PI`, this unwraps a radian phase `t` such that adjacent differences + # are never greater than `PI` by adding `2*k*PIi` for some integer `k`. + # + # Inputs: + # - t: Input Tensor. + # - discont: Maximum discontinuity between values. Default is `period/2`. + # Values below `period/2` are treated as if they were `period/2`. + # To have an effect different than the default, `discont` must be + # larger than `period/2`. + # - axis: Axis along which the unwrap will be done. Default is the last axis. + # - period: Size of the range over which the input wraps. + # By default, it is ``2*PI``. + # Return: + # - Output Tensor. + # + # Notes: + # - If the discontinuity in `t` is smaller than ``period/2``, + # but larger than `discont`, no unwrapping is done because taking + # the complement would only make the discontinuity larger. + # - The code in this function is heavily based upon and equivalent + # to numpy's `unwrap()` function. + mixin `_` + let axis = if axis == -1: + t.shape.len + axis + else: + axis + let td = t.diff_discrete(axis=axis) + let period: T = if period == default(T): + when T is int: + raise newException(ValueError, "unwrap period must be specified for integer types") + else: + 2.0 * PI + else: + period + let discont = if discont == -1: + T(period/2) + else: + discont + when T is int: + when (NimMajor, NimMinor, NimPatch) >= (2, 0, 0): + let (interval_high, rem) = divmod(period, 2) + else: + let interval_high = period div 2 + let rem = period mod 2 + let boundary_ambiguous = rem == 0 + else: + let interval_high = period / 2 + let boundary_ambiguous = true + let interval_low = -interval_high + var tdmod = (td -. interval_low).floorMod(period) +. interval_low + if boundary_ambiguous: + const zero: T = T(0) + tdmod[(tdmod ==. interval_low) and (td >. zero)] = interval_high + var ph_correct = tdmod - td + ph_correct[abs(td) <. discont] = 0 + result = t.clone() + let ph_correct_cumsum = ph_correct.cumsum(axis) + if t.rank == 1: + result[1.._] = t[1.._] +. ph_correct_cumsum + else: + for i, tAxis in enumerateAxis(t, axis): + if unlikely(i < 1): + continue + let pAxis = ph_correct_cumsum.atAxisIndex(axis, i-1) + var temp = result.atAxisIndex(axis, i) + temp[_] = tAxis +. 
pAxis + when (NimMajor, NimMinor, NimPatch) > (1, 6, 0): import std/atomics proc nonzero*[T](arg: Tensor[T]): Tensor[int] = diff --git a/src/arraymancer/tensor/math_functions.nim b/src/arraymancer/tensor/math_functions.nim index c6c82b54..f97d52bb 100644 --- a/src/arraymancer/tensor/math_functions.nim +++ b/src/arraymancer/tensor/math_functions.nim @@ -18,6 +18,7 @@ import ./data_structure, ./higher_order_applymap, ./ufunc import complex except Complex64, Complex32 +import math # Non-operator math functions @@ -103,18 +104,134 @@ proc phase*(t: Tensor[Complex[float32]]): Tensor[float32] {.noinit.} = ## Return a Tensor with phase values of all elements t.map_inline(phase(x)) +proc sgn*[T: SomeNumber](t: Tensor[T]): Tensor[int] {.noinit.} = + ## Element-wise sgn function (returns a tensor with the sign of each element) + ## + ## Returns: + ## - -1 for negative numbers and NegInf, + ## - 1 for positive numbers and Inf, + ## - 0 for positive zero, negative zero and NaN + t.map_inline(sgn(x)) + +when (NimMajor, NimMinor, NimPatch) >= (1, 6, 0): + proc copySign*[T: SomeFloat](t1, t2: Tensor[T]): Tensor[T] {.noinit.} = + ## Element-wise copySign function (combines 2 tensors, taking the magnitudes from t1 and the signs from t2) + ## + ## This uses nim's copySign under the hood, and thus has the same properties. That is, it works for values + ## which are NaN, infinity or zero (all of which can carry a sign) but does not work for integers. + t1.map2_inline(t2, copySign(x, y)) + + proc mcopySign*[T: SomeFloat](t1: var Tensor[T], t2: Tensor[T]) = + ## In-place element-wise copySign function (changes the signs of the elements of t1 to match those of t2) + ## + ## This uses nim's copySign under the hood, and thus has the same properties. That is, it works for values + ## which are NaN, infinity or zero (all of which can carry a sign) but does not work for integers. + t1.apply2_inline(t2, copySign(x, y)) + +proc floorMod*[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] {.noinit.} = + ## Broadcasted floorMod operation: floorMod(tensor, tensor). + result = t1.map2_inline(t2, floorMod(x, y)) + +proc floorMod*[T: SomeNumber](t: Tensor[T], val: T): Tensor[T] {.noinit.} = + ## Broadcasted floorMod operation: floorMod(tensor, scalar). + result = t.map_inline(floorMod(x, val)) + +proc floorMod*[T: SomeNumber](val: T, t: Tensor[T]): Tensor[T] {.noinit.} = + ## Broadcasted floorMod operation: floorMod(scalar, tensor). + result = t.map_inline(floorMod(val, x)) + proc clamp*[T](t: Tensor[T], min, max: T): Tensor[T] {.noinit.} = + ## Return a Tensor with all elements clamped to the interval [min, max]. t.map_inline(clamp(x, min, max)) proc mclamp*[T](t: var Tensor[T], min, max: T) = + ## Update the Tensor with all elements clamped to the interval [min, max]. t.apply_inline(clamp(x, min, max)) +proc max*[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] {.noinit.} = + ## Compare two arrays and return a new array containing the element-wise maxima. + ## + ## As in nim's built-in max procedure if one of the elements being compared is a NaN, + ## then the non NaN element is returned. + t1.map2_inline(t2, max(x, y)) + +proc max*[T: SomeNumber](args: varargs[Tensor[T]]): Tensor[T] {.noinit.} = + ## Compare any number of arrays and return a new array containing the element-wise maxima. + ## + ## As in nim's built-in max procedure if one of the elements being compared is a NaN, + ## then the non NaN element is returned. 
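+  ##
+  ## A small illustrative example (editor's addition; assumes `toTensor`,
+  ## which is already part of this library):
+  ##
+  ## .. code-block:: nim
+  ##   let a = [1, 5, 3].toTensor
+  ##   let b = [4, 2, 6].toTensor
+  ##   let c = [0, 9, 0].toTensor
+  ##   echo max(a, b, c)  # element-wise maxima: 4, 9, 6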
+ result = max(args[0], args[1]) + for n in countup(2, len(args)-1): + result = max(result, args[n]) + +proc mmax*[T: SomeNumber](t1: var Tensor[T], t2: Tensor[T]) = + ## In-place element-wise maxima of two tensors. + ## + ## As in nim's built-in max procedure if one of the elements being compared is a NaN, + ## then the non NaN element is returned. + t1.apply2_inline(t2, max(x, y)) + +proc mmax*[T: SomeNumber](t1: var Tensor[T], args: varargs[Tensor[T]]) = + ## In-place element-wise maxima of N tensors. + ## + ## As in nim's built-in max procedure if one of the elements being compared is a NaN, + ## then the non NaN element is returned. + t1.apply2_inline(args[0], max(x, y)) + for n in countup(1, len(args)-1): + t1.apply2_inline(args[n], max(x, y)) + +proc min*[T: SomeNumber](t1, t2: Tensor[T]): Tensor[T] {.noinit.} = + ## Compare two arrays and return a new array containing the element-wise minima. + ## + ## As in nim's built-in min procedure if one of the elements being compared is a NaN, + ## then the non NaN element is returned. + t1.map2_inline(t2, min(x, y)) + +proc min*[T: SomeNumber](args: varargs[Tensor[T]]): Tensor[T] {.noinit.} = + ## Compare any number of arrays and return a new array containing the element-wise minima. + ## + ## As in nim's built-in min procedure if one of the elements being compared is a NaN, + ## then the non NaN element is returned. + result = min(args[0], args[1]) + for n in countup(2, len(args)-1): + result = min(result, args[n]) + +proc mmin*[T: SomeNumber](t1: var Tensor[T], t2: Tensor[T]) = + ## In-place element-wise minima of two tensors. + ## + ## As in nim's built-in min procedure if one of the elements being compared is a NaN, + ## then the non NaN element is returned. + t1.apply2_inline(t2, min(x, y)) + +proc mmin*[T: SomeNumber](t1: var Tensor[T], args: varargs[Tensor[T]]) = + ## In-place element-wise minima of N tensors. + ## + ## As in nim's built-in min procedure if one of the elements being compared is a NaN, + ## then the non NaN element is returned. + t1.apply2_inline(args[0], min(x, y)) + for n in countup(1, len(args)-1): + t1.apply2_inline(args[n], min(x, y)) + proc square*[T](x: T): T {.inline.} = ## Return `x*x` x*x makeUniversal(square) +proc classify*[T: SomeFloat](t: Tensor[T]): Tensor[FloatClass] {.noinit.} = + ## Element-wise classify function (returns a tensor with the float class of each element). 
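+  ##
+  ## For instance (illustrative): ``[1.5, 0.0, Inf].toTensor.classify``
+  ## yields ``[fcNormal, fcZero, fcInf]``.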
+ ## + ## Returns: + ## A FloatClass tensor where each value is one of the following: + ## - fcNormal: value is an ordinary nonzero floating point value + ## - fcSubnormal: value is a subnormal (a very small) floating point value + ## - fcZero: value is zero + ## - fcNegZero: value is the negative zero + ## - fcNan: value is Not a Number (NaN) + ## - fcInf: value is positive infinity + ## - fcNegInf: value is negative infinity + t.map_inline(classify(x)) + type ConvolveMode* = enum full, same, valid proc convolveImpl[T: SomeNumber | Complex32 | Complex64]( diff --git a/src/arraymancer/tensor/operators_blas_l1.nim b/src/arraymancer/tensor/operators_blas_l1.nim index 4e479f8a..3729b059 100644 --- a/src/arraymancer/tensor/operators_blas_l1.nim +++ b/src/arraymancer/tensor/operators_blas_l1.nim @@ -73,16 +73,48 @@ proc `*`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], a: T): ## Element-wise multiplication by a scalar a * t -proc `/`*[T: SomeFloat|Complex[float32]|Complex[float64]](t: Tensor[T], a: T): Tensor[T] {.noinit.} = +proc `/`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], a: T): Tensor[T] {.noinit.} = ## Element-wise division by a float scalar returnEmptyIfEmpty(t) - t.map_inline(x / a) + when T is SomeInteger: + t.map_inline(x div a) + else: + t.map_inline(x / a) proc `div`*[T: SomeInteger](t: Tensor[T], a: T): Tensor[T] {.noinit.} = ## Element-wise division by an integer returnEmptyIfEmpty(t) t.map_inline(x div a) +proc `mod`*[T: SomeNumber](t: Tensor[T], val: T): Tensor[T] {.noinit.} = + ## Broadcasted modulo operation + returnEmptyIfEmpty(t) + result = t.map_inline(x mod val) + +proc `mod`*[T: SomeNumber](val: T, t: Tensor[T]): Tensor[T] {.noinit.} = + ## Broadcasted modulo operation + returnEmptyIfEmpty(t) + result = t.map_inline(val mod x) + +# Unsupported operations (these must be done using the broadcasted operators) +proc `+`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: Tensor[T], val: T): Tensor[T] {.noinit,inline.} = + ## Mathematical addition of tensors and scalars is undefined. Must use a broadcasted addition instead + {.error: "To add a scalar to a tensor you must use the `+.` operator (instead of a plain `+` operator)".} + +# Unsupported operations +proc `+`*[T: SomeNumber|Complex[float32]|Complex[float64]](val: T, a: Tensor[T]): Tensor[T] {.noinit,inline.} = + ## Mathematical addition of tensors and scalars is undefined. Must use a broadcasted addition instead + {.error: "To add a tensor to a scalar you must use the `+.` operator (instead of a plain `+` operator)".} + +proc `-`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: Tensor[T], val: T): Tensor[T] {.noinit,inline.} = + ## Mathematical subtraction of tensors and scalars is undefined. Must use a broadcasted addition instead + {.error: "To subtract a scalar from a tensor you must use the `-.` operator (instead of a plain `-` operator)".} + +# Unsupported operations +proc `-`*[T: SomeNumber|Complex[float32]|Complex[float64]](val: T, a: Tensor[T]): Tensor[T] {.noinit,inline.} = + ## Mathematical subtraction of tensors and scalars is undefined. 
Must use a broadcasted subtraction instead
+  {.error: "To subtract a tensor from a scalar you must use the `-.` operator (instead of a plain `-` operator)".}
+
 # #########################################################
 # # Tensor-scalar in-place linear algebra
diff --git a/src/arraymancer/tensor/operators_broadcasted.nim b/src/arraymancer/tensor/operators_broadcasted.nim
index fd6d176f..66103128 100644
--- a/src/arraymancer/tensor/operators_broadcasted.nim
+++ b/src/arraymancer/tensor/operators_broadcasted.nim
@@ -39,7 +39,6 @@ proc `*.`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Te
   ## Element-wise multiplication (Hadamard product).
   ##
   ## And broadcasted element-wise multiplication.
-
   let (tmp_a, tmp_b) = broadcast2(a, b)
   result = map2_inline(tmp_a, tmp_b, x * y)
 
@@ -53,6 +52,13 @@ proc `/.`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Te
   else:
     result = map2_inline(tmp_a, tmp_b, x / y )
 
+proc `mod`*[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noinit.} =
+  ## Tensor element-wise modulo operation
+  ##
+  ## And broadcasted element-wise modulo operation.
+  let (tmp_a, tmp_b) = broadcast2(a, b)
+  result = map2_inline(tmp_a, tmp_b, x mod y)
+
 # ##############################################
 # # Broadcasting in-place Tensor-Tensor
 
@@ -118,6 +124,16 @@ proc `-.`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], val: T
   returnEmptyIfEmpty(t)
   result = t.map_inline(x - val)
 
+proc `*.`*[T: SomeNumber|Complex[float32]|Complex[float64]](val: T, t: Tensor[T]): Tensor[T] {.noinit.} =
+  ## Broadcasted multiplication for scalar * tensor.
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(x * val)
+
+proc `*.`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], val: T): Tensor[T] {.noinit.} =
+  ## Broadcasted multiplication for tensor * scalar.
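+  ##
+  ## For example (illustrative): ``[1, 2, 3].toTensor *. 4`` gives ``[4, 8, 12]``.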
+ returnEmptyIfEmpty(t) + result = t.map_inline(x * val) + proc `/.`*[T: SomeNumber|Complex[float32]|Complex[float64]](val: T, t: Tensor[T]): Tensor[T] {.noinit.} = ## Broadcasted division returnEmptyIfEmpty(t) diff --git a/src/arraymancer/tensor/ufunc.nim b/src/arraymancer/tensor/ufunc.nim index 3ed05a4b..f1576faf 100644 --- a/src/arraymancer/tensor/ufunc.nim +++ b/src/arraymancer/tensor/ufunc.nim @@ -75,7 +75,7 @@ template makeUniversalLocal*(func_name: untyped) = # Unary functions from Nim math library makeUniversal(fac) -#makeUniversal(classify) +makeUniversal(isNaN) #makeUniversal(isPowerOfTwo) #makeUniversal(nextPowerOfTwo) #makeUniversal(countBits32) @@ -89,6 +89,9 @@ makeUniversal(exp) makeUniversal(arccos) makeUniversal(arcsin) makeUniversal(arctan) +makeUniversal(arccosh) +makeUniversal(arcsinh) +makeUniversal(arctanh) makeUniversal(cos) makeUniversal(cosh) makeUniversal(sinh) diff --git a/tests/tensor/test_aggregate.nim b/tests/tensor/test_aggregate.nim index 9ea3be2d..88e03dc7 100644 --- a/tests/tensor/test_aggregate.nim +++ b/tests/tensor/test_aggregate.nim @@ -243,6 +243,78 @@ proc main() = [1, 9, 45], [3, 12, 12]].toTensor + test "diff": + let a = arange(12).reshape([3, 4]) + block: + # Test diffs along the default axis + let expected_diff1_axis1 = ones[int](3, 3) + let expected_diff2_axis1 = zeros[int](3, 2) + check a.diff_discrete(0) == a + check a.diff_discrete == expected_diff1_axis1 + check a.diff_discrete(2) == expected_diff2_axis1 + block: + # Test diffs along a different axis + let expected_diff1_axis0 = 4 * ones[int](2, 4) + let expected_diff2_axis0 = zeros[int](1, 4) + check a.diff_discrete(0, axis=0) == a + check a.diff_discrete(axis=0) == expected_diff1_axis0 + check a.diff_discrete(2, axis=0) == expected_diff2_axis0 + block: + # Test boolean diffs + let b = [true, true, false, false, true].toTensor + let expected_bool_diff1 = [false, true, false, true].toTensor + let expected_bool_diff2 = [true, true, true].toTensor + check b.diff_discrete(0) == b + check b.diff_discrete() == expected_bool_diff1 + check b.diff_discrete(2) == expected_bool_diff2 + + test "unwrap_period": + block: + # Single dimension unwrap_period + let phase_deg = (linspace(0, 720, 9) mod 360).asType(int) -. 180 + let expected = linspace(0, 720, 9).asType(int) -. 180 + check unwrap_period(phase_deg, period=360) == expected + + # Check that unwrap_period also works with floats + check unwrap_period(phase_deg.asType(float), period=360.0) == expected.asType(float) + + let phase_rad = (linspace(0.0, 4*PI, 9).floorMod(2*PI)) -. PI + let expected_rad = linspace(0.0, 4*PI, 9) -. 
PI + check unwrap_period(phase_rad, period=2*PI) == expected_rad + check unwrap_period(phase_rad) == expected_rad + block: + # Multiple dimension unwrap_period through axis + let a = arange(0, 60*5, 5).reshape([3,4,5]) + let expected_axis0 = [[[ 0, 5, 10, 15, 20], + [25, 30, 35, 40, 45], + [50, 55, 60, 65, 70], + [75, 80, 85, 90, 95]], + [[-2, 3, 8, 13, 18], + [23, 28, 33, 38, 43], + [48, 53, 58, 63, 68], + [73, 78, 83, 88, 93]], + [[-4, 1, 6, 11, 16], + [21, 26, 31, 36, 41], + [46, 51, 56, 61, 66], + [71, 76, 81, 86, 91]]].toTensor + let expected_axis2 = [[[ 0, -1, -2, -3, -4], + [25, 24, 23, 22, 21], + [50, 49, 48, 47, 46], + [75, 74, 73, 72, 71]], + [[100, 99, 98, 97, 96], + [125, 124, 123, 122, 121], + [150, 149, 148, 147, 146], + [175, 174, 173, 172, 171]], + [[200, 199, 198, 197, 196], + [225, 224, 223, 222, 221], + [250, 249, 248, 247, 246], + [275, 274, 273, 272, 271]]].toTensor + check unwrap_period(a, axis=0, period=6) == expected_axis0 + check unwrap_period(a, axis=2, period=6) == expected_axis2 + # When the axis is not specified, the innermost axis is used + check unwrap_period(a, period=6) == expected_axis2 + # Check that unwrap_period also works with floats + check unwrap_period(a.asType(float), period=6.0) == expected_axis2.asType(float) test "Nonzero": block: diff --git a/tests/tensor/test_broadcasting.nim b/tests/tensor/test_broadcasting.nim index eb1e358f..8577a6ad 100644 --- a/tests/tensor/test_broadcasting.nim +++ b/tests/tensor/test_broadcasting.nim @@ -151,6 +151,15 @@ proc main() = [1.0/20.0], [1.0/30.0]].toTensor.asType(Complex[float64]) + block: # Modulo operation + let a = [1, 10, 20, 30].toTensor().reshape(4,1) + let b = [2, 3, 4].toTensor().reshape(1,3) + + check: a mod b == [[1, 1, 1], + [0, 1, 2], + [0, 2, 0], + [0, 0, 2]].toTensor + test "Implicit tensor-tensor broadcasting - basic in-place operations +.=, -.=, *.=, /.=": block: # Addition # Note: We can't broadcast the lhs with in-place operations @@ -231,6 +240,72 @@ proc main() = [10.0, 4, 2], [15.0, 6, 3]].toTensor.asType(Complex[float64]) + test "Implicit tensor-scalar broadcasting - basic operations +, -, *, /, mod": + block: # Addition + var a = [0, 10, 20, 30].toTensor().reshape(4,1) + var a_c = [0, 10, 20, 30].toTensor().reshape(4,1).asType(Complex[float64]) + + a = 100 +. a +. 200 + a_c = complex64(100.0, 0.0) +. a_c +. complex64(200.0, 0.0) + check: a == [[300], + [310], + [320], + [330]].toTensor + check: a_c == [[300], + [310], + [320], + [330]].toTensor.asType(Complex[float64]) + + block: # Substraction + var a = [0, 10, 20, 30].toTensor().reshape(4,1) + var a_c = [0, 10, 20, 30].toTensor().reshape(4,1).asType(Complex[float64]) + + a = 100 -. a -. 200 + a_c = complex64(100.0, 0.0) -. a_c -. complex64(200.0, 0.0) + check: a == [[-100], + [-110], + [-120], + [-130]].toTensor + check: a_c == [[-100], + [-110], + [-120], + [-130]].toTensor.asType(Complex[float64]) + + block: # Multiplication + var a = [0, 10, 20, 30].toTensor().reshape(4,1) + var a_c = [0, 10, 20, 30].toTensor().reshape(4,1).asType(Complex[float64]) + + a = 10 *. a *. 20 + a_c = complex64(10.0, 0.0) *. a_c *. complex64(20.0, 0.0) + check: a == [[0], + [2000], + [4000], + [6000]].toTensor + check: a_c == [[0], + [2000], + [4000], + [6000]].toTensor.asType(Complex[float64]) + + block: # Division + var a = [0, 10, 20, 30].toTensor().reshape(4,1) + var a_c = [0, 10, 20, 30].toTensor().reshape(4,1).asType(Complex[float64]) + + a = (a /. 2) / 5 + a_c = (a_c /. 
complex[float64](2.0)) / complex[float64](5.0) + check: a == [[0], + [1], + [2], + [3]].toTensor + check: a_c == [[0], + [1], + [2], + [3]].toTensor.asType(Complex[float64]) + + block: # Modulo operation + let a = [2, 5, 10].toTensor().reshape(1,3) + + check: a mod 3 == [[2, 2, 1]].toTensor + check: 3 mod a == [[1, 3, 3]].toTensor test "Implicit tensor-scalar broadcasting - basic operations +.=, -.=, .^=": block: # Addition diff --git a/tests/tensor/test_math_functions.nim b/tests/tensor/test_math_functions.nim index 799a3021..17608b38 100644 --- a/tests/tensor/test_math_functions.nim +++ b/tests/tensor/test_math_functions.nim @@ -104,6 +104,57 @@ proc main() = check: a_c.phase == expected_phases + test "Sign functions": + var a = [-5.3, 42.0, -0.0, 0.01, 10.7, -0.001, 0.9, -125.3].toTensor + let expected_signs = [-1, 1, 0, 1, 1, -1, 1, -1].toTensor() + check: a.sgn() == expected_signs + when (NimMajor, NimMinor, NimPatch) >= (1, 6, 0): + let new_signs = arange(-4.0, 4.0) + a.mcopySign(new_signs) + let expected = [-5.3, -42.0, -0.0, -0.01, 10.7, 0.001, 0.9, 125.3].toTensor + check: a == expected + + test "Modulo functions": + var a = arange(-70.7, 50.0, 34.7) + let expected_floormod_ts = [1.3, -0.0, 1.7, 0.4].toTensor() + let expected_floormod_st = [-67.7, -33.0, -0.9, 3.0].toTensor() + check: expected_floormod_ts.mean_absolute_error(a.floorMod(3.0)) < 1e-9 + check: expected_floormod_st.mean_absolute_error(floorMod(3.0, a)) < 1e-9 + + test "min-max": + var a = arange(-70, 50, 34) + var b = arange(53, -73, -34) + let expected_min = [-70, -36, -15, -49].toTensor() + let expected_max = [53, 19, -2, 32].toTensor() + check expected_min == min(a, b) + check expected_max == max(a, b) + + # N-element versions + let c = [100, -500, -100, 500].toTensor() + let expected_n_min = [-70, -500, -100, -49].toTensor() + let expected_n_max = [100, 19, -2, 500].toTensor() + check expected_n_min == min(a, b, c) + check expected_n_max == max(a, b, c) + + # In-place versions + var d = a.clone() + d.mmax(b) + check expected_max == d + d.mmax(b, c) + check expected_n_max == d + d = a.clone() + d.mmin(b) + check expected_min == d + d.mmin(b, c) + check expected_n_min == d + + test "isNaN & classify": + var a = [0.0, -0.0, 1.0/0.0, -1.0/0.0, 0.0/0.0].toTensor + let expected_isNaN = [false, false, false, false, true].toTensor() + let expected_classification = [fcZero, fcNegZero, fcInf, fcNegInf, fcNaN].toTensor() + check: expected_isNaN == a.isNaN + check: expected_classification == a.classify + test "1-D convolution": block: let a = arange(4)