From 3147a4f286d5bc653af1c847eecf28fe542fe84e Mon Sep 17 00:00:00 2001
From: Angel Ezquerra
Date: Tue, 26 Mar 2024 10:31:50 +0100
Subject: [PATCH] Add support for more mixed Complex-Real operations

This change adds support for (almost all of) the same mixed Complex-Real
operations that nim's complex module supports. It also changes some of the
existing mixed ops a bit. In particular, we used to support mixed ops of
Complex64 with _any_ kind of number (including integers), but we did not
support Complex32 - float32 ops. While being able to mix Complex64 with ints
(for example) was nice, it was not consistent with nim's own complex library
and it would have required adding many more operator overloads, so I decided
to just be consistent at the cost of a little usability in a small number of
cases.
---
 src/arraymancer/tensor/operators_blas_l1.nim  |  74 ++++++
 .../tensor/operators_broadcasted.nim          | 238 +++++++++++++-----
 tests/tensor/test_operators_blas.nim          |  48 +++-
 3 files changed, 301 insertions(+), 59 deletions(-)

diff --git a/src/arraymancer/tensor/operators_blas_l1.nim b/src/arraymancer/tensor/operators_blas_l1.nim
index 57627a61..3fa4caf1 100644
--- a/src/arraymancer/tensor/operators_blas_l1.nim
+++ b/src/arraymancer/tensor/operators_blas_l1.nim
@@ -45,10 +45,26 @@ proc `+`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Ten
   ## Tensor addition
   map2_inline(a, b, x + y)
 
+proc `+`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
+  ## Mixed Complex[T] + Real Tensor addition
+  map2_inline(a, b, x + y)
+
+proc `+`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
+  ## Mixed Real + Complex[T] Tensor addition
+  map2_inline(a, b, x + y)
+
 proc `-`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Tensor[T] {.noinit.} =
   ## Tensor substraction
   map2_inline(a, b, x - y)
 
+proc `-`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
+  ## Mixed Complex[T] - Real Tensor subtraction
+  map2_inline(a, b, x - y)
+
+proc `-`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
+  ## Mixed Real - Complex[T] Tensor subtraction
+  map2_inline(a, b, x - y)
+
 # #########################################################
 # # Tensor-Tensor in-place linear algebra
 #
@@ -56,10 +72,18 @@ proc `+=`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: var Tensor[T], b:
   ## Tensor in-place addition
   a.apply2_inline(b, x + y)
 
+proc `+=`*[T: SomeNumber](a: var Tensor[Complex[T]], b: Tensor[T]) =
+  ## Mixed Complex + Real Tensor in-place addition
+  a.apply2_inline(b, x + y)
+
 proc `-=`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: var Tensor[T], b: Tensor[T]) =
   ## Tensor in-place substraction
   a.apply2_inline(b, x - y)
 
+proc `-=`*[T: SomeNumber](a: var Tensor[Complex[T]], b: Tensor[T]) =
+  ## Mixed Complex - Real Tensor in-place subtraction
+  a.apply2_inline(b, x - y)
+
 # #########################################################
 # # Tensor-scalar linear algebra
 #
@@ -68,10 +92,28 @@ proc `*`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: T, t: Tensor[T]):
   returnEmptyIfEmpty(t)
   t.map_inline(x * a)
 
+proc `*`*[T: SomeNumber](a: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
+  ## Element-wise mixed Real * Complex multiplication by a scalar
+  returnEmptyIfEmpty(t)
+  t.map_inline(x * a)
+
+proc `*`*[T: SomeNumber](a: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
+  ## Element-wise mixed Complex * Real multiplication by a scalar
+  returnEmptyIfEmpty(t)
+  t.map_inline(x * a)
+
 proc `*`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], a: T): Tensor[T] {.noinit.} =
   ## Element-wise multiplication by a scalar
   a * t
 
+proc `*`*[T: SomeNumber](t: Tensor[Complex[T]], a: T): Tensor[Complex[T]] {.noinit.} =
+  ## Element-wise mixed Complex * Real multiplication by a scalar
+  a * t
+
+proc `*`*[T: SomeNumber](t: Tensor[T], a: Complex[T]): Tensor[Complex[T]] {.noinit.} =
+  ## Element-wise mixed Real * Complex multiplication by a scalar
+  a * t
+
 proc `/`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], a: T): Tensor[T] {.noinit.} =
   ## Element-wise division by a float scalar
   returnEmptyIfEmpty(t)
@@ -80,6 +122,26 @@ proc `/`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], a: T):
   else:
     t.map_inline(x / a)
 
+proc `/`*[T: SomeNumber](a: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
+  ## Element-wise mixed Real scalar / Complex tensor division
+  returnEmptyIfEmpty(t)
+  t.map_inline(a / x)
+
+proc `/`*[T: SomeNumber](a: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
+  ## Element-wise mixed Complex scalar / Real tensor division
+  returnEmptyIfEmpty(t)
+  t.map_inline(a / x)
+
+proc `/`*[T: SomeNumber](t: Tensor[Complex[T]], a: T): Tensor[Complex[T]] {.noinit.} =
+  ## Element-wise mixed Complex tensor / Real scalar division
+  returnEmptyIfEmpty(t)
+  t.map_inline(x / a)
+
+proc `/`*[T: SomeNumber](t: Tensor[T], a: Complex[T]): Tensor[Complex[T]] {.noinit.} =
+  ## Element-wise mixed Real tensor / Complex scalar division
+  returnEmptyIfEmpty(t)
+  t.map_inline(x / a)
+
 proc `div`*[T: SomeInteger](t: Tensor[T], a: T): Tensor[T] {.noinit.} =
   ## Element-wise division by an integer
   returnEmptyIfEmpty(t)
@@ -123,12 +185,24 @@ proc `*=`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: var Tensor[T], a:
     return
   t.apply_inline(x * a)
 
+proc `*=`*[T: SomeNumber](t: var Tensor[Complex[T]], a: T) =
+  ## Element-wise mixed Complex * Real multiplication by a scalar (in-place)
+  if t.size == 0:
+    return
+  t.apply_inline(x * a)
+
 proc `/=`*[T: SomeFloat|Complex[float32]|Complex[float64]](t: var Tensor[T], a: T) =
   ## Element-wise division by a scalar (in-place)
   if t.size == 0:
     return
   t.apply_inline(x / a)
 
+proc `/=`*[T: SomeNumber](t: var Tensor[Complex[T]], a: T) =
+  ## Element-wise mixed Complex / Real division by a scalar (in-place)
+  if t.size == 0:
+    return
+  t.apply_inline(x / a)
+
 proc `/=`*[T: SomeInteger](t: var Tensor[T], a: T) =
   ## Element-wise division by a scalar (in-place)
   if t.size == 0:
     return
diff --git a/src/arraymancer/tensor/operators_broadcasted.nim b/src/arraymancer/tensor/operators_broadcasted.nim
index 4a64ed7e..f5bb9dd9 100644
--- a/src/arraymancer/tensor/operators_broadcasted.nim
+++ b/src/arraymancer/tensor/operators_broadcasted.nim
@@ -52,6 +52,13 @@ proc `/.`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Te
   else:
     result = map2_inline(tmp_a, tmp_b, x / y )
 
+proc `^.`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Tensor[T] {.noinit.} =
+  ## Tensor element-wise exponentiation
+  ##
+  ## And broadcasted element-wise exponentiation.
+  let (tmp_a, tmp_b) = broadcast2(a, b)
+  result = map2_inline(tmp_a, tmp_b, pow(x, y))
+
 proc `mod`*[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noinit.} =
   ## Tensor element-wise modulo operation
   ##
@@ -195,81 +202,196 @@ proc `/.=`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: var Tensor[T], v
 # #################################################
-# # Mixed Complex64 tensor - real scalar operations
+# # Mixed complex - real tensor operations
 #
-# It is always possible to operate on a Complex64 tensor with a real scalar
-# without loss of precission, so we allow it without explicit type conversion.
-# This makes complex tensor arithmetic more convenient to use.
+# Since nim's built-in complex module supports mixed complex-real operations
+# we allow them too (but in tensor form). This makes such mixed arithmetic
+# more efficient, as well as more convenient to use.
 
-proc `+.`*[T: SomeNumber](val: T, t: Tensor[Complex64]): Tensor[Complex64] {.noinit, inline.} =
-  ## Broadcasted addition for real scalar + Complex64 tensor
-  ##
-  ## The scalar is automatically converted to Complex64 before the operation.
-  let complex_val = complex(float64(val))
-  complex_val +. t
+proc `+.`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted addition for tensors of incompatible but broadcastable shape.
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  result = map2_inline(a, b, x + y)
 
-proc `+.`*[T: SomeNumber](t: Tensor[Complex64], val: T): Tensor[Complex64] {.noinit, inline.} =
-  ## Broadcasted addition for real scalar + Complex64 tensor
-  ##
-  ## The scalar is automatically converted to Complex64 before the operation.
-  let complex_val = complex(float64(val))
-  t +. complex_val
+proc `+.`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted addition for tensors of incompatible but broadcastable shape.
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  result = map2_inline(a, b, x + y)
 
-proc `-.`*[T: SomeNumber](val: T, t: Tensor[Complex64]): Tensor[Complex64] {.noinit, inline.} =
-  ## Broadcasted subtraction for real scalar - Complex64 tensor
-  ##
-  ## The scalar is automatically converted to Complex64 before the operation.
-  let complex_val = complex(float64(val))
-  complex_val -. t
+proc `-.`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted subtraction for tensors of incompatible but broadcastable shape.
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  result = map2_inline(a, b, x - y)
 
-proc `-.`*[T: SomeNumber](t: Tensor[Complex64], val: T): Tensor[Complex64] {.noinit, inline.} =
-  ## Broadcasted subtraction for real scalar - Complex64 tensor
-  ##
-  ## The scalar is automatically converted to Complex64 before the operation.
-  let complex_val = complex(float64(val))
-  t -. complex_val
+proc `-.`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted subtraction for tensors of incompatible but broadcastable shape.
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  result = map2_inline(a, b, x - y)
 
-proc `*.`*[T: SomeNumber](val: T, t: Tensor[Complex64]): Tensor[Complex64] {.noinit, inline.} =
-  ## Broadcasted multiplication for real scalar * Complex64 tensor
+proc `*.`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
+  ## Element-wise multiplication (Hadamard product).
   ##
-  ## The scalar is automatically converted to Complex64 before the operation.
-  let complex_val = complex(float64(val))
-  complex_val *. t
+  ## And broadcasted element-wise multiplication.
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  result = map2_inline(a, b, x * y)
 
-proc `*.`*[T: SomeNumber](t: Tensor[Complex64], val: T): Tensor[Complex64] {.noinit, inline.} =
-  ## Broadcasted multiplication for real scalar * Complex64 tensor
+proc `*.`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
+  ## Element-wise multiplication (Hadamard product).
   ##
-  ## The scalar is automatically converted to Complex64 before the operation.
-  let complex_val = complex(float64(val))
-  t *. complex_val
+  ## And broadcasted element-wise multiplication.
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  result = map2_inline(a, b, x * y)
 
-proc `/.`*[T: SomeNumber](val: T, t: Tensor[Complex64]): Tensor[Complex64] {.noinit, inline.} =
-  ## Broadcasted division for real scalar / Complex64 tensor
+proc `/.`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
+  ## Tensor element-wise division
   ##
-  ## The scalar is automatically converted to Complex64 before the operation.
-  let complex_val = complex(float64(val))
-  complex_val /. t
+  ## And broadcasted element-wise division.
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  when T is SomeInteger:
+    result = map2_inline(a, b, x div y )
+  else:
+    result = map2_inline(a, b, x / y )
 
-proc `/.`*[T: SomeNumber](t: Tensor[Complex64], val: T): Tensor[Complex64] {.noinit, inline.} =
-  ## Broadcasted division for real scalar / Complex64 tensor
+proc `/.`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
+  ## Tensor element-wise division
   ##
-  ## The scalar is automatically converted to Complex64 before the operation.
-  let complex_val = complex(float64(val))
-  t /. complex_val
+  ## And broadcasted element-wise division.
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  when T is SomeInteger:
+    result = map2_inline(a, b, x div y )
+  else:
+    result = map2_inline(a, b, x / y )
 
-proc `^.`*[T: SomeNumber](val: T, t: Tensor[Complex64]): Tensor[Complex64] {.noinit, inline.} =
-  ## Broadcasted exponentiation for real scalar ^ Complex64 tensor
+proc `^.`*[T: SomeFloat](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
+  ## Tensor element-wise exponentiation for mixed complex ^ real tensors
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  result = map2_inline(a, b, pow(x, complex(y)))
+
+proc `^.`*[T: SomeFloat](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
+  ## Tensor element-wise exponentiation for mixed real ^ complex tensors
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  result = map2_inline(a, b, pow(complex(x), y))
+
+proc `mod`*[T: SomeNumber](a: Tensor[Complex[T]], b: Tensor[T]): Tensor[Complex[T]] {.noinit.} =
+  ## Tensor element-wise modulo operation
   ##
-  ## The scalar is automatically converted to Complex64 before the operation.
-  let complex_val = complex(float64(val))
-  complex_val ^. t
+  ## And broadcasted element-wise modulo operation.
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  result = map2_inline(a, b, x mod y)
 
-proc `^.`*[T: SomeNumber](t: Tensor[Complex64], val: T): Tensor[Complex64] {.noinit, inline.} =
-  ## Broadcasted exponentiation for real scalar ^ Complex64 tensor
+proc `mod`*[T: SomeNumber](a: Tensor[T], b: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit.} =
+  ## Tensor element-wise modulo operation
   ##
-  ## The scalar is automatically converted to Complex64 before the operation.
-  let complex_val = complex(float64(val))
-  t ^. complex_val
+  ## And broadcasted element-wise modulo operation.
+  #check_shape(a, b, relaxed_rank1_check = RelaxedRankOne)
+  result = map2_inline(a, b, x mod y)
+
+# #################################################
+# # Mixed complex tensor - real scalar operations
+#
+# Since nim's built-in complex module supports mixed complex-real operations
+# we allow them too (but in tensor form). This makes such mixed arithmetic
+# more efficient, as well as more convenient to use.
+
+proc `+.`*[T: SomeNumber](val: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted addition for real scalar + complex tensor
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(val + x)
+
+proc `+.`*[T: SomeNumber](val: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted addition for complex scalar + real tensor
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(val + x)
+
+proc `+.`*[T: SomeNumber](t: Tensor[Complex[T]], val: T): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted addition for complex tensor + real scalar
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(x + val)
+
+proc `+.`*[T: SomeNumber](t: Tensor[T], val: Complex[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted addition for real tensor + complex scalar
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(x + val)
+
+proc `-.`*[T: SomeNumber](val: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted subtraction for real scalar - complex tensor
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(val - x)
+
+proc `-.`*[T: SomeNumber](val: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted subtraction for complex scalar - real tensor
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(val - x)
+
+proc `-.`*[T: SomeNumber](t: Tensor[Complex[T]], val: T): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted subtraction for complex tensor - real scalar
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(x - val)
+
+proc `-.`*[T: SomeNumber](t: Tensor[T], val: Complex[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted subtraction for real tensor - complex scalar
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(x - val)
+
+proc `*.`*[T: SomeNumber](val: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted multiplication for real scalar * complex tensor
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(val * x)
+
+proc `*.`*[T: SomeNumber](val: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted multiplication for complex scalar * real tensor
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(val * x)
+
+proc `*.`*[T: SomeNumber](t: Tensor[Complex[T]], val: T): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted multiplication for complex tensor * real scalar
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(x * val)
+
+proc `*.`*[T: SomeNumber](t: Tensor[T], val: Complex[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted multiplication for real tensor * complex scalar
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(x * val)
+
+proc `/.`*[T: SomeNumber](val: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted division for real scalar / complex tensor
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(val / x)
+
+proc `/.`*[T: SomeNumber](val: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted division for complex scalar / real tensor
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(val / x)
+
+proc `/.`*[T: SomeNumber](t: Tensor[Complex[T]], val: T): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted division for complex tensor / real scalar
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(x / val)
+
+proc `/.`*[T: SomeNumber](t: Tensor[T], val: Complex[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted division for real tensor / complex scalar
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(x / val)
+
+proc `^.`*[T: SomeFloat](val: T, t: Tensor[Complex[T]]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted exponentiation for real scalar ^ complex tensor
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(pow(complex(val), x))
+
+proc `^.`*[T: SomeFloat](val: Complex[T], t: Tensor[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted exponentiation for complex scalar ^ real tensor
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(pow(val, x))
+
+proc `^.`*[T: SomeFloat](t: Tensor[Complex[T]], val: T): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted exponentiation for complex tensor ^ real scalar
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(pow(x, val))
+
+proc `^.`*[T: SomeFloat](t: Tensor[T], val: Complex[T]): Tensor[Complex[T]] {.noinit, inline.} =
+  ## Broadcasted exponentiation for real tensor ^ complex scalar
+  returnEmptyIfEmpty(t)
+  result = t.map_inline(pow(complex(x), val))
 
 proc `+.=`*[T: SomeNumber](t: var Tensor[Complex64], val: T) {.inline.} =
   ## Complex64 tensor in-place addition with a real scalar.
diff --git a/tests/tensor/test_operators_blas.nim b/tests/tensor/test_operators_blas.nim
index 33521768..c6cec178 100644
--- a/tests/tensor/test_operators_blas.nim
+++ b/tests/tensor/test_operators_blas.nim
@@ -445,7 +445,7 @@ proc main() =
       check: n1 * n2 == n1n2
 
-    test "complex matrix product":
+    test "Complex matrix product":
       # [[1. + 1.j, 2. + 2.j, 3. + 3.j]     [[1. + 1.j, 4. + 4.j],     [[0. + 28.j, 0. + 64.j],
       #  [4. +4.j, 5. + 5.j, 6. + 6.j]]  *   [2. + 2.j, 5. +5.j],   ==  [0. + 64.j, 0. + 154.j]
       #                                      [3. + 3.j, 6. + 6.j]]
 
@@ -457,5 +457,51 @@ proc main() =
     let m6 = [[28,64],[64,154]].toTensor().asType(Complex[float64])
     check: m5 == m6 * complex64(0,1)
 
+    test "Mixed complex - real operations":
+      let r = arange(1.0, 4.0)
+      let c = complex(arange(1.0, 4.0))
+
+      check: c + r == c + complex(r)
+      check: c +. r == c +. complex(r)
+      check: r + c == complex(r) + c
+      check: r +. c == complex(r) +. c
+      check: c +. 5.0 == c +. complex(5.0)
+      check: 5.0 +. c == complex(5.0) +. c
+      check: r +. complex64(5.0) == c +. complex(5.0)
+      check: complex64(5.0) +. r == c +. complex(5.0)
+
+      check: c - r == c - complex(r)
+      check: c -. r == c -. complex(r)
+      check: r - c == complex(r) - c
+      check: c -. 5.0 == c -. complex(5.0)
+      check: 5.0 -. c == complex(5.0) -. c
+      check: r -. complex64(5.0) == c -. complex(5.0)
+
+      check: c *. r == c *. complex(r)
+      check: r *. c == complex(r) *. c
+      check: c * 5.0 == c * complex(5.0)
+      check: 5.0 * c == complex(5.0) * c
+      check: r * complex(5.0) == c * complex(5.0)
+      check: complex(5.0) * r == c * complex(5.0)
+      check: c *. 5.0 == c * complex(5.0)
+      check: 5.0 *. c == complex(5.0) * c
+      check: r *. complex(5.0) == c * complex(5.0)
+      check: complex(5.0) *. r == c * complex(5.0)
+
+      check: c /. r == c /. complex(r)
+      check: r /. c == complex(r) /. c
+      check: c / 5.0 == c / complex(5.0)
+      check: mean_absolute_error(5.0 /. c, complex(5.0) /. c) < 1e-9
+      check: mean_absolute_error(r / complex(5.0), c / complex(5.0)) < 1e-9
+      check: c /. 5.0 == c / complex(5.0)
+      check: mean_absolute_error(r /. complex(5.0), c / complex(5.0)) < 1e-9
+
+      check: c ^. r == c ^. complex(r)
+      check: r ^. c == complex(r) ^. c
+      check: c ^. 5.0 == c ^. complex(5.0)
+      check: 5.0 ^. c == complex(5.0) ^. c
+      check: r ^. complex64(5.0) == c ^. complex(5.0)
+
+
 main()
 GC_fullCollect()
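
For reference, a minimal usage sketch of the operations added by this patch (assuming an
arraymancer build that includes it; `arange` and the tensor `complex` constructor are
existing arraymancer procs, used in the new tests above):

    import arraymancer
    import std/complex

    let r = arange(1.0, 4.0)    # real float64 tensor: [1.0, 2.0, 3.0]
    let c = complex(r)          # the same values as a Complex64 tensor

    # Mixed tensor-tensor ops no longer require converting the real operand:
    echo c + r                  # Complex64 tensor + float64 tensor
    echo r *. c                 # broadcasted element-wise multiplication

    # Mixed tensor-scalar ops:
    echo c +. 5.0               # Complex64 tensor + float64 scalar
    echo r * complex(5.0)       # float64 tensor * Complex64 scalar
    echo c ^. 2.0               # element-wise exponentiation by a real scalar

As the commit message notes, mixing Complex64 with integers is no longer supported;
convert the integer operand to a float (or complex) value first.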