Missing math #610
Merged
Changes from all commits
All 16 commits are by AngelEzquerra:

eec37ff  Add documentation for the clamp and mclamp ufuncs
20310f9  Add a number of missing math functions
0af48d0  Improve the compilation error message when trying to use the + and - …
a59b962  Add .* version of the Tensor * Scalar operators
f856922  Add support for higher rank tensors to the percentile function
cbd1434  Add a median function
7485ddd  Add broadcast support to the `mod` operator
adf1d23  Add support for integer division to the / version of the Tensor / Sca…
de925a9  Do not implement nor test copySign for nim versions earlier than 1.6
ac7bdf0  Add a diff function
5f1b6e8  Add an unwrap function
fc1cbb1  Add support for calculating the element-wise max and min of multiple …
ec0f8ca  Rename aggregate/unwrap into unwrap_period
8074e10  Rename aggregate/diff into diff_discrete
4728f15  Improve the documentation of diff_discrete
dded08f  Add missing isNaN and classify functions
@@ -16,13 +16,14 @@ import ./data_structure,
        ./init_cpu,
        ./higher_order_foldreduce,
        ./operators_broadcasted,
        ./operators_comparison,
        ./higher_order_applymap,
        ./math_functions,
        ./accessors,
        ./accessors_macros_syntax,
        ./algorithms,
        ./private/p_empty_tensors,
        math

        ./private/p_empty_tensors
import std/[math, macros]
import complex except Complex64, Complex32

# ### Standard aggregate functions

@@ -287,7 +288,7 @@ proc percentile*[T](arg: Tensor[T], p: int, isSorted = false): float =
  elif p <= 0: result = min(arg).float
  elif p >= 100: result = max(arg).float
  else:
    let a = if not isSorted: sorted(arg) else: arg
    let a = if not isSorted: sorted(arg.reshape([1, arg.size]).squeeze) else: arg
    let f = (arg.size - 1) * p / 100
    let i = floor(f).int
    if f == i.float: result = a[i].float

@@ -296,6 +297,10 @@ proc percentile*[T](arg: Tensor[T], p: int, isSorted = false): float =
      let frac = f - i.float
      result = (a[i].float + (a[i+1] - a[i]).float * frac)

proc median*[T](arg: Tensor[T], isSorted = false): float {.inline.} =
  ## Compute the median of all elements (same as `arg.percentile(50)`)
  percentile(arg, 50, isSorted)

proc iqr*[T](arg: Tensor[T]): float =
  ## Returns the interquartile range of the 1D tensor `t`.
  ##

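As a quick orientation, here is a usage sketch (not part of this diff) of the new `median` proc and the higher-rank `percentile` support, assuming the linear-interpolation behaviour shown above:

import arraymancer

let t = [[1.0, 7.0, 3.0],
         [4.0, 2.0, 6.0]].toTensor
# percentile now works on tensors of any rank: the elements are
# flattened (via reshape + squeeze) before sorting
echo t.percentile(50)   # -> 3.5 (sorted elements are 1, 2, 3, 4, 6, 7)
echo t.median()         # -> 3.5, same as t.percentile(50)
echo t.percentile(25)   # -> 2.25 (interpolates between 2.0 and 3.0)
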
@@ -337,6 +342,122 @@ proc cumprod*[T](arg: Tensor[T], axis: int = 0): Tensor[T] = # from hugogranstro
    else:
      temp[_] = result.atAxisIndex(axis, i-1) *. tAxis

proc diff_discrete*[T](arg: Tensor[T], n=1, axis: int = -1): Tensor[T] =
  ## Calculate the n-th discrete difference along the given axis.
  ##
  ## The first difference is given by `out[i] = a[i+1] - a[i]` along the given axis.
  ## Higher differences are calculated by using diff recursively.
  ##
  ## Input:
  ##   - A tensor
  ##   - n: The number of times values are differenced.
  ##        If zero, the input is returned as-is.
  ##   - axis: The axis along which the difference is taken,
  ##           default is the last axis.
  ## Returns:
  ##   - A tensor with the n-th discrete difference along the given axis.
  ##     Its size along that axis will be reduced by one.
  ##   - The code in this function is heavily based upon and equivalent
  ##     to numpy's `diff()` function.
  mixin `_`
  assert n >= 0, "diff order (" & $n & ") cannot be negative"
  if n == 0 or arg.size == 0:
    return arg
  let axis = if axis == -1:
    arg.shape.len + axis
  else:
    axis
  assert axis < arg.shape.len,
    "diff axis (" & $axis & ") cannot be greater than input shape length (" & $arg.shape.len & ")"
  var result_shape = arg.shape
  result_shape[axis] -= 1
  result = zeros[T](result_shape)
  for i, tAxis in enumerateAxis(arg, axis):
    if unlikely(i == 0):
      continue
    var temp = result.atAxisIndex(axis, i-1)
    when T is bool:
      temp[_] = tAxis != arg.atAxisIndex(axis, i-1)
    else:
      temp[_] = tAxis -. arg.atAxisIndex(axis, i-1)
  if n > 1:
    result = diff_discrete(result, n=n-1, axis=axis)

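A small usage sketch (not part of this diff) illustrating the behaviour documented above for first and higher-order differences:

import arraymancer

let v = [1, 4, 9, 16, 25].toTensor
echo v.diff_discrete()        # first difference: 3, 5, 7, 9
echo v.diff_discrete(n=2)     # second difference: 2, 2, 2

# along an axis of a rank-2 tensor
let m = [[1, 2, 4], [7, 11, 16]].toTensor
echo m.diff_discrete(axis=1)  # -> [[1, 2], [4, 5]]
echo m.diff_discrete(axis=0)  # -> [[6, 9, 12]]
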
proc unwrap_period*[T: SomeNumber](t: Tensor[T], discont: T = -1, axis = -1, period: T = default(T)): Tensor[T] {.noinit.} =
  # Unwrap a tensor by taking the complement of large deltas with respect to a period.
  #
  # This unwraps a tensor `t` by changing elements which have an absolute
  # difference from their predecessor of more than ``max(discont, period/2)``
  # to their `period`-complementary values.
  #
  # For the default case where `period` is `2*PI` and `discont` is
  # `PI`, this unwraps a radian phase `t` such that adjacent differences
  # are never greater than `PI` by adding `2*k*PI` for some integer `k`.
  #
  # Inputs:
  #   - t: Input Tensor.
  #   - discont: Maximum discontinuity between values. Default is `period/2`.
  #     Values below `period/2` are treated as if they were `period/2`.
  #     To have an effect different than the default, `discont` must be
  #     larger than `period/2`.
  #   - axis: Axis along which the unwrap will be done. Default is the last axis.
  #   - period: Size of the range over which the input wraps.
  #     By default, it is ``2*PI``.
  # Return:
  #   - Output Tensor.
  #
  # Notes:
  #   - If the discontinuity in `t` is smaller than ``period/2``,
  #     but larger than `discont`, no unwrapping is done because taking
  #     the complement would only make the discontinuity larger.
  #   - The code in this function is heavily based upon and equivalent
  #     to numpy's `unwrap()` function.
  mixin `_`
  let axis = if axis == -1:
    t.shape.len + axis
  else:
    axis
  let td = t.diff_discrete(axis=axis)
  let period: T = if period == default(T):
    when T is int:
      raise newException(ValueError, "unwrap period must be specified for integer types")
    else:
      2.0 * PI
  else:
    period
  let discont = if discont == -1:
    T(period/2)
  else:
    discont
  when T is int:
    when (NimMajor, NimMinor, NimPatch) >= (2, 0, 0):
      let (interval_high, rem) = divmod(period, 2)
    else:
      let interval_high = period div 2
      let rem = period mod 2
    let boundary_ambiguous = rem == 0
  else:
    let interval_high = period / 2
    let boundary_ambiguous = true
  let interval_low = -interval_high
  var tdmod = (td -. interval_low).floorMod(period) +. interval_low
  if boundary_ambiguous:
    const zero: T = T(0)
    tdmod[(tdmod ==. interval_low) and (td >. zero)] = interval_high
  var ph_correct = tdmod - td
  ph_correct[abs(td) <. discont] = 0
  result = t.clone()
  let ph_correct_cumsum = ph_correct.cumsum(axis)
  if t.rank == 1:
    result[1.._] = t[1.._] +. ph_correct_cumsum
  else:
    for i, tAxis in enumerateAxis(t, axis):
      if unlikely(i < 1):
        continue
      let pAxis = ph_correct_cumsum.atAxisIndex(axis, i-1)
      var temp = result.atAxisIndex(axis, i)
      temp[_] = tAxis +. pAxis

when (NimMajor, NimMinor, NimPatch) > (1, 6, 0):
  import std/atomics

proc nonzero*[T](arg: Tensor[T]): Tensor[int] =

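To illustrate the default radian use case, here is a sketch (not part of this diff) assuming the proc behaves like numpy's `unwrap`, as the doc comment above states:

import arraymancer

let phase = [0.0, 0.78, 5.49, 6.28].toTensor  # wrapped phase in radians
echo phase.unwrap_period()
# -> approximately [0.0, 0.78, -0.79, 0.0]
# the +4.71 jump from 0.78 to 5.49 exceeds PI, so 2*PI is subtracted
# from every subsequent element
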
Except it is currently default(T) :)

Ahh, in the body!
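For context, a hedged sketch (not from the PR) of what that `default(T)` sentinel means in practice: for integer tensors `default(T)` is 0, so `period` must be passed explicitly, while float tensors fall back to `2*PI`.

import arraymancer

let samples = [0, 3, 7, 1, 5].toTensor        # values that wrap modulo 8
echo samples.unwrap_period(period = 8)        # -> [0, 3, 7, 9, 13]
# calling samples.unwrap_period() without a period would hit the
# `period == default(T)` branch (0 for int) and raise a ValueError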