Relax some shape checks for rank-1 tensors (#625)
* Relax some shape checks for rank-1 tensors

Treat rank-1 tensors as "shapeless", so that a rank-1 tensor of N elements can fit into both a [1, N] and an [N, 1] tensor, for example.

This greatly reduces the need to call `.reshape` after converting a seq or array into a tensor for an assignment, and avoids runtime errors in code that is conceptually correct. For example, with this change the following works:
```nim
var t = eye(3, 3)
t[_,0] = [1, 2, 3].toTensor
```

* Allow disabling the relaxed rank-1 checks using `-d:RelaxedRankOne=false` (see the sketch below)
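
For comparison, a minimal sketch reusing the example above (illustration only, not part of this commit): with the flag disabled, the rank-1 right-hand side has to be reshaped to the slice's exact shape.

```nim
# Build with the relaxed checks turned off:
#   nim c -d:RelaxedRankOne=false example.nim
import arraymancer

var t = eye(3, 3)
# The column slice t[_, 0] has shape [3, 1], so with strict checks the
# rank-1 tensor produced by toTensor must be reshaped explicitly:
t[_, 0] = [1, 2, 3].toTensor.reshape(3, 1)
```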

---------

Co-authored-by: Vindaar <basti90@gmail.com>
AngelEzquerra and Vindaar authored Mar 2, 2024
1 parent 7f9359b commit 6a41201
Showing 3 changed files with 61 additions and 5 deletions.
11 changes: 9 additions & 2 deletions src/arraymancer/tensor/private/p_accessors_macros_write.nim
```diff
@@ -24,6 +24,13 @@ import ../../laser/private/nested_containers,
 # #########################################################################
 # Slicing macros - write access
 
+## `RelaxedRankOne` is a CT variable exposed to the user to recover the old behavior
+## of how rank 1 tensors are treated in mutating slices.
+## If set to `false` using `-d:RelaxedRankOne=false`, slice assignments using rank 1
+## arrays / seqs / tensors have to match exactly. If it is `true`, only the number of
+## input elements has to match, for a more convenient interface.
+const RelaxedRankOne* {.booldefine.} = true
+
 # #########################################################################
 # Setting a single value
 
@@ -80,7 +87,7 @@ template slicerMutImpl_oa[T](t: var Tensor[T], slices: openArray[SteppedSlice],
 
   var sliced = t.slicer(slices)
   when compileOption("boundChecks"):
-    check_shape(sliced, oa)
+    check_shape(sliced, oa, relaxed_rank1_check = RelaxedRankOne)
 
   var data = toSeq(flatIter(oa))
   when compileOption("boundChecks"):
@@ -140,7 +147,7 @@ template slicerMutImpl_T[T](t: var Tensor[T], slices: openArray[SteppedSlice], t
   var sliced = t.slicer(slices)
 
   when compileOption("boundChecks"):
-    check_shape(sliced, t2)
+    check_shape(sliced, t2, relaxed_rank1_check = RelaxedRankOne)
 
   apply2_inline(sliced, t2):
     y
```
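
The `RelaxedRankOne` constant above uses Nim's `{.booldefine.}` pragma, which lets a `-d:Name=value` switch override the default at compile time. A standalone sketch of that mechanism (the `RelaxedDemo` name is made up for illustration):

```nim
# demo.nim
#   nim c -r demo.nim                        -> "relaxed (default)"
#   nim c -r -d:RelaxedDemo=false demo.nim   -> "strict (overridden)"
const RelaxedDemo {.booldefine.} = true

when RelaxedDemo:
  echo "relaxed (default)"
else:
  echo "strict (overridden)"
```
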
13 changes: 12 additions & 1 deletion src/arraymancer/tensor/private/p_checks.nim
```diff
@@ -93,14 +93,25 @@ func check_start_end*(a, b: int, dim_size: int) {.inline.} =
       ". Slicing must be done between 0 (inclusive) and " &
       $dim_size & " (exclusive).")
 
-func check_shape*(a: Tensor; b: Tensor|openArray) {.inline.}=
+func check_shape*(a: Tensor; b: Tensor|openArray;
+                  relaxed_rank1_check: static[bool] = false) {.inline.} =
   ## Compare shape
 
   when b is Tensor:
     let b_shape = b.shape
   else:
     let b_shape = b.getShape()
 
+  when relaxed_rank1_check:
+    # When b is a rank-1 tensor, just check its size (i.e. make a rank-1
+    # tensor of size n "fit" into a [1, n] rank-2 tensor, for example)
+    when b is Tensor:
+      let b_rank = b.rank
+    else:
+      let b_rank = b_shape.len
+    if b_rank == 1 and b.len == a.len:
+      return
+
   if unlikely(a.shape != b_shape):
     raise newException(IndexDefect, "Your tensors or openArrays do not have the same shape: " &
       $a.shape &
```
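
The relaxed rule added to `check_shape` boils down to: accept a rank-1 input whenever its element count matches that of the target, otherwise require an exact shape match. A standalone sketch of that logic (the `fitsInto` helper is hypothetical, not Arraymancer code):

```nim
import std/sequtils

# Hypothetical helper mirroring the relaxed check above.
proc fitsInto(targetShape, sourceShape: seq[int]): bool =
  let targetLen = targetShape.foldl(a * b, 1)
  let sourceLen = sourceShape.foldl(a * b, 1)
  if sourceShape.len == 1 and sourceLen == targetLen:
    return true                          # relaxed rank-1 rule: size only
  result = sourceShape == targetShape    # otherwise require an exact match

doAssert fitsInto(@[3, 1], @[3])         # rank-1 data fits a [3, 1] column slice
doAssert fitsInto(@[1, 4], @[4])         # ... and a [1, 4] row slice
doAssert not fitsInto(@[3, 1], @[4])     # wrong element count is still rejected
```
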
42 changes: 40 additions & 2 deletions tests/tensor/test_fancy_indexing.nim
```diff
@@ -122,6 +122,31 @@ proc main() =
 
     check: y == exp
 
+  test "Masked assign tensor or openArray via fancy indexing":
+    block: # y[y > 50] = np.array([-100, -200])
+      var y = x.clone()
+      # Assign a tensor
+      y[y >. 50] = [-100, -200].toTensor()
+
+      let exp = [[ 4, -100, 2],
+                 [ 3, 4, -200],
+                 [ 1, 8, 7],
+                 [ 8, 6, 8]].toTensor()
+
+      check: y == exp
+
+    block: # y[y > 50] = [-100, -200]
+      var y = x.clone()
+      # Assign an openArray
+      y[y >. 50] = [-100, -200]
+
+      let exp = [[ 4, -100, 2],
+                 [ 3, 4, -200],
+                 [ 1, 8, 7],
+                 [ 8, 6, 8]].toTensor()
+
+      check: y == exp
+
   test "Masked axis assign value via fancy indexing":
     block: # y[:, y.sum(axis = 0) > 50] = -100
       var y = x.clone()
@@ -146,11 +171,11 @@ proc main() =
       check: y == exp
 
   test "Masked axis assign tensor via fancy indexing - invalid Numpy syntaxes":
-    block: # y[:, y.sum(axis = 0) > 50] = np.array([10, 20, 30, 40])
+    block: # y[:, y.sum(axis = 0) > 50] = np.array([[10, 20, 30, 40]])
      var y = x.clone()
 
      expect(IndexDefect):
-        y[_, y.sum(axis = 0) >. 50] = [10, 20, 30, 40].toTensor()
+        y[_, y.sum(axis = 0) >. 50] = [[10, 20, 30, 40]].toTensor()
 
   test "Masked axis assign broadcastable 1d tensor via fancy indexing":
     block: # y[:, y.sum(axis = 0) > 50] = np.array([[10], [20], [30], [40]])
@@ -175,6 +200,19 @@ proc main() =
 
       check: y == exp
 
+    block:
+      # Assigning a rank-1 tensor into an axis of the same size is supported
+      # Note that this is not supported by numpy
+      var y = x.clone()
+      y[_, y.sum(axis = 0) >. 50] = [10, 20, 30, 40].toTensor()
+
+      let exp = [[ 4, 10, 10],
+                 [ 3, 20, 20],
+                 [ 1, 30, 30],
+                 [ 8, 40, 40]].toTensor()
+
+      check: y == exp
+
   # TODO - only broadcastable tensor assign are supported at the moment
   # test "Masked axis assign multidimensional tensor via fancy indexing":
   # block: # y[:, y.sum(axis = 0) > 50] = np.array([[10, 50], [20, 60], [30, 70], [40, 80]])
```
