Merge branch 'ershi/tests-use-assert-np-equal' into 'main'
Use assert_np_equal for more information in tests

See merge request omniverse/warp!529
shi-eric committed May 29, 2024
2 parents ebcc90d + 544bd9a commit 7fcbddf
Showing 6 changed files with 85 additions and 82 deletions.
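
For context on the commit message: a bare "assert np.array_equal(a, b)" raises a plain AssertionError with no detail, while a helper such as assert_np_equal (provided by warp/tests/unittest_utils.py) can report the actual and expected values when a comparison fails. A minimal sketch of what such a helper might look like; the real signature and tolerance handling in Warp are assumed here for illustration only:

import numpy as np

def assert_np_equal(result, expect, tol=0.0):
    # On a mismatch, numpy.testing raises an AssertionError that prints the
    # differing elements, which is the extra information the commit refers to.
    if tol != 0.0:
        np.testing.assert_allclose(result, expect, atol=tol, rtol=0.0, equal_nan=True)
    else:
        np.testing.assert_array_equal(result, expect)

Note the argument order used throughout the diff: the computed Warp result is passed first and the NumPy reference second.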
16 changes: 8 additions & 8 deletions warp/tests/test_array.py
@@ -420,10 +420,10 @@ def test_view(test, device):
wp_arr_e = wp.array(np_arr_e, dtype=wp.vec4, device=device)
wp_arr_f = wp.array(np_arr_e, dtype=wp.quat, device=device)

- assert np.array_equal(np_arr_a.view(dtype=np.float32), wp_arr_a.view(dtype=wp.float32).numpy())
- assert np.array_equal(np_arr_b.view(dtype=np.uint32), wp_arr_b.view(dtype=wp.uint32).numpy())
- assert np.array_equal(np_arr_c.view(dtype=np.float16), wp_arr_c.view(dtype=wp.float16).numpy())
- assert np.array_equal(np_arr_d.view(dtype=np.uint16), wp_arr_d.view(dtype=wp.uint16).numpy())
+ assert_np_equal(wp_arr_a.view(dtype=wp.float32).numpy(), np_arr_a.view(dtype=np.float32))
+ assert_np_equal(wp_arr_b.view(dtype=wp.uint32).numpy(), np_arr_b.view(dtype=np.uint32))
+ assert_np_equal(wp_arr_c.view(dtype=wp.float16).numpy(), np_arr_c.view(dtype=np.float16))
+ assert_np_equal(wp_arr_d.view(dtype=wp.uint16).numpy(), np_arr_d.view(dtype=np.uint16))
assert_array_equal(wp_arr_e.view(dtype=wp.quat), wp_arr_f)


@@ -484,7 +484,7 @@ def test_transpose(test, device):
check = wp.zeros(shape=(2, 3), dtype=int, device=device)

wp.launch(compare_2darrays, dim=(2, 3), inputs=[arr_transpose, arr_compare, check], device=device)
- assert np.array_equal(check.numpy(), np.ones((2, 3), dtype=int))
+ assert_np_equal(check.numpy(), np.ones((2, 3), dtype=int))

# test transpose in square 3d case
# wp does not support copying from/to non-contiguous arrays so check in kernel
@@ -495,21 +495,21 @@ def test_transpose(test, device):
check = wp.zeros(shape=(3, 2, 2), dtype=int, device=device)

wp.launch(compare_3darrays, dim=(3, 2, 2), inputs=[arr_transpose, arr_compare, check], device=device)
- assert np.array_equal(check.numpy(), np.ones((3, 2, 2), dtype=int))
+ assert_np_equal(check.numpy(), np.ones((3, 2, 2), dtype=int))

# test transpose in square 3d case without axes supplied
arr_transpose = arr.transpose()
arr_compare = wp.array3d(np_arr.transpose(), dtype=float, device=device)
check = wp.zeros(shape=(2, 2, 3), dtype=int, device=device)

wp.launch(compare_3darrays, dim=(2, 2, 3), inputs=[arr_transpose, arr_compare, check], device=device)
- assert np.array_equal(check.numpy(), np.ones((2, 2, 3), dtype=int))
+ assert_np_equal(check.numpy(), np.ones((2, 2, 3), dtype=int))

# test transpose in 1d case (should be noop)
np_arr = np.array([1, 2, 3], dtype=float)
arr = wp.array(np_arr, dtype=float, device=device)

- assert np.array_equal(np_arr.transpose(), arr.transpose().numpy())
+ assert_np_equal(arr.transpose().numpy(), np_arr.transpose())


def test_fill_scalar(test, device):
12 changes: 6 additions & 6 deletions warp/tests/test_ctypes.py
@@ -55,7 +55,7 @@ def test_vec2_arg(test, device, n):
wp.launch(add_vec2, dim=n, inputs=[dest, c], device=device)

# ensure type can round-trip from Python->GPU->Python
- test.assertTrue(np.array_equal(dest.numpy(), np.tile(c, (n, 1))))
+ assert_np_equal(dest.numpy(), np.tile(c, (n, 1)))


def test_vec2_transform(test, device, n):
@@ -65,16 +65,16 @@ def test_vec2_transform(test, device, n):
m = np.array(((3.0, -1.0), (2.5, 4.0)))

wp.launch(transform_vec2, dim=n, inputs=[dest_right, dest_left, m, c], device=device)
- test.assertTrue(np.array_equal(dest_right.numpy(), np.tile(m @ c, (n, 1))))
- test.assertTrue(np.array_equal(dest_left.numpy(), np.tile(c @ m, (n, 1))))
+ assert_np_equal(dest_right.numpy(), np.tile(m @ c, (n, 1)))
+ assert_np_equal(dest_left.numpy(), np.tile(c @ m, (n, 1)))


def test_vec3_arg(test, device, n):
dest = wp.zeros(n=n, dtype=wp.vec3, device=device)
c = np.array((1.0, 2.0, 3.0))

wp.launch(add_vec3, dim=n, inputs=[dest, c], device=device)
- test.assertTrue(np.array_equal(dest.numpy(), np.tile(c, (n, 1))))
+ assert_np_equal(dest.numpy(), np.tile(c, (n, 1)))


def test_vec3_transform(test, device, n):
@@ -84,8 +84,8 @@ def test_vec3_transform(test, device, n):
m = np.array(((1.0, 2.0, 3.0), (4.0, 5.0, 6.0), (7.0, 8.0, 9.0)))

wp.launch(transform_vec3, dim=n, inputs=[dest_right, dest_left, m, c], device=device)
- test.assertTrue(np.array_equal(dest_right.numpy(), np.tile(m @ c, (n, 1))))
- test.assertTrue(np.array_equal(dest_left.numpy(), np.tile(c @ m, (n, 1))))
+ assert_np_equal(dest_right.numpy(), np.tile(m @ c, (n, 1)))
+ assert_np_equal(dest_left.numpy(), np.tile(c @ m, (n, 1)))


def test_transform_multiply(test, device, n):
2 changes: 1 addition & 1 deletion warp/tests/test_hash_grid.py
@@ -126,7 +126,7 @@ def test_hashgrid_query(test, device):

print(f"Passed: {np.array_equal(counts, counts_ref)}")

- test.assertTrue(np.array_equal(counts, counts_ref))
+ assert_np_equal(counts, counts_ref)


def test_hashgrid_inputs(test, device):
7 changes: 4 additions & 3 deletions warp/tests/test_linear_solvers.py
@@ -3,10 +3,11 @@
import numpy as np

import warp as wp
- from warp.context import runtime # noqa: E402
from warp.optim.linear import bicgstab, cg, cr, gmres, preconditioner
from warp.tests.unittest_utils import *

+ wp.init() # For runtime.core.is_cutlass_enabled()


def _check_linear_solve(test, A, b, func, *args, **kwargs):
# test from zero
@@ -169,11 +170,11 @@ class TestLinearSolvers(unittest.TestCase):

devices = get_test_devices()

- if not runtime.core.is_cutlass_enabled():
+ if not wp.context.runtime.core.is_cutlass_enabled():
devices = [d for d in devices if not d.is_cuda]
print("Skipping CUDA linear solver tests because CUTLASS is not supported in this build")

- if runtime.core.is_debug_enabled():
+ if wp.context.runtime.core.is_debug_enabled():
# cutlass-based matmul is *very* slow in debug mode -- skip
devices = [d for d in devices if not d.is_cuda]
print("Skipping CUDA linear solver tests in debug mode")
71 changes: 36 additions & 35 deletions warp/tests/test_matmul.py
@@ -11,9 +11,10 @@
import numpy as np

import warp as wp
- from warp.context import runtime  # noqa: E402
from warp.tests.unittest_utils import *

+ wp.init()  # For wp.context.runtime.core.is_cutlass_enabled()


class gemm_test_bed_runner:
def __init__(self, dtype, device):
@@ -78,7 +79,7 @@ def run_and_verify(self, m, n, k, batch_count, alpha, beta):
tape.backward(grads={D: ones})

D_np = alpha * (A.numpy() @ B.numpy()) + beta * C.numpy()
- assert np.array_equal(D_np, D.numpy())
+ assert_np_equal(D.numpy(), D_np)

adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose())
adj_B_np = alpha * (A.numpy().transpose() @ ones.numpy())
@@ -91,15 +92,15 @@ def run_and_verify(self, m, n, k, batch_count, alpha, beta):
tape.backward(grads={D: ones})

D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C.numpy()
- assert np.array_equal(D_np, D.numpy())
+ assert_np_equal(D.numpy(), D_np)

adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose((0, 2, 1)))
adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones.numpy())
adj_C_np = beta * ones.numpy()

- assert np.array_equal(adj_A_np, A.grad.numpy())
- assert np.array_equal(adj_B_np, B.grad.numpy())
- assert np.array_equal(adj_C_np, C.grad.numpy())
+ assert_np_equal(A.grad.numpy(), adj_A_np)
+ assert_np_equal(B.grad.numpy(), adj_B_np)
+ assert_np_equal(C.grad.numpy(), adj_C_np)

def run(self):
Ms = [64, 128, 512]
@@ -200,9 +201,9 @@ def run_and_verify(self, m, n, k, batch_count, alpha, beta):
tape.backward(grads={D1: ones1, D2: ones2, D3: ones3})

D_np = alpha * (A.numpy() @ B.numpy()) + beta * C1.numpy()
- assert np.array_equal(D_np, D1.numpy())
- assert np.array_equal(D_np, D2.numpy())
- assert np.array_equal(D_np, D3.numpy())
+ assert_np_equal(D1.numpy(), D_np)
+ assert_np_equal(D2.numpy(), D_np)
+ assert_np_equal(D3.numpy(), D_np)

adj_A_np = alpha * (ones1.numpy() @ B.numpy().transpose())
adj_B_np = alpha * (A.numpy().transpose() @ ones1.numpy())
@@ -221,23 +222,23 @@ def run_and_verify(self, m, n, k, batch_count, alpha, beta):
tape.backward(grads={D1: ones1, D2: ones2, D3: ones3})

D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C1.numpy()
- assert np.array_equal(D_np, D1.numpy())
- assert np.array_equal(D_np, D2.numpy())
- assert np.array_equal(D_np, D3.numpy())
+ assert_np_equal(D1.numpy(), D_np)
+ assert_np_equal(D2.numpy(), D_np)
+ assert_np_equal(D3.numpy(), D_np)

adj_A_np = alpha * np.matmul(ones1.numpy(), B.numpy().transpose((0, 2, 1)))
adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones1.numpy())
adj_C_np = beta * ones1.numpy()

- assert np.array_equal(adj_A_np, A.grad.numpy())
- assert np.array_equal(adj_A_np, ATT1.grad.numpy())
- assert np.array_equal(adj_A_np, ATT2.grad.numpy())
- assert np.array_equal(adj_B_np, B.grad.numpy())
- assert np.array_equal(adj_B_np, BTT1.grad.numpy())
- assert np.array_equal(adj_B_np, BTT2.grad.numpy())
- assert np.array_equal(adj_C_np, C1.grad.numpy())
- assert np.array_equal(adj_C_np, C2.grad.numpy())
- assert np.array_equal(adj_C_np, C3.grad.numpy())
+ assert_np_equal(A.grad.numpy(), adj_A_np)
+ assert_np_equal(ATT1.grad.numpy(), adj_A_np)
+ assert_np_equal(ATT2.grad.numpy(), adj_A_np)
+ assert_np_equal(B.grad.numpy(), adj_B_np)
+ assert_np_equal(BTT1.grad.numpy(), adj_B_np)
+ assert_np_equal(BTT2.grad.numpy(), adj_B_np)
+ assert_np_equal(C1.grad.numpy(), adj_C_np)
+ assert_np_equal(C2.grad.numpy(), adj_C_np)
+ assert_np_equal(C3.grad.numpy(), adj_C_np)

def run(self):
m = 16
Expand All @@ -257,13 +258,13 @@ def test_f16(test, device):
gemm_test_bed_runner_transpose(wp.float16, device).run()


- @unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+ @unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_f32(test, device):
gemm_test_bed_runner(wp.float32, device).run()
gemm_test_bed_runner_transpose(wp.float32, device).run()


- @unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+ @unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_f64(test, device):
gemm_test_bed_runner(wp.float64, device).run()
gemm_test_bed_runner_transpose(wp.float64, device).run()
@@ -275,7 +276,7 @@ def matrix_sum_kernel(arr: wp.array2d(dtype=float), loss: wp.array(dtype=float)):
wp.atomic_add(loss, 0, arr[i, j])


- @unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+ @unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_tape(test, device):
rng = np.random.default_rng(42)
low = -4.5
@@ -315,7 +316,7 @@ def test_tape(test, device):
assert_array_equal(A.grad, wp.zeros_like(A))


- @unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+ @unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_operator(test, device):
rng = np.random.default_rng(42)
low = -4.5
@@ -351,7 +352,7 @@ def test_operator(test, device):
assert_array_equal(A.grad, wp.zeros_like(A))


- @unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+ @unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_large_batch_count(test, device):
rng = np.random.default_rng(42)
low = -4.5
@@ -391,18 +392,18 @@ def test_large_batch_count(test, device):
tape.backward(grads={D: ones})

D_np = alpha * np.matmul(A.numpy(), B.numpy()) + beta * C.numpy()
- assert np.array_equal(D_np, D.numpy())
+ assert_np_equal(D.numpy(), D_np)

adj_A_np = alpha * np.matmul(ones.numpy(), B.numpy().transpose((0, 2, 1)))
adj_B_np = alpha * np.matmul(A.numpy().transpose((0, 2, 1)), ones.numpy())
adj_C_np = beta * ones.numpy()

- assert np.array_equal(adj_A_np, A.grad.numpy())
- assert np.array_equal(adj_B_np, B.grad.numpy())
- assert np.array_equal(adj_C_np, C.grad.numpy())
+ assert_np_equal(A.grad.numpy(), adj_A_np)
+ assert_np_equal(B.grad.numpy(), adj_B_np)
+ assert_np_equal(C.grad.numpy(), adj_C_np)


- @unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+ @unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_adjoint_accumulation(test, device):
a_np = np.ones(shape=(2, 3))
b_np = np.ones(shape=(3, 2))
@@ -426,12 +427,12 @@ def test_adjoint_accumulation(test, device):
grads = {d2_wp: d_grad}
tape.backward(grads=grads)

- assert np.array_equal(a_wp.grad.numpy(), 4.0 * np.ones(shape=(2, 3)))
- assert np.array_equal(b_wp.grad.numpy(), 4.0 * np.ones(shape=(3, 2)))
- assert np.array_equal(c_wp.grad.numpy(), np.ones(shape=(2, 2)))
+ assert_np_equal(a_wp.grad.numpy(), 4.0 * np.ones(shape=(2, 3)))
+ assert_np_equal(b_wp.grad.numpy(), 4.0 * np.ones(shape=(3, 2)))
+ assert_np_equal(c_wp.grad.numpy(), np.ones(shape=(2, 2)))


- @unittest.skipUnless(runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
+ @unittest.skipUnless(wp.context.runtime.core.is_cutlass_enabled(), "Warp was not built with CUTLASS support")
def test_cuda_graph_capture(test, device):
@wp.kernel
def mat_sum(mat: wp.array2d(dtype=Any), loss: wp.array(dtype=Any)):
(Diff for the sixth changed file was not loaded and is not shown.)
