Skip to content

Commit

Permalink
Merge branch 'release-1.3.1' into 'release-1.3'
Browse files Browse the repository at this point in the history
Release 1.3.1

See merge request omniverse/warp!648
  • Loading branch information
c0d1f1ed committed Jul 28, 2024
2 parents 701113d + 318023a commit 2183806
Show file tree
Hide file tree
Showing 8 changed files with 49 additions and 18 deletions.
6 changes: 6 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
# CHANGELOG

## [1.3.1] - 2024-07-27

- Remove ``wp.synchronize_device()`` from PyTorch autograd function example
- ``Tape.check_kernel_array_access()`` and ``Tape.reset_array_read_flags()`` are now private methods.
- Fix reporting unmatched argument types

## [1.3.0] - 2024-07-25

- Warp Core improvements
Expand Down
12 changes: 0 additions & 12 deletions docs/modules/interoperability.rst
Original file line number Diff line number Diff line change
Expand Up @@ -226,9 +226,6 @@ In the following example, we demonstrate how Warp may be used to evaluate the Ro
class Rosenbrock(torch.autograd.Function):
@staticmethod
def forward(ctx, xy, num_points):
# ensure Torch operations complete before running Warp
wp.synchronize_device()

ctx.xy = wp.from_torch(xy, dtype=pvec2, requires_grad=True)
ctx.num_points = num_points

Expand All @@ -242,16 +239,10 @@ In the following example, we demonstrate how Warp may be used to evaluate the Ro
outputs=[ctx.z]
)

# ensure Warp operations complete before returning data to Torch
wp.synchronize_device()

return wp.to_torch(ctx.z)

@staticmethod
def backward(ctx, adj_z):
# ensure Torch operations complete before running Warp
wp.synchronize_device()

# map incoming Torch grads to our output variables
ctx.z.grad = wp.from_torch(adj_z)

Expand All @@ -265,9 +256,6 @@ In the following example, we demonstrate how Warp may be used to evaluate the Ro
adjoint=True
)

# ensure Warp operations complete before returning data to Torch
wp.synchronize_device()

# return adjoint w.r.t. inputs
return (wp.to_torch(ctx.xy.grad), None)

Expand Down
6 changes: 6 additions & 0 deletions exts/omni.warp.core/docs/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
# CHANGELOG

## [1.3.1] - 2024-07-27

- Remove ``wp.synchronize_device()`` from PyTorch autograd function example
- ``Tape.check_kernel_array_access()`` and ``Tape.reset_array_read_flags()`` are now private methods.
- Fix reporting unmatched argument types

## [1.3.0] - 2024-07-25

- Warp Core improvements
Expand Down
6 changes: 6 additions & 0 deletions exts/omni.warp/docs/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
# CHANGELOG

## [1.3.1] - 2024-07-27

- Remove ``wp.synchronize_device()`` from PyTorch autograd function example
- ``Tape.check_kernel_array_access()`` and ``Tape.reset_array_read_flags()`` are now private methods.
- Fix reporting unmatched argument types

## [1.3.0] - 2024-07-25

- Warp Core improvements
Expand Down
4 changes: 2 additions & 2 deletions warp/builtins.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ def fn(arg_types, arg_values):
return default

if not sametypes(arg_types):
raise RuntimeError(f"Input types must be the same, found: {[type_repr(t) for t in arg_types]}")
raise RuntimeError(f"Input types must be the same, got {[type_repr(t) for t in arg_types.values()]}")

arg_type_0 = next(iter(arg_types.values()))
return arg_type_0
Expand Down Expand Up @@ -414,7 +414,7 @@ def scalar_sametypes_value_func(arg_types: Mapping[str, type], arg_values: Mappi
return Scalar

if not sametypes(arg_types):
raise RuntimeError(f"Input types must be exactly the same, {list(arg_types)}")
raise RuntimeError(f"Input types must be exactly the same, got {[type_repr(t) for t in arg_types.values()]}")

return scalar_infer_type(arg_types)

Expand Down
2 changes: 1 addition & 1 deletion warp/context.py
Original file line number Diff line number Diff line change
Expand Up @@ -4778,7 +4778,7 @@ def pack_args(args, params, adjoint=False):

# detect illegal inter-kernel read/write access patterns if verification flag is set
if warp.config.verify_autograd_array_access:
runtime.tape.check_kernel_array_access(kernel, fwd_args)
runtime.tape._check_kernel_array_access(kernel, fwd_args)


def synchronize():
Expand Down
6 changes: 3 additions & 3 deletions warp/tape.py
Original file line number Diff line number Diff line change
Expand Up @@ -197,7 +197,7 @@ def record_scope_end(self, remove_scope_if_empty=True):
else:
self.scopes.append((len(self.launches), None, None))

def check_kernel_array_access(self, kernel, args):
def _check_kernel_array_access(self, kernel, args):
"""Detect illegal inter-kernel write after read access patterns during launch capture"""
adj = kernel.adj
kernel_name = adj.fun_name
Expand Down Expand Up @@ -256,7 +256,7 @@ def reset(self):
self.scopes = []
self.zero()
if wp.config.verify_autograd_array_access:
self.reset_array_read_flags()
self._reset_array_read_flags()

def zero(self):
"""
Expand All @@ -271,7 +271,7 @@ def zero(self):
else:
g.zero_()

def reset_array_read_flags(self):
def _reset_array_read_flags(self):
"""
Reset all recorded array read flags to False
"""
Expand Down
25 changes: 25 additions & 0 deletions warp/tests/test_codegen.py
Original file line number Diff line number Diff line change
Expand Up @@ -467,6 +467,28 @@ def kernel_4_fn():
wp.launch(kernel, dim=1)


def test_error_unmatched_arguments(test, device):
    """Verify that kernels mixing incompatible argument types fail to launch
    with an error message listing the mismatched types."""

    def kernel_1_fn():
        # int32 multiplied by float32 — scalar type mismatch
        a = 1 * 1.0

    def kernel_2_fn():
        # dot product between float32 and float16 vectors — dtype mismatch
        x = wp.dot(wp.vec2(1.0, 2.0), wp.vec2h(wp.float16(1.0), wp.float16(2.0)))

    cases = (
        (kernel_1_fn, r"Input types must be the same, got \['int32', 'float32'\]"),
        (
            kernel_2_fn,
            r"Input types must be exactly the same, got \[\"vector\(length=2, dtype=<class 'warp.types.float32'>\)\", \"vector\(length=2, dtype=<class 'warp.types.float16'>\)\"\]",
        ),
    )
    for fn, pattern in cases:
        kernel = wp.Kernel(func=fn)
        with test.assertRaisesRegex(RuntimeError, pattern):
            wp.launch(kernel, dim=1)


@wp.kernel
def test_call_syntax():
expected_pow = 16.0
Expand Down Expand Up @@ -618,6 +640,9 @@ class TestCodeGen(unittest.TestCase):
add_function_test(
TestCodeGen, func=test_error_collection_construct, name="test_error_collection_construct", devices=devices
)
add_function_test(
TestCodeGen, func=test_error_unmatched_arguments, name="test_error_unmatched_arguments", devices=devices
)

add_kernel_test(TestCodeGen, name="test_call_syntax", kernel=test_call_syntax, dim=1, devices=devices)

Expand Down

0 comments on commit 2183806

Please sign in to comment.