Revert "Add MaskedTensor passthrough: unfold, F.Unfold, F.Fold, stack (
Browse files Browse the repository at this point in the history
…pytorch#125262)"

This reverts commit f685018.

Reverted pytorch#125262 on behalf of https://github.com/ZainRizvi due to Hi, this PR appears to be causing maskedtensor tests to fail on main. Please rebase your changes onto the latest trunk build to repro the failure. test_maskedtensor.py::TestOperatorsCUDA::test_like_empty_like_layout1_cuda_bool [GH job link](https://github.com/pytorch/pytorch/actions/runs/10604716811/job/29393256312) [HUD commit link](https://hud.pytorch.org/pytorch/pytorch/commit/f685018ea9d08f98cbd7106028db134f967f74d3) ([comment](pytorch#125262 (comment)))
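For anyone trying to reproduce the failure named above, here is a minimal sketch of running just that test locally (assuming a CUDA-enabled PyTorch dev checkout with pytest installed; the exact invocation is not part of the revert message):

# Hedged sketch: run only the failing MaskedTensor test via pytest's API.
# Assumes the working directory is the repository root.
import pytest

pytest.main([
    "test/test_maskedtensor.py",
    "-k", "test_like_empty_like_layout1_cuda_bool",
    "-v",
])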
pytorchmergebot authored and tolleybot committed Sep 14, 2024
1 parent b24ebbe commit 3d8b818
Showing 8 changed files with 14 additions and 56 deletions.
2 changes: 1 addition & 1 deletion aten/src/ATen/native/Col2Im.cpp
@@ -144,7 +144,7 @@ static void col2im_out_cpu_template(
 
   output.resize_({batch_size, n_output_plane, output_height, output_width});
 
-  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3(kBFloat16, kHalf, kBool,
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kBFloat16, kHalf,
       input.scalar_type(), "col2im_out_cpu", [&] {
         Tensor input_n = Tensor();
         Tensor output_n = Tensor();
2 changes: 1 addition & 1 deletion aten/src/ATen/native/Im2Col.cpp
@@ -94,7 +94,7 @@ static void im2col_out_cpu_template(
 
   output.resize_({batch_size, n_output_plane, output_length});
 
-  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3(kBFloat16, kHalf, kBool,
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kBFloat16, kHalf,
       input.scalar_type(), "im2col_out_cpu", [&] {
         Tensor input_n;
         Tensor output_n;
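Since im2col backs torch.nn.functional.unfold, dropping kBool from these dispatch macros means bool inputs are no longer covered by the CPU kernels. A rough sketch of the user-visible effect on the reverted build (an illustration, not part of the diff):

import torch
import torch.nn.functional as F

# With the AND2 dispatch (no kBool), a bool input should hit an unsupported-dtype error.
x = torch.ones(1, 1, 4, 4, dtype=torch.bool)
try:
    F.unfold(x, kernel_size=2)
except RuntimeError as err:
    print("bool unfold rejected after the revert:", err)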
2 changes: 1 addition & 1 deletion aten/src/ATen/native/cuda/Col2Im.cu
@@ -102,7 +102,7 @@ void col2im_out_cuda_template(
   output.resize_({batch_size, n_output_plane, output_height, output_width});
   int64_t output_batch_stride = output.stride(0);
 
-  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3(kHalf, kBFloat16, kBool,
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
       input.scalar_type(), "col2im_out_cuda", [&] {
         int64_t height_col = (output_height + 2 * pad_height -
                               (dilation_height * (kernel_height - 1) + 1)) /
2 changes: 1 addition & 1 deletion aten/src/ATen/native/cuda/Im2Col.cu
@@ -103,7 +103,7 @@ static void im2col_out_cuda_template(
   output.resize_({batch_size, n_output_plane, output_length});
 
   // Launch kernel
-  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND3(kHalf, kBFloat16, kBool,
+  AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16,
       input.scalar_type(), "im2col_out_cuda", [&] {
         Tensor input_n;
         Tensor output_n;
3 changes: 0 additions & 3 deletions docs/source/masked.rst
@@ -283,11 +283,9 @@ The following ops are currently supported:
     kron
     meshgrid
     narrow
-    nn.functional.unfold
     ravel
     select
     split
-    stack
     t
     transpose
     vsplit
@@ -296,7 +294,6 @@ The following ops are currently supported:
     Tensor.expand_as
     Tensor.reshape
     Tensor.reshape_as
-    Tensor.unfold
     Tensor.view
 
 Other functions
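The entries that remain in masked.rst still pass through MaskedTensor after the revert; a small usage sketch with one of the remaining ops (using the prototype torch.masked API, shown only as an illustration):

import torch
from torch.masked import masked_tensor

data = torch.arange(6.0).reshape(2, 3)
mask = torch.tensor([[True, False, True], [False, True, True]])
mt = masked_tensor(data, mask)

# transpose stays on the documented passthrough list; the mask is permuted with the data.
print(torch.transpose(mt, 0, 1))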
50 changes: 8 additions & 42 deletions test/test_maskedtensor.py
@@ -68,18 +68,6 @@ def _compare_mts(mt1, mt2, rtol=1e-05, atol=1e-08):
         if not _tensors_match(a, b, exact=False, rtol=rtol, atol=atol):
             raise ValueError("The data in MaskedTensor mt1 and MaskedTensor mt2 do not match")
 
-def _compare_forward_backward(data, mask, fn):
-    mt = masked_tensor(data, mask, requires_grad=True)
-    masked_res = fn(mt)
-    masked_res.sum().backward()
-
-    t = data.masked_fill(~mask, float("-inf")).detach().clone().requires_grad_()
-    tensor_res = fn(t)
-    tensor_res.sum().backward()
-
-    _compare_mt_t(masked_res, tensor_res)
-    _compare_mt_t(mt.grad, t.grad, atol=1e-06)
-
 
 def _create_random_mask(shape, device):
     return make_tensor(shape, device=device, dtype=torch.bool)
@@ -178,8 +166,15 @@ def test_softmax(self, device):
             ],
             device=device
         )
+        mt = masked_tensor(data, mask, requires_grad=True)
+        masked_res = torch.softmax(mt, -1)
+        masked_res.sum().backward()
+        xinf = data.masked_fill(~mask, float("-inf")).detach().clone().requires_grad_()
+        tensor_res = torch.softmax(xinf, -1)
+        tensor_res.sum().backward()
 
-        _compare_forward_backward(data, mask, lambda t: torch.softmax(t, -1))
+        _compare_mt_t(masked_res, tensor_res)
+        _compare_mt_t(mt.grad, xinf.grad, atol=1e-06)
 
     def test_where(self, device):
         data = torch.tensor([-10.0, -5, 0, 5, 10, 50, 60, 70, 80, 90, 100], device=device)
@@ -199,35 +194,6 @@ def test_where(self, device):
         _compare_mt_t(mx.grad, x.grad)
         _compare_mt_t(my.grad, y.grad)
 
-    def test_unfold(self, device):
-        data = torch.rand(5, 5, device=device)
-        mask = torch.rand(5, 5, device=device) > 0.5
-        _compare_forward_backward(data, mask, lambda t: t.unfold(1, 2, 2))
-
-    def test_nn_unfold(self, device):
-        data = torch.rand(2, 5, 3, 4, device=device)
-        mask = torch.rand(2, 5, 3, 4, device=device) > 0.5
-        _compare_forward_backward(data, mask, lambda t: torch.nn.functional.unfold(t, kernel_size=(2, 3)))
-
-    def test_stack(self, device):
-        masked_tensors = [
-            masked_tensor(
-                torch.rand(2, 5, 3, 4, device=device),
-                torch.rand(2, 5, 3, 4, device=device) > 0.5,
-                requires_grad=True,
-            ) for _ in range(3)
-        ]
-
-        data_tensors = [mt.get_data().detach().clone().requires_grad_() for mt in masked_tensors]
-        masked_res = torch.stack(masked_tensors)
-        tensor_res = torch.stack(data_tensors)
-
-        masked_res.sum().backward()
-        tensor_res.sum().backward()
-        _compare_mt_t(masked_res, tensor_res)
-        for mt, t in zip(masked_tensors, data_tensors):
-            _compare_mt_t(mt.grad, t.grad, atol=1e-06)
-
     def test_to_sparse(self, device):
         for sample in _generate_sample_data(device=device):
             data = sample.input
5 changes: 0 additions & 5 deletions torch/masked/maskedtensor/passthrough.py
@@ -30,11 +30,6 @@
     torch.ops.aten._reshape_alias,
     torch.ops.aten.cat,
     torch.ops.aten.unsqueeze,
-    torch.ops.aten.unfold,
-    torch.ops.aten.unfold_backward,
-    torch.ops.aten.im2col,
-    torch.ops.aten.col2im,
-    torch.ops.aten.stack,
 ]
 
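For context, the ops in this list are "passthrough" ops: the same operation is applied to the data and the mask, and the results are rewrapped. A rough sketch of the idea (not the actual implementation in torch/masked/maskedtensor/passthrough.py):

import torch
from torch.masked import masked_tensor

def passthrough_apply(fn, mt, *args, **kwargs):
    # Apply the op to data and mask separately, then rebuild a MaskedTensor.
    # The real dispatch also handles multi-tensor ops and error checking.
    out_data = fn(mt.get_data(), *args, **kwargs)
    out_mask = fn(mt.get_mask(), *args, **kwargs)
    return masked_tensor(out_data, out_mask)

# Example with an op that remains on the passthrough list:
mt = masked_tensor(torch.rand(2, 3), torch.rand(2, 3) > 0.5)
print(passthrough_apply(torch.t, mt))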
4 changes: 2 additions & 2 deletions torch/testing/_internal/common_methods_invocations.py
@@ -15266,8 +15266,8 @@ def sample_inputs_alias_copy(op_info, device, dtype, requires_grad, **kwargs):
                autodiff_nonfusible_nodes=["aten::hardswish"]),
     OpInfo('nn.functional.unfold',
            aten_name='im2col',
-           dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.bool),
-           dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16, torch.bool),
+           dtypes=floating_and_complex_types_and(torch.half, torch.bfloat16),
+           dtypesIfCUDA=floating_and_complex_types_and(torch.half, torch.bfloat16),
            sample_inputs_func=sample_inputs_nn_unfold,
            # Runs very slowly on slow gradcheck - alternatively reduce input sizes
            gradcheck_fast_mode=True,
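The OpInfo dtypes entries control which dtypes the generic test suites exercise for nn.functional.unfold, so they again match the AND2 dispatch above. Roughly, the reverted entries expand to the following set (a sketch of what the helper in torch.testing._internal is expected to return, listed here as an assumption):

import torch

# floating_and_complex_types_and(torch.half, torch.bfloat16), approximately:
unfold_test_dtypes = {
    torch.float32, torch.float64,
    torch.complex64, torch.complex128,
    torch.half, torch.bfloat16,
    # torch.bool was removed from this list by the revert
}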
