diff --git a/test/test_mps.py b/test/test_mps.py
index 6fc27d3ac71b8..f30131800ed7e 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -7200,7 +7200,7 @@ class TestConsistency(TestCase):
                  'torch.int32',
                  'torch.int64',
                  'torch.uint8'],
-        'corrcoef': ['torch.float32'],
+        'corrcoef': ['torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'cos': ['torch.float32',
                 'torch.int16',
                 'torch.int32',
@@ -7209,7 +7209,7 @@ class TestConsistency(TestCase):
                  'torch.int16',
                  'torch.int32',
                  'torch.uint8'],
-        'cov': ['torch.float32'],
+        'cov': ['torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'deg2rad': ['torch.bool',
                     'torch.float16',
                     'torch.float32',
@@ -7282,19 +7282,22 @@ class TestConsistency(TestCase):
                  'torch.int16',
                  'torch.int32',
                  'torch.int64',
-                 'torch.uint8'],
+                 'torch.uint8',
+                 'torch.bool'],
         'fliplr': ['torch.float16',
                    'torch.float32',
                    'torch.int16',
                    'torch.int32',
                    'torch.int64',
-                   'torch.uint8'],
+                   'torch.uint8',
+                   'torch.bool'],
         'flipud': ['torch.float16',
                    'torch.float32',
                    'torch.int16',
                    'torch.int32',
                    'torch.int64',
-                   'torch.uint8'],
+                   'torch.uint8',
+                   'torch.bool'],
         'float': ['torch.float32'],
         'floor': ['torch.float32'],
         'half': ['torch.float16'],
@@ -7483,7 +7486,8 @@ class TestConsistency(TestCase):
         'nn.functional.softsign': ['torch.float16',
                                    'torch.float32',
                                    'torch.int16',
-                                   'torch.uint8'],
+                                   'torch.uint8',
+                                   'torch.int32'],
         'nn.functional.tanhshrink': ['torch.float32',
                                      'torch.int16',
                                      'torch.int32',
@@ -7541,7 +7545,8 @@ class TestConsistency(TestCase):
                    'torch.int16',
                    'torch.int32',
                    'torch.int64',
-                   'torch.uint8'],
+                   'torch.uint8',
+                   'torch.bool'],
         'repeat_interleave': ['torch.bool',
                               'torch.float16',
                               'torch.float32',
@@ -7582,7 +7587,8 @@ class TestConsistency(TestCase):
                   'torch.int16',
                   'torch.int32',
                   'torch.int64',
-                  'torch.uint8'],
+                  'torch.uint8',
+                  'torch.bool'],
         'round': ['torch.float32'],
         'rsqrt': ['torch.float32',
                   'torch.int16',
@@ -7622,7 +7628,7 @@ class TestConsistency(TestCase):
                 'torch.float32',
                 'torch.int16',
                 'torch.int32',
-                'torch.int64'],
+                'torch.int64',],
         'softmax': ['torch.float32'],
         'special.ndtr': ['torch.bool',
                          'torch.float32',
@@ -7635,7 +7641,8 @@ class TestConsistency(TestCase):
                  'torch.int16',
                  'torch.int32',
                  'torch.int64',
-                 'torch.uint8'],
+                 'torch.uint8',
+                 'torch.float32'],
         'sqrt': ['torch.float32',
                  'torch.int16',
                  'torch.int32',
@@ -7705,7 +7712,9 @@ class TestConsistency(TestCase):
                         'torch.float16',
                         'torch.float32',
                         'torch.int16',
-                        'torch.uint8'],
+                        'torch.uint8',
+                        'torch.int32',
+                        'torch.int64'],
         'trunc': ['torch.float32'],
         'unbind': ['torch.bool',
                    'torch.float16',
@@ -7762,7 +7771,178 @@ class TestConsistency(TestCase):
                 'torch.int16',
                 'torch.int32',
                 'torch.int64',
-                'torch.uint8']
+                'torch.uint8'],
+        'H': ["torch.bool",
+              "torch.float16",
+              "torch.float32",
+              "torch.int16",
+              "torch.int32",
+              "torch.int64",
+              "torch.uint8"],
+        'T': ["torch.bool",
+              "torch.float16",
+              "torch.float32",
+              "torch.int16",
+              "torch.int32",
+              "torch.int64",
+              "torch.uint8"],
+        'bool': ["torch.bool",
+                 "torch.float16",
+                 "torch.float32",
+                 "torch.int16",
+                 "torch.int32",
+                 "torch.int64",
+                 "torch.uint8"],
+        'as_strided': ["torch.bool",
+                       "torch.float16",
+                       "torch.float32",
+                       "torch.int16",
+                       "torch.int32",
+                       "torch.int64",
+                       "torch.uint8"],
+        'broadcast_tensors': ["torch.bool",
+                              "torch.float16",
+                              "torch.float32",
+                              "torch.int16",
+                              "torch.int32",
+                              "torch.int64",
+                              "torch.uint8"],
+        'broadcast_to': ["torch.bool",
+                         "torch.float16",
+                         "torch.float32",
+                         "torch.int16",
+                         "torch.int32",
+                         "torch.int64",
+                         "torch.uint8"],
+        'cartesian_prod': ["torch.bool",
+                           "torch.float16",
+                           "torch.float32",
+                           "torch.int16",
+                           "torch.int32",
+                           "torch.int64",
+                           "torch.uint8"],
+        'diagonal': ["torch.bool",
+                     "torch.float16",
+                     "torch.float32",
+                     "torch.int16",
+                     "torch.int32",
+                     "torch.int64",
+                     "torch.uint8"],
+        'dsplit': ["torch.bool",
+                   "torch.float16",
+                   "torch.float32",
+                   "torch.int16",
+                   "torch.int32",
+                   "torch.int64",
+                   "torch.uint8"],
+        'dstack': ["torch.bool",
+                   "torch.float16",
+                   "torch.float32",
+                   "torch.int16",
+                   "torch.int32",
+                   "torch.int64",
+                   "torch.uint8"],
+        'expand_as': ["torch.bool",
+                      "torch.float16",
+                      "torch.float32",
+                      "torch.int16",
+                      "torch.int32",
+                      "torch.int64",
+                      "torch.uint8"],
+        'expand': ["torch.bool",
+                   "torch.float16",
+                   "torch.float32",
+                   "torch.int16",
+                   "torch.int32",
+                   "torch.int64",
+                   "torch.uint8"],
+        'hsplit': ["torch.bool",
+                   "torch.float16",
+                   "torch.float32",
+                   "torch.int16",
+                   "torch.int32",
+                   "torch.int64",
+                   "torch.uint8"],
+        'mH': ["torch.bool",
+               "torch.float16",
+               "torch.float32",
+               "torch.int16",
+               "torch.int32",
+               "torch.int64",
+               "torch.uint8"],
+        'mT': ["torch.bool",
+               "torch.float16",
+               "torch.float32",
+               "torch.int16",
+               "torch.int32",
+               "torch.int64",
+               "torch.uint8"],
+        'movedim': ["torch.bool",
+                    "torch.float16",
+                    "torch.float32",
+                    "torch.int16",
+                    "torch.int32",
+                    "torch.int64",
+                    "torch.uint8"],
+        'narrow': ["torch.bool",
+                   "torch.float16",
+                   "torch.float32",
+                   "torch.int16",
+                   "torch.int32",
+                   "torch.int64",
+                   "torch.uint8"],
+        'permute': ["torch.bool",
+                    "torch.float16",
+                    "torch.float32",
+                    "torch.int16",
+                    "torch.int32",
+                    "torch.int64",
+                    "torch.uint8"],
+        'reshape': ["torch.bool",
+                    "torch.float16",
+                    "torch.float32",
+                    "torch.int16",
+                    "torch.int32",
+                    "torch.int64",
+                    "torch.uint8"],
+        'select': ["torch.bool",
+                   "torch.float16",
+                   "torch.float32",
+                   "torch.int16",
+                   "torch.int32",
+                   "torch.int64",
+                   "torch.uint8"],
+        'split_with_sizes': ["torch.bool",
+                             "torch.float16",
+                             "torch.float32",
+                             "torch.int16",
+                             "torch.int32",
+                             "torch.int64",
+                             "torch.uint8"],
+        'tile': ["torch.bool",
+                 "torch.float16",
+                 "torch.float32",
+                 "torch.int16",
+                 "torch.int32",
+                 "torch.int64",
+                 "torch.uint8"],
+        'transpose': ["torch.bool",
+                      "torch.float16",
+                      "torch.float32",
+                      "torch.int16",
+                      "torch.int32",
+                      "torch.int64",
+                      "torch.uint8"],
+        'float': ['torch.bool',
+                  'torch.float16',
+                  'torch.int16',
+                  'torch.int32',
+                  'torch.uint8',
+                  'torch.float32'],
+        'gradient': ['torch.float16',
+                     'torch.int16',
+                     'torch.int32',
+                     'torch.float32',],
     }
 
     # These ops that are problematic. So never run them even when
@@ -7781,7 +7961,6 @@ class TestConsistency(TestCase):
 
         # These were moved from ALLOWLIST to BLOCK as they are not working
         # locally
-        'tile': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         '__radd__': ['torch.bool', 'torch.uint8'],
         '__rmul__': ['torch.uint8'],
         'add': ['torch.bool', 'torch.uint8'],
@@ -7792,39 +7971,23 @@ class TestConsistency(TestCase):
 
         # Functions that are flaky
         # These are detected as "ok" by the expect case but actually fail to run sometimes
-        'H': None,
-        'T': None,
-        'as_strided': None,
-        'broadcast_tensors': None,
         'broadcast': None,
-        'broadcast_to': None,
-        'diagonal': None,
         'divfloor_rounding': None,
         'divno_rounding_mode': None,
         'divtrunc_rounding': None,
-        'dsplit': None,
-        'hsplit': None,
         'empty': None,
-        'expand_as': None,
-        'expand': None,
         'ge': None,
         'ne': None,
         'le': None,
         'lt': None,
         'gt': None,
-        'transpose': None,
         'splitlist_args': None,
-        'select': None,
-        'reshape': None,
         'reshape_as': None,
-        'permute': None,
         'norm': None,
         'nn.functional.pixel_unshuffle': None,
         'nn.functional.pixel_shuffle': None,
         'nn.functional.cross_entropy': None,
         'nn.functional.one_hot': None,
-        'narrow': None,
-        'movedim': None,
         'minreduction_with_dim': None,
         'minreduction_no_dim': None,
         'minbinary': None,
@@ -7835,8 +7998,6 @@ class TestConsistency(TestCase):
         'maxbinary': None,
         'maximum': None,
         'minimum': None,
-        'mT': None,
-        'mH': None,
         'outer': None,
         'softmaxwith_dtype': None,
         'rounddecimals_neg_3': None,
@@ -7846,15 +8007,11 @@ class TestConsistency(TestCase):
         'nn.functional.softminwith_dtype': None,
         'nn.functional.feature_alpha_dropoutwith_train': None,
         'log_softmaxdtype': None,
-        'split_with_sizes': None,
         'trapezoid': None,
         'eq': None,
         'mul': None,
-        'cartesian_prod': None,
         'nonzero': None,
-        'bool': None,
         'inner': None,
-        'dstack': None,
         'take_along_dim': None,
 
         # New block list ops that need investigation
@@ -7887,11 +8044,9 @@ class TestConsistency(TestCase):
         'clamp_max': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'clamp_min': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'constant_pad_nd': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
-        'corrcoef': ['torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'cos': ['torch.bool'],
         'cosh': ['torch.bool'],
         'count_nonzero': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
-        'cov': ['torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'diff': ['torch.bool', 'torch.uint8'],
         'eig': ['torch.float32'],
         'empty_like': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
@@ -7906,9 +8061,8 @@ class TestConsistency(TestCase):
         'fft.rfft2': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'fft.rfft': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'fft.rfftn': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
-        'float': ['torch.bool', 'torch.float16', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
+        'float': ['torch.int64'],
         'gather': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
-        'gradient': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32'],
         'half': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'index_put': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'index_select': ['torch.uint8'],
@@ -7948,7 +8102,6 @@ class TestConsistency(TestCase):
         'nn.functional.normalize': ['torch.float32'],
         'nn.functional.pad': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64'],
         'nn.functional.pairwise_distance': ['torch.uint8'],
-        'nn.functional.softsign': ['torch.int32'],
         'nn.functional.triplet_margin_loss': ['torch.uint8'],
         'nn.functional.triplet_margin_with_distance_loss': ['torch.uint8'],
         'nn.functional.upsample_nearest': ['torch.float32'],
@@ -7974,17 +8127,9 @@ class TestConsistency(TestCase):
         'tanh': ['torch.bool'],
         'tensor_split': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'topk': ['torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
-        'trapz': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
-        'true_divide': ['torch.int32', 'torch.int64'],
+        'trapz': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
         'nn.functional.local_response_norm': ['torch.int64'],
-        'flip': ['torch.bool'],
-        'fliplr': ['torch.bool'],
-        'flipud': ['torch.bool'],
         'index_select': ['torch.bool'],
-        'repeat': ['torch.bool'],
-        'rot90': ['torch.bool'],
-        'tile': ['torch.bool'],
-        'split': ['torch.float32'],
     }
 
     # Used for accept mode only
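For reference, a minimal sketch (not part of the patch) of the CPU-vs-MPS comparison these allowlist entries gate: the consistency test runs each listed op on both devices for every listed dtype and asserts the results match. The op/dtype pair below is chosen purely for illustration.

import torch

# Hypothetical standalone check mirroring TestConsistency's core idea:
# run the same op with the same input on CPU and MPS and compare.
# torch.flip on torch.bool is one of the (op, dtype) pairs enabled above.
if torch.backends.mps.is_available():
    cpu_x = torch.tensor([[True, False], [False, True]])  # torch.bool
    mps_x = cpu_x.to("mps")
    torch.testing.assert_close(
        torch.flip(cpu_x, dims=(0,)),
        torch.flip(mps_x, dims=(0,)).cpu(),
    )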