Update allow and block lists (#88)
* Update allow and block lists

* Add one more to block list

* Add more blocked ops

Co-authored-by: abhipathak97 <abhipathak97@mps10.scv.apple.com>
2 people authored and kulinseth committed Nov 5, 2022
1 parent e5c5fe2 commit db1b253
Showing 1 changed file with 128 additions and 0 deletions.
test/test_mps.py: 128 additions & 0 deletions
@@ -7680,6 +7680,134 @@ class TestConsistency(TestCase):
'inner': None,
'dstack': None,
'take_along_dim': None,

# New block list ops that need investigation
'__rdiv__': ['torch.bool', 'torch.int64'],
'__rpow__': ['torch.float32', 'torch.int16', 'torch.int32', 'torch.uint8'],
'_masked.amax': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'_masked.amin': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'_masked.argmax': ['torch.float16', 'torch.float32', 'torch.int32'],
'_masked.argmin': ['torch.float16', 'torch.float32', 'torch.int32'],
'_masked.logsumexp': ['torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'_masked.mean': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'_masked.prod': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'_masked.std': ['torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'_masked.sum': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'_masked.var': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'acos': ['torch.bool'],
'acosh': ['torch.bool'],
'amax': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'amin': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'as_strided_scatter': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'asin': ['torch.bool'],
'asinh': ['torch.bool'],
'atan2': ['torch.bool', 'torch.int16', 'torch.int32', 'torch.uint8'],
'atan': ['torch.bool'],
'atanh': ['torch.bool'],
'bernoulli': ['torch.float32'],
'byte': ['torch.float16', 'torch.float32'],
'char': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64'],
'clamp': ['torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'clamp_max': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'clamp_min': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'constant_pad_nd': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'corrcoef': ['torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'cos': ['torch.bool'],
'cosh': ['torch.bool'],
'count_nonzero': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'cov': ['torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'diff': ['torch.bool', 'torch.uint8'],
'eig': ['torch.float32'],
'empty_like': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'erf': ['torch.bool'],
'exp2': ['torch.bool'],
'exp': ['torch.bool'],
'fft.fft': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'fft.ifft': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'fft.ihfft2': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'fft.ihfft': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'fft.ihfftn': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'fft.rfft2': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'fft.rfft': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'fft.rfftn': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'float': ['torch.bool', 'torch.float16', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'gather': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'gradient': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32'],
'half': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'index_put': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'index_select': ['torch.bool', 'torch.uint8'],
'int': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int64', 'torch.uint8'],
'linalg.eigvals': ['torch.float32'],
'linalg.multi_dot': ['torch.float32'],
'log10': ['torch.bool'],
'log1p': ['torch.bool', 'torch.int16', 'torch.int32', 'torch.uint8'],
'log2': ['torch.bool'],
'log': ['torch.bool'],
'logical_and': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'logical_or': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'logical_xor': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'logsumexp': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'matmul': ['torch.uint8'],
'mean': ['torch.float16', 'torch.float32'],
'native_layer_norm': ['torch.float32'],
'neg': ['torch.uint8'],
'new_empty': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'nn.functional.adaptive_avg_pool1d': ['torch.float32'],
'nn.functional.adaptive_avg_pool2d': ['torch.float32'],
'nn.functional.avg_pool1d': ['torch.float32', 'torch.int64'],
'nn.functional.avg_pool2d': ['torch.float32', 'torch.int64'],
'nn.functional.bilinear': ['torch.float32'],
'nn.functional.conv_transpose2d': ['torch.float32'],
'nn.functional.cosine_embedding_loss': ['torch.uint8'],
'nn.functional.cosine_similarity': ['torch.float32'],
'nn.functional.dropout2d': ['torch.float32'],
'nn.functional.dropout3d': ['torch.float32'],
'nn.functional.dropout': ['torch.float32'],
'nn.functional.gelu': ['torch.float32'],
'nn.functional.interpolate': ['torch.float32'],
'nn.functional.layer_norm': ['torch.float32'],
'nn.functional.margin_ranking_loss': ['torch.uint8'],
'nn.functional.max_pool1d': ['torch.float32'],
'nn.functional.max_pool2d': ['torch.float32'],
'nn.functional.normalize': ['torch.float32'],
'nn.functional.pad': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64'],
'nn.functional.pairwise_distance': ['torch.uint8'],
'nn.functional.softsign': ['torch.int32'],
'nn.functional.triplet_margin_loss': ['torch.uint8'],
'nn.functional.triplet_margin_with_distance_loss': ['torch.uint8'],
'nn.functional.upsample_nearest': ['torch.float32'],
'normal': ['torch.float16', 'torch.float32'],
'pow': ['torch.float32', 'torch.int16', 'torch.int32', 'torch.uint8'],
'prod': ['torch.bool', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'put': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'rand_like': ['torch.float16', 'torch.float32'],
'randint_like': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'randn_like': ['torch.float16', 'torch.float32'],
'reciprocal': ['torch.bool'],
'rsqrt': ['torch.bool'],
'scatter_add': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'scatter': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'short': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int32', 'torch.int64', 'torch.uint8'],
'sigmoid': ['torch.bool', 'torch.int16', 'torch.int32', 'torch.uint8'],
'sin': ['torch.bool'],
'sinh': ['torch.bool'],
'sqrt': ['torch.bool'],
'sub': ['torch.float16', 'torch.uint8'],
'sum': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'tan': ['torch.bool', 'torch.float32'],
'tanh': ['torch.bool'],
'tensor_split': ['torch.bool', 'torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'topk': ['torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'trapz': ['torch.float16', 'torch.float32', 'torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
'true_divide': ['torch.int32', 'torch.int64'],
'nn.functional.local_response_norm': ['torch.int64'],
'flip': ['torch.bool'],
'fliplr': ['torch.bool'],
'flipud': ['torch.bool'],
'repeat': ['torch.bool'],
'rot90': ['torch.bool'],
'tile': ['torch.bool'],
}

# Used for accept mode only
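For context, each entry added above maps an op name to the dtype strings for which the MPS consistency test is skipped, while a value of None (as in the surrounding context lines such as 'inner': None) blocks the op for every dtype. The sketch below is a minimal illustration, under that assumption, of how such a mapping could be consulted; the helper name should_skip and the trimmed BLOCKLIST are hypothetical and are not the actual test_mps.py code.

import torch

# Illustrative subset of the block list added in this commit (hypothetical names).
BLOCKLIST = {
    'inner': None,                # None: blocked for every dtype
    'acos': ['torch.bool'],
    'topk': ['torch.int16', 'torch.int32', 'torch.int64', 'torch.uint8'],
}

def should_skip(op_name: str, dtype: torch.dtype) -> bool:
    """Return True if the (op, dtype) pair is on the block list."""
    if op_name not in BLOCKLIST:
        return False
    blocked_dtypes = BLOCKLIST[op_name]
    # None blocks the op outright; otherwise block only the listed dtypes.
    return blocked_dtypes is None or str(dtype) in blocked_dtypes

# Example: acos on bool inputs is skipped, float32 is not; inner is skipped everywhere.
assert should_skip('acos', torch.bool)
assert not should_skip('acos', torch.float32)
assert should_skip('inner', torch.float32)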
