@@ -2911,6 +2911,14 @@ def test_eq(self):
29112911
29122912 self .assertEqual (result_cpu , result_mps .to ('cpu' ))
29132913
def test_signed_vs_unsigned_comparison(self):
    """Check MPS matches CPU when a uint8 tensor is compared to a signed scalar.

    The -1 literal overflows into the uint8 tensor (becoming 255), and the
    comparisons below should promote consistently on both backends — in a
    signed-vs-unsigned comparison the cast is always toward unsigned.
    """
    values = (-1, 2, 3)
    x_cpu = torch.tensor(values, device='cpu', dtype=torch.uint8)
    x_mps = torch.tensor(values, device='mps', dtype=torch.uint8)
    # eq / gt / lt against a negative scalar must agree across devices
    for cpu_result, mps_result in (
        (x_cpu == -1, x_mps == -1),
        (x_cpu > -1, x_mps > -1),
        (x_cpu < -1, x_mps < -1),
    ):
        self.assertEqual(cpu_result, mps_result)
29142922 def test_eq_int64 (self ):
29152923 values1 = [[[1 , 2 , 3 ], [4 , 5 , 6 ]], [[7 , 8 , 9 ], [10 , 11 , 12 ]]]
29162924 values2 = [[[1 , 2 , 15 ], [4 , 5 , 6 ]], [[7 , 8 , 9 ], [0 , 11 , 12 ]]]
@@ -8173,7 +8181,7 @@ class TestConsistency(TestCase):
81738181 'nn.functional.conv1d' : ['f32' ],
81748182 'nn.functional.conv2d' : ['f32' ],
81758183 'nn.functional.conv_transpose1d' : ['f32' ],
8176- 'nn.functional.cosine_embedding_loss' : ['b8' , 'f32' , 'i16' , 'i32' , 'i64' ],
8184+ 'nn.functional.cosine_embedding_loss' : ['b8' , 'f32' , 'i16' , 'i32' , 'i64' , 'u8' ],
81778185 'nn.functional.cosine_similarity' : ['f32' ],
81788186 'nn.functional.elu' : ['f32' ],
81798187 'nn.functional.feature_alpha_dropout' : ['b8' , 'f16' , 'f32' , 'i16' , 'i32' , 'i64' , 'u8' ],
@@ -8550,7 +8558,6 @@ class TestConsistency(TestCase):
85508558 'nn.functional.avg_pool2d' : ['torch.float32' , 'torch.int64' ],
85518559 'nn.functional.bilinear' : ['torch.float32' ],
85528560 'nn.functional.conv_transpose2d' : ['torch.float32' ],
8553- 'nn.functional.cosine_embedding_loss' : ['torch.uint8' ],
85548561 'nn.functional.interpolate' : ['torch.float32' , 'torch.float32' , 'torch.float32' ],
85558562 'nn.functional.max_pool1d' : ['torch.float32' ],
85568563 'nn.functional.max_pool2d' : ['torch.float32' ],
0 commit comments