@@ -1253,41 +1253,57 @@ def test_avoid_useless_subtensors(self):
     @pytest.mark.parametrize("linker", ["cvm", "py"])
     def test_perform(self, linker):
 
-        a = pytensor.shared(5)
+        a = pytensor.shared(np.full((3, 1, 1), 5))
+        s_0 = iscalar("s_0")
         s_1 = iscalar("s_1")
-        shape = (s_1, 1)
+        shape = (s_0, s_1, 1)
 
         bcast_res = broadcast_to(a, shape)
-        assert bcast_res.broadcastable == (False, True)
+        assert bcast_res.broadcastable == (False, False, True)
 
         bcast_fn = pytensor.function(
-            [s_1], bcast_res, mode=Mode(optimizer=None, linker=linker)
+            [s_0, s_1], bcast_res, mode=Mode(optimizer=None, linker=linker)
        )
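+        # Keep intermediate storage alive so storage_map can be inspected below.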
         bcast_fn.vm.allow_gc = False
 
-        bcast_at = bcast_fn(4)
-        bcast_np = np.broadcast_to(5, (4, 1))
+        bcast_at = bcast_fn(3, 4)
+        bcast_np = np.broadcast_to(5, (3, 4, 1))
 
         assert np.array_equal(bcast_at, bcast_np)
 
-        bcast_var = bcast_fn.maker.fgraph.outputs[0].owner.inputs[0]
-        bcast_in = bcast_fn.vm.storage_map[a]
-        bcast_out = bcast_fn.vm.storage_map[bcast_var]
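+        # The shared value has a leading dim of 3, so requesting 5 must fail.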
+        with pytest.raises(ValueError):
+            bcast_fn(5, 4)
 
         if linker != "py":
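+            # The C implementation should return a view on the input storage.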
+            bcast_var = bcast_fn.maker.fgraph.outputs[0].owner.inputs[0]
+            bcast_in = bcast_fn.vm.storage_map[a]
+            bcast_out = bcast_fn.vm.storage_map[bcast_var]
             assert np.shares_memory(bcast_out[0], bcast_in[0])
 
+    def test_make_node_error_handling(self):
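+        # A target shape with fewer dims than the input is rejected at graph build time.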
+        with pytest.raises(
+            ValueError,
+            match="Broadcast target shape has 1 dims, which is shorter than input with 2 dims",
+        ):
+            broadcast_to(at.zeros((3, 4)), (5,))
+
     @pytest.mark.skipif(
         not config.cxx, reason="G++ not available, so we need to skip this test."
     )
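+    # Check that neither the valid- nor the invalid-shape path leaks memory.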
-    def test_memory_leak(self):
+    @pytest.mark.parametrize("valid", (True, False))
+    def test_memory_leak(self, valid):
         import gc
         import tracemalloc
 
         from pytensor.link.c.cvm import CVM
 
         n = 100_000
-        x = pytensor.shared(np.ones(n, dtype=np.float64))
+        x = pytensor.shared(np.ones((1, n), dtype=np.float64))
         y = broadcast_to(x, (5, n))
 
         f = pytensor.function([], y, mode=Mode(optimizer=None, linker="cvm"))
@@ -1303,8 +1319,18 @@ def test_memory_leak(self):
         blocks_last = None
         block_diffs = []
         for i in range(1, 50):
-            x.set_value(np.ones(n))
-            _ = f()
+            if valid:
+                x.set_value(np.ones((1, n)))
+                _ = f()
+            else:
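+                # Shape (2, n) cannot broadcast to (5, n), so the call must raise.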
+                x.set_value(np.ones((2, n)))
+                try:
+                    _ = f()
+                except ValueError:
+                    pass
+                else:
+                    raise RuntimeError("Should have failed")
             _ = gc.collect()
             blocks_i, _ = tracemalloc.get_traced_memory()
             if blocks_last is not None:
@@ -1313,7 +1339,8 @@ def test_memory_leak(self):
             blocks_last = blocks_i
 
         tracemalloc.stop()
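+        # Traced memory should never grow between iterations, not just average to zero.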
-        assert np.allclose(np.mean(block_diffs), 0)
+        assert np.all(np.array(block_diffs) <= (0 + 1e-8))
 
     @pytest.mark.parametrize(
         "fn,input_dims",