Removed test code from blocked helper func
Scaffold seamless wrapper insertion in reproj_interpolate as per pull request astropy#214
AlistairSymonds committed Aug 25, 2022
1 parent e483bdc commit 1fce3d4
Showing 1 changed file with 9 additions and 15 deletions.
24 changes: 9 additions & 15 deletions reproject/utils.py
@@ -146,14 +146,14 @@ def _block(reproject_func, input_data, wcs_out_sub, shape_out, i_range, j_range,
res = reproject_func(input_data=input_data, output_projection=wcs_out_sub,
shape_out=shape_out, return_footprint=return_footprint)

gc.collect()
#gc.collect()
#print("Worker thread %d: %0.3f MB" %
# (psutil.Process().pid, psutil.Process().memory_info().rss / 1e6))

return {'i':i_range, 'j':j_range, 'block':res}

def reproject_blocked(reproject_func, block_size=(4000,4000), output_array=None, output_footprint=None,
parallel=False, **kwargs):
parallel=True, **kwargs):

if kwargs.get('return_footprint') == False and output_footprint is not None:
raise TypeError("If no footprint is needed, an output_footprint should not be passed in")
@@ -178,12 +178,11 @@ def reproject_blocked(reproject_func, block_size=(4000,4000), output_array=None,

if parallel == True or type(parallel) is int:
if type(parallel) is int:
print('doing the other one' + str(parallel))
proc_pool = futures.ProcessPoolExecutor(max_workers=parallel)
else:
print("tr_ue")

proc_pool = futures.ProcessPoolExecutor()
print("Workers: "+str(proc_pool._max_workers))

num_blocks = ((output_array.shape[0] // block_size[0])+1) * ((output_array.shape[1] // block_size[1])+1)
print(output_array.shape[0] // block_size[0])
sequential_blocks_done = 0
@@ -196,8 +195,9 @@ def reproject_blocked(reproject_func, block_size=(4000,4000), output_array=None,
wcs_out_sub.wcs.crpix[0] -= jmin
wcs_out_sub.wcs.crpix[1] -= imin

#if sequential input data and reinsert block into main array immediately

if proc_pool is None:
# if sequential input data and reinsert block into main array immediately
completed_block = _block(reproject_func=reproject_func, input_data=kwargs['input_data'], wcs_out_sub=wcs_out_sub,
shape_out=shape_out_sub, return_footprint=kwargs['return_footprint'],
j_range=(jmin, jmax), i_range = (imin, imax))
@@ -207,16 +207,13 @@ def reproject_blocked(reproject_func, block_size=(4000,4000), output_array=None,
output_footprint[imin:imax, jmin:jmax] = completed_block['block'][1][:]

sequential_blocks_done += 1
#print("Completed blocks: " + str(sequential_blocks_done) + "/" + str(num_blocks)
# + " [" + str(sequential_blocks_done / num_blocks) + "]")
#if parallel just submit all work items and move on to waiting for them to be done
else:
# if parallel just submit all work items and move on to waiting for them to be done
future = proc_pool.submit(_block, reproject_func=reproject_func, input_data=(array_in, wcs_in), wcs_out_sub=wcs_out_sub,
shape_out=shape_out_sub, return_footprint=kwargs['return_footprint'],
j_range=(jmin, jmax), i_range = (imin, imax))
blocks_futures.append(future)

gc.collect()

# If a parallel implementation is being used that means the blocks have not been reassembled yet and must be done now
if proc_pool is not None:
@@ -229,13 +226,10 @@ def reproject_blocked(reproject_func, block_size=(4000,4000), output_array=None,

if kwargs['return_footprint']:
output_footprint[i_range[0]:i_range[1], j_range[0]:j_range[1]] = completed_block['block'][1][:]

completed_future_count += 1
#print("Completed blocks: " +str(completed_future_count) +"/" + str(num_blocks)
# + " ["+str(completed_future_count/num_blocks) + "]")
#
#print("Main thread mem usage: %0.3f MB" %
# (psutil.Process().memory_info().rss / 1e6))
idx = blocks_futures.index(completed_future)
#ensure memory used by returned data is freed,
completed_future._result = None
del blocks_futures[idx], completed_future
proc_pool.shutdown()
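
For context on the pattern this scaffolding builds toward, below is a minimal sketch of blocked reprojection, assuming astropy and the reproject package's real reproject_interp function; the names _reproject_tile and reproject_tiled and their signatures are hypothetical illustrations, not the API added by this pull request. The idea mirrors the diff: carve the output grid into tiles, shift each tile's sub-WCS crpix so the tile gets its own pixel origin, reproject every tile either in-process or through a concurrent.futures.ProcessPoolExecutor, and reinsert the finished tiles into the full output array and footprint.

from concurrent import futures

import numpy as np
from reproject import reproject_interp


def _reproject_tile(input_data, wcs_out_tile, shape_out, i_range, j_range):
    # One unit of work: reproject a single output tile (runs in a worker
    # process when a pool is used).
    array, footprint = reproject_interp(input_data, wcs_out_tile,
                                        shape_out=shape_out)
    return i_range, j_range, array, footprint


def reproject_tiled(input_data, wcs_out, shape_out,
                    block_size=(4000, 4000), parallel=True):
    output = np.zeros(shape_out)
    footprint = np.zeros(shape_out)

    # True -> one worker per CPU, an int -> that many workers, False -> in-process.
    pool = None
    if parallel:
        max_workers = parallel if type(parallel) is int else None
        pool = futures.ProcessPoolExecutor(max_workers=max_workers)

    jobs = []
    for imin in range(0, shape_out[0], block_size[0]):
        for jmin in range(0, shape_out[1], block_size[1]):
            imax = min(imin + block_size[0], shape_out[0])
            jmax = min(jmin + block_size[1], shape_out[1])

            # Sub-WCS for this tile: shift the reference pixel so tile pixel
            # (0, 0) lines up with (imin, jmin) of the full output frame.
            wcs_tile = wcs_out.deepcopy()
            wcs_tile.wcs.crpix[0] -= jmin
            wcs_tile.wcs.crpix[1] -= imin

            args = (input_data, wcs_tile, (imax - imin, jmax - jmin),
                    (imin, imax), (jmin, jmax))
            if pool is None:
                # Sequential path: reproject and reinsert the block immediately.
                (i0, i1), (j0, j1), arr, foot = _reproject_tile(*args)
                output[i0:i1, j0:j1] = arr
                footprint[i0:i1, j0:j1] = foot
            else:
                jobs.append(pool.submit(_reproject_tile, *args))

    if pool is not None:
        # Parallel path: reassemble tiles as the workers finish, then release the pool.
        for done in futures.as_completed(jobs):
            (i0, i1), (j0, j1), arr, foot = done.result()
            output[i0:i1, j0:j1] = arr
            footprint[i0:i1, j0:j1] = foot
        pool.shutdown()

    return output, footprint

A call would look roughly like output, footprint = reproject_tiled((data_in, wcs_in), wcs_out, shape_out=(10000, 10000)); because worker processes are spawned, the calling script should be wrapped in an if __name__ == '__main__': guard on platforms that use the spawn start method.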