Issue-168: Updating Usage of 'np.where' Function #169

Merged 1 commit on May 30, 2023
2 changes: 2 additions & 0 deletions CHANGES.rst
@@ -16,6 +16,8 @@ ramp_fitting
- Correct the "averaging" of the final image slope by properly excluding
variances as a part of the denominator from integrations with invalid slopes.
[#167]
- Remove the usage of ``numpy.where`` where possible for performance
reasons. [#169]

Changes to API
--------------
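The entry above describes the pattern applied throughout this PR: building integer index arrays with np.where and then indexing with them is replaced by indexing directly with a boolean mask. A minimal sketch of the before/after pattern, using a toy array and an illustrative DQ bit value rather than anything from the stcal code:

import numpy as np

SATURATED = 2  # illustrative DQ bit, not the actual stcal flag value

gdq = np.array([[0, 2], [3, 0]], dtype=np.uint8)  # toy group-DQ flags
data = np.ones((2, 2), dtype=np.float32)          # toy science data

# Old pattern: build index arrays with np.where, then index with them.
where_sat = np.where(np.bitwise_and(gdq, SATURATED))
data_old = data.copy()
data_old[where_sat] = np.nan

# New pattern: index directly with a boolean mask.
data_new = data.copy()
data_new[np.bitwise_and(gdq, SATURATED).astype(bool)] = np.nan

# Both forms blank out the same elements; the mask form avoids
# materializing the integer index arrays.
assert np.array_equal(np.isnan(data_old), np.isnan(data_new))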
35 changes: 11 additions & 24 deletions src/stcal/ramp_fitting/ols_fit.py
@@ -943,10 +943,7 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting):
gain_sect = gain_2d[rlo:rhi, :]

# Reset all saturated groups in the input data array to NaN
- where_sat = np.where(np.bitwise_and(gdq_sect, ramp_data.flags_saturated))
-
- data_sect[where_sat] = np.NaN
- del where_sat
+ data_sect[np.bitwise_and(gdq_sect, ramp_data.flags_saturated).astype(bool)] = np.NaN

# Calculate the slope of each segment
# note that the name "opt_res", which stands for "optional results",
@@ -965,11 +962,8 @@ def ramp_fit_slopes(ramp_data, gain_2d, readnoise_2d, save_opt, weighting):
num_seg_per_int[num_int, rlo:rhi, :] = num_seg.reshape(sect_shape)

# Populate integ-spec slice which is set if 0th group has SAT
- wh_sat0 = np.where(np.bitwise_and(gdq_sect[0, :, :], ramp_data.flags_saturated))
- if len(wh_sat0[0]) > 0:
- sat_0th_group_int[num_int, rlo:rhi, :][wh_sat0] = 1
-
- del wh_sat0
+ sat_0th_group_int[num_int, rlo:rhi, :][np.bitwise_and(
+ gdq_sect[0, :, :], ramp_data.flags_saturated).astype(bool)] = 1

pixeldq_sect = pixeldq[rlo:rhi, :].copy()
dq_int[num_int, rlo:rhi, :] = utils.dq_compress_sect(
@@ -1424,8 +1418,7 @@ def ramp_fit_overall(
# For invalid slope calculations set to NaN. Pixels flagged as SATURATED or
# DO_NOT_USE have invalid data.
invalid_data = ramp_data.flags_saturated | ramp_data.flags_do_not_use
- wh_invalid = np.where(np.bitwise_and(final_pixeldq, invalid_data))
- c_rates[wh_invalid] = np.nan
+ c_rates[np.bitwise_and(final_pixeldq, invalid_data).astype(bool)] = np.nan

if dq_int is not None:
del dq_int
@@ -1497,11 +1490,11 @@ def calc_power(snr):
weighting exponent, 1-D float
"""
pow_wt = snr.copy() * 0.0
- pow_wt[np.where(snr > 5.)] = 0.4
- pow_wt[np.where(snr > 10.)] = 1.0
- pow_wt[np.where(snr > 20.)] = 3.0
- pow_wt[np.where(snr > 50.)] = 6.0
- pow_wt[np.where(snr > 100.)] = 10.0
+ pow_wt[snr > 5.] = 0.4
+ pow_wt[snr > 10.] = 1.0
+ pow_wt[snr > 20.] = 3.0
+ pow_wt[snr > 50.] = 6.0
+ pow_wt[snr > 100.] = 10.0

return pow_wt.ravel()

@@ -1717,9 +1710,7 @@ def calc_slope(data_sect, gdq_sect, frame_time, opt_res, save_opt, rn_sect,
# Find CRs in the ramp.
jump_det = ramp_data.flags_jump_det
mask_2d_jump = mask_2d.copy()
- wh_jump = np.where(gdq_sect_r == jump_det)
- mask_2d_jump[wh_jump] = True
- del wh_jump
+ mask_2d_jump[gdq_sect_r == jump_det] = True

# Add back possible CRs at the beginning of a ramp that were excluded
# above.
@@ -3201,11 +3192,7 @@ def fit_1_group(slope_s, intercept_s, variance_s, sig_intercept_s,
sig_intercept_s = slope_s * 0.

# For saturated pixels, overwrite slope with benign values.
- wh_sat0 = np.where(np.logical_not(mask_2d[0, :]))
-
- if len(wh_sat0[0]) > 0:
- sat_pix = wh_sat0[0]
- slope_s[sat_pix] = 0.
+ slope_s[np.logical_not(mask_2d[0, :])] = 0.

return slope_s, intercept_s, variance_s, sig_intercept_s, sig_slope_s

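To get a feel for the performance motivation cited in the changelog entry, the two indexing styles can be compared with a rough micro-benchmark; the array shape, flag bit, and any resulting timings here are illustrative assumptions, not measurements from this PR:

import timeit

import numpy as np

SATURATED = 2  # illustrative flag bit
rng = np.random.default_rng(0)
gdq = rng.integers(0, 4, size=(5, 1024, 1024), dtype=np.uint8)  # toy group-DQ cube
data = np.zeros(gdq.shape, dtype=np.float32)

def with_where():
    # old style: np.where materializes integer index arrays before the assignment
    data[np.where(np.bitwise_and(gdq, SATURATED))] = np.nan

def with_mask():
    # new style: assign straight through a boolean mask
    data[np.bitwise_and(gdq, SATURATED).astype(bool)] = np.nan

print("np.where indexing:", min(timeit.repeat(with_where, number=5, repeat=3)))
print("boolean mask:     ", min(timeit.repeat(with_mask, number=5, repeat=3)))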
23 changes: 8 additions & 15 deletions src/stcal/ramp_fitting/utils.py
@@ -509,11 +509,9 @@ def calc_slope_vars(ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg
gdq_2d_nan = gdq_2d.copy() # group dq with SATS will be replaced by nans
gdq_2d_nan = gdq_2d_nan.astype(np.float32)

- wh_sat = np.where(np.bitwise_and(gdq_2d, ramp_data.flags_saturated))
- if len(wh_sat[0]) > 0:
- gdq_2d_nan[wh_sat] = np.nan # set all SAT groups to nan
-
- del wh_sat
+ # set all SAT groups to nan
+ gdq_2d_nan[np.bitwise_and(
+ gdq_2d, ramp_data.flags_saturated).astype(bool)] = np.nan

# Get lengths of semiramps for all pix [number_of_semiramps, number_of_pix]
segs = np.zeros_like(gdq_2d)
@@ -564,9 +562,8 @@ def calc_slope_vars(ramp_data, rn_sect, gain_sect, gdq_sect, group_time, max_seg

# Create a version 1 less for later calculations for the variance due to
# Poisson, with a floor=1 to handle single-group segments
- wh_pos_3 = np.where(segs_beg_3 > 1)
segs_beg_3_m1 = segs_beg_3.copy()
- segs_beg_3_m1[wh_pos_3] -= 1
+ segs_beg_3_m1[segs_beg_3 > 1] -= 1
segs_beg_3_m1[segs_beg_3_m1 < 1] = 1

# For a segment, the variance due to Poisson noise
@@ -736,8 +733,7 @@ def output_integ(ramp_data, slope_int, dq_int, effintim, var_p3, var_r3, var_bot

data = slope_int / effintim
invalid_data = ramp_data.flags_saturated | ramp_data.flags_do_not_use
- wh_invalid = np.where(np.bitwise_and(dq_int, invalid_data))
- data[wh_invalid] = np.nan
+ data[np.bitwise_and(dq_int, invalid_data).astype(bool)] = np.nan

err = np.sqrt(var_both3)
dq = dq_int
@@ -1381,7 +1377,7 @@ def set_if_total_integ(final_dq, integ_dq, flag, set_flag):

# Find where flag is set
test_dq = np.zeros(integ_dq.shape, dtype=np.uint32)
- test_dq[np.where(np.bitwise_and(integ_dq, flag))] = 1
+ test_dq[np.bitwise_and(integ_dq, flag).astype(bool)] = 1

# Sum over all integrations
test_sum = test_dq.sum(axis=0)
@@ -1499,10 +1495,7 @@ def compute_median_rates(ramp_data):
gdq_sect = ramp_data.groupdq[integ, :, :, :]

# Reset all saturated groups in the input data array to NaN
- where_sat = np.where(np.bitwise_and(gdq_sect, ramp_data.flags_saturated))
-
- data_sect[where_sat] = np.NaN
- del where_sat
+ data_sect[np.bitwise_and(gdq_sect, ramp_data.flags_saturated).astype(bool)] = np.NaN

data_sect = data_sect / group_time

@@ -1644,7 +1637,7 @@ def groups_saturated_in_integration(intdq, sat_flag, num_sat_groups):
The number of saturated groups in an integration of interest.
"""
sat_groups = np.zeros(intdq.shape, dtype=int)
- sat_groups[np.where(np.bitwise_and(intdq, sat_flag))] = 1
+ sat_groups[np.bitwise_and(intdq, sat_flag).astype(bool)] = 1
nsat_groups = sat_groups.sum(axis=0)
wh_nsat_groups = np.where(nsat_groups == num_sat_groups)

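One detail worth noting in the hunks above: np.where is only removed where a boolean mask is all that is needed; calls whose index arrays are actually used afterward, such as the wh_nsat_groups lookup kept in groups_saturated_in_integration, remain. A small sketch of that distinction, with made-up array contents:

import numpy as np

nsat_groups = np.array([[0, 3], [3, 1]])

# A boolean mask is enough when the result is only used to assign in place ...
flagged = np.zeros_like(nsat_groups)
flagged[nsat_groups == 3] = 1

# ... but np.where is still the right tool when the indices themselves are needed.
rows, cols = np.where(nsat_groups == 3)
print(list(zip(rows.tolist(), cols.tolist())))  # [(0, 1), (1, 0)]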
36 changes: 36 additions & 0 deletions tests/test_ramp_fitting.py
@@ -1291,6 +1291,42 @@ def test_invalid_integrations():
np.testing.assert_allclose(cerr[:, 0, 0], check, tol, tol)


def test_one_group():
"""
Test ngroups = 1
"""
nints, ngroups, nrows, ncols = 1, 1, 1, 1
rnval, gval = 10., 5.
frame_time, nframes, groupgap = 10.736, 4, 1

dims = nints, ngroups, nrows, ncols
var = rnval, gval
tm = frame_time, nframes, groupgap

ramp, gain, rnoise = create_blank_ramp_data(dims, var, tm)

ramp.data[0, 0, 0, 0] = 105.31459

save_opt, ncores, bufsize, algo = False, "none", 1024 * 30000, "OLS"
slopes, cube, ols_opt, gls_opt = ramp_fit_data(
ramp, bufsize, save_opt, rnoise, gain, algo, "optimal", ncores, dqflags)

tol = 1e-5
sdata, sdq, svp, svr, serr = slopes
assert abs(sdata[0, 0] - 1.9618962) < tol
assert sdq[0, 0] == 0
assert abs(svp[0, 0] - 0.02923839) < tol
assert abs(svr[0, 0] - 0.03470363) < tol
assert abs(serr[0, 0] - 0.2528676) < tol

cdata, cdq, cvp, cvr, cerr = cube
assert abs(sdata[0, 0] - cdata[0, 0, 0]) < tol
assert sdq[0, 0] == cdq[0, 0, 0]
assert abs(svp[0, 0] - cvp[0, 0, 0]) < tol
assert abs(svr[0, 0] - cvr[0, 0, 0]) < tol
assert abs(serr[0, 0] - cerr[0, 0, 0]) < tol


def create_blank_ramp_data(dims, var, tm):
"""
Create empty RampData classes, as well as gain and read noise arrays,