Skip to content

Commit

Permalink
ssd addition
Browse files Browse the repository at this point in the history
  • Loading branch information
laspsandoval committed May 14, 2024
1 parent 2db8146 commit 8fd16af
Show file tree
Hide file tree
Showing 4 changed files with 131 additions and 93 deletions.
187 changes: 110 additions & 77 deletions imap_processing/tests/ultra/unit/test_ultra_l1b_theta_0.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,10 @@
get_front_y_position,
get_particle_velocity,
get_path_length,
get_ssd_positions,
)


@pytest.fixture()
def decom_ultra_events(ccsds_path_theta_0, xtce_path):
"""Data for decom_ultra_events"""
Expand Down Expand Up @@ -46,13 +48,13 @@ def indices_start_type_1_or_2(decom_ultra_events, decom_ultra_aux):

# Create the dataset
events_dataset = create_dataset(
{ULTRA_EVENTS.apid[0]: decom_ultra_events,
ULTRA_AUX.apid[0]: decom_ultra_aux}
{ULTRA_EVENTS.apid[0]: decom_ultra_events, ULTRA_AUX.apid[0]: decom_ultra_aux}
)

# Remove start_type with fill values
events_dataset = events_dataset.where(events_dataset["START_TYPE"] !=
GlobalConstants.INT_FILLVAL, drop=True)
events_dataset = events_dataset.where(
events_dataset["START_TYPE"] != GlobalConstants.INT_FILLVAL, drop=True
)

# Check top and bottom
index_1 = np.where(events_dataset["START_TYPE"] == 1)[0]
Expand All @@ -70,12 +72,12 @@ def indices_stop_type_1_or_2(decom_ultra_events, decom_ultra_aux):

# Create the dataset
events_dataset = create_dataset(
{ULTRA_EVENTS.apid[0]: decom_ultra_events,
ULTRA_AUX.apid[0]: decom_ultra_aux}
{ULTRA_EVENTS.apid[0]: decom_ultra_events, ULTRA_AUX.apid[0]: decom_ultra_aux}
)
# Remove start_type with fill values
events_dataset = events_dataset.where(events_dataset["START_TYPE"] !=
GlobalConstants.INT_FILLVAL, drop=True)
events_dataset = events_dataset.where(
events_dataset["START_TYPE"] != GlobalConstants.INT_FILLVAL, drop=True
)

# Check top and bottom
index_1 = np.where(events_dataset["STOP_TYPE"] == 1)[0]
Expand All @@ -84,44 +86,66 @@ def indices_stop_type_1_or_2(decom_ultra_events, decom_ultra_aux):
return index_1, index_2, events_dataset


@pytest.fixture()
def indices_stop_type_8_to_15(decom_ultra_events, decom_ultra_aux):
    """
    A pytest fixture to extract indices from events_dataset where STOP_TYPE is
    in the SSD range (8 to 15, i.e. STOP_TYPE >= 8).

    Returns
    -------
    index : np.ndarray
        Positions (along the 'epoch' dimension) of events whose STOP_TYPE >= 8.
    events_dataset : xarray.Dataset
        The combined events/aux dataset with fill-value START_TYPE rows dropped.
    """

    # Create the dataset
    events_dataset = create_dataset(
        {ULTRA_EVENTS.apid[0]: decom_ultra_events, ULTRA_AUX.apid[0]: decom_ultra_aux}
    )
    # Remove start_type with fill values
    events_dataset = events_dataset.where(
        events_dataset["START_TYPE"] != GlobalConstants.INT_FILLVAL, drop=True
    )

    # SSD stop types are 8-15, so a single >= 8 comparison selects them all.
    index = np.where(events_dataset["STOP_TYPE"] >= 8)[0]

    return index, events_dataset


def test_xf(
indices_start_type_1_or_2,
events_fsw_comparison_theta_0,
):

indices_1, indices_2, events_dataset = indices_start_type_1_or_2

df = pd.read_csv(events_fsw_comparison_theta_0)
df_filt = df[df["StartType"] != -1]
selected_rows_1 = df_filt.iloc[indices_1]
selected_rows_2 = df_filt.iloc[indices_2]

xf_1 = get_front_x_position(events_dataset["START_TYPE"].data[indices_1],
events_dataset["START_POS_TDC"].data[indices_1])
xf_2 = get_front_x_position(events_dataset["START_TYPE"].data[indices_2],
events_dataset["START_POS_TDC"].data[indices_2])
xf_1 = get_front_x_position(
events_dataset["START_TYPE"].data[indices_1],
events_dataset["START_POS_TDC"].data[indices_1],
)
xf_2 = get_front_x_position(
events_dataset["START_TYPE"].data[indices_2],
events_dataset["START_POS_TDC"].data[indices_2],
)

# The value 180 was added to xf_1 since that is the offset from the FSW xft_off
assert np.allclose(xf_1+180, selected_rows_1.Xf.values.astype('float'), rtol=1e-3)
assert np.allclose(xf_1 + 180, selected_rows_1.Xf.values.astype("float"), rtol=1e-3)
# The value 25 was subtracted from xf_2 since that is the offset from the FSW xft_off
assert np.allclose(xf_2-25, selected_rows_2.Xf.values.astype('float'), rtol=1e-3)
assert np.allclose(xf_2 - 25, selected_rows_2.Xf.values.astype("float"), rtol=1e-3)


@pytest.fixture()
def tof(indices_stop_type_1_or_2,
events_fsw_comparison_theta_0):
def tof(indices_stop_type_1_or_2, events_fsw_comparison_theta_0):
indices_1, indices_2, events_dataset = indices_stop_type_1_or_2
indices = np.concatenate((indices_1,indices_2))
indices = np.concatenate((indices_1, indices_2))
indices.sort()

df = pd.read_csv(events_fsw_comparison_theta_0)
df_filt = df[df["StartType"] != -1]
selected_rows_1 = df_filt.iloc[indices]

tof, t2, xb, yb = get_back_positions(
indices,
events_dataset,
selected_rows_1.Xf.values.astype('float')
indices, events_dataset, selected_rows_1.Xf.values.astype("float")
)
return tof, t2, xb, yb

Expand All @@ -133,42 +157,70 @@ def test_xb_yb(
):
_, _, xb, yb = tof
indices_1, indices_2, events_dataset = indices_stop_type_1_or_2
indices = np.concatenate((indices_1,indices_2))
indices = np.concatenate((indices_1, indices_2))

df = pd.read_csv(events_fsw_comparison_theta_0)
df_filt = df[df["StartType"] != -1]
selected_rows_1 = df_filt.iloc[indices]

np.testing.assert_array_equal(xb[indices],
selected_rows_1["Xb"].astype('float'))
np.testing.assert_array_equal(yb[indices],
selected_rows_1["Yb"].astype('float'))
np.testing.assert_array_equal(xb[indices], selected_rows_1["Xb"].astype("float"))
np.testing.assert_array_equal(yb[indices], selected_rows_1["Yb"].astype("float"))


@pytest.fixture()
def tof_ssd(indices_stop_type_8_to_15, events_fsw_comparison_theta_0):
    """
    Compute SSD back positions and time of flight for SSD-stop events.

    Reads the FSW comparison CSV, keeps rows with a valid StartType, and feeds
    the matching front x positions (Xf) into get_ssd_positions.

    Returns
    -------
    xb, yb, tof
        Back x/y positions and time of flight as produced by get_ssd_positions.
    """
    ssd_indices, events_dataset = indices_stop_type_8_to_15

    comparison = pd.read_csv(events_fsw_comparison_theta_0)
    valid_rows = comparison[comparison["StartType"] != -1]
    xf_values = valid_rows.iloc[ssd_indices].Xf.values.astype("float")

    tof, xb, yb = get_ssd_positions(ssd_indices, events_dataset, xf_values)
    return xb, yb, tof


def test_xb_yb_ssd(
    indices_stop_type_8_to_15,
    tof_ssd,
    events_fsw_comparison_theta_0,
):
    """Compare SSD back positions (xb, yb) against the FSW comparison data."""
    xb, yb, tof = tof_ssd
    # BUG FIX: the original unpacked the undefined name
    # `indices_stop_type_1_or_2` (not a parameter of this test) into three
    # values, which raised NameError. This test's fixture is
    # `indices_stop_type_8_to_15`, which returns (indices, events_dataset).
    indices, events_dataset = indices_stop_type_8_to_15

    df = pd.read_csv(events_fsw_comparison_theta_0)
    df_filt = df[df["StartType"] != -1]
    selected_rows_1 = df_filt.iloc[indices]

    np.testing.assert_array_equal(xb[indices], selected_rows_1["Xb"].astype("float"))
    np.testing.assert_array_equal(yb[indices], selected_rows_1["Yb"].astype("float"))


def test_yf(
indices_start_type_1_or_2,
indices_stop_type_1_or_2,
events_fsw_comparison_theta_0,
tof
tof,
):
_, _, events_dataset = indices_start_type_1_or_2
index_1, index_2, events_dataset = indices_start_type_1_or_2

df = pd.read_csv(events_fsw_comparison_theta_0)
df_filt = df[df["StartType"] != -1]

d, yf = get_front_y_position(events_dataset,
df_filt.Yb.values.astype('float'))
d, yf = get_front_y_position(events_dataset, df_filt.Yb.values.astype("float"))

assert yf == pytest.approx(df_filt["Yf"].astype('float'), 1e-3)
assert yf == pytest.approx(df_filt["Yf"].astype("float"), 1e-3)

xf_test = df_filt["Xf"].astype('float').values
yf_test = df_filt["Yf"].astype('float').values
xf_test = df_filt["Xf"].astype("float").values
yf_test = df_filt["Yf"].astype("float").values

xb_test = df_filt["Xb"].astype('float').values
yb_test = df_filt["Yb"].astype('float').values
xb_test = df_filt["Xb"].astype("float").values
yb_test = df_filt["Yb"].astype("float").values

r = get_path_length((xf_test, yf_test), (xb_test, yb_test), d)
assert r == pytest.approx(df_filt["r"].astype('float'), rel=1e-3)
assert r == pytest.approx(df_filt["r"].astype("float"), rel=1e-3)

# TODO: test get_energy_pulse_height
# pulse_height = events_dataset["ENERGY_PH"].data[index]
Expand All @@ -177,49 +229,30 @@ def test_yf(
# TODO: needs lookup table to test bin
tof, t2, xb, yb = tof
indices_1, indices_2, events_dataset = indices_stop_type_1_or_2
indices = np.concatenate((indices_1,indices_2))
indices = np.concatenate((indices_1, indices_2))
indices.sort()

energy = df_filt["Xf"].iloc[indices].astype('float')
r = df_filt["Xf"].iloc[indices].astype('float')

ctof, bin = determine_species_pulse_height(energy, tof[indices], r)
assert ctof * 100 == pytest.approx(df_filt["cTOF"].iloc[indices].astype('float'), rel=1e-3)
#
# energy = float(df["Xf"].iloc[index])
#
# # TODO: needs lookup table to test bin
# ctof, bin = determine_species_pulse_height(energy, tof, r)
# assert ctof * 100 == pytest.approx(float(df["cTOF"].iloc[index]), rel=1e-3)


def test_positions_3(
tests_indices,
events_fsw_comparison_theta_0,
):
"""TODO."""

indices, events_dataset = tests_indices

df = pd.read_csv(events_fsw_comparison_theta_0)
df.replace("FILL", GlobalConstants.INT_FILLVAL, inplace=True)
selected_rows = df.iloc[indices]


if events_dataset["STOP_TYPE"].data[index] in [1, 2]:

energy = float(df["Xf"].iloc[index])

energy = df_filt["Xf"].iloc[indices].astype("float")
r = df_filt["r"].iloc[indices].astype("float")

ctof, bin = determine_species_pulse_height(energy, tof[indices] * 100, r)
assert ctof.values == pytest.approx(
df_filt["cTOF"].iloc[indices].astype("float").values, rel=1e-3
)

velocity = get_particle_velocity((xf, yf), (xb, yb), d, tof)
vhat_x, vhat_y, vhat_z = get_particle_velocity(
(xf_test[indices], yf_test[indices]),
(xb_test[indices], yb_test[indices]),
d[indices],
tof[indices],
)

assert velocity[0] == pytest.approx(
float(df["vhatX"].iloc[index]), rel=1e-2
)
assert velocity[1] == pytest.approx(
float(df["vhatY"].iloc[index]), rel=1e-2
)
assert velocity[2] == pytest.approx(
float(df["vhatZ"].iloc[index]), rel=1e-2
)
assert vhat_x == pytest.approx(
df_filt["vhatX"].iloc[indices].astype("float").values, rel=1e-2
)
assert vhat_y == pytest.approx(
df_filt["vhatY"].iloc[indices].astype("float").values, rel=1e-2
)
assert vhat_z == pytest.approx(
df_filt["vhatZ"].iloc[indices].astype("float").values, rel=1e-2
)
35 changes: 20 additions & 15 deletions imap_processing/ultra/l1b/ultra_l1b.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

# TODO: Decide on consistent fill values.
import logging
import math
from collections import defaultdict

import numpy as np
Expand Down Expand Up @@ -95,19 +94,23 @@ def get_back_positions(indices, events_dataset, xf: float):
yb[index_top] = get_back_position(yb_index[stop_type_top], "YBkTp", "ultra45")

# Correction for the propagation delay of the start anode and other effects.
t2[index_top] = get_image_params("TOFSC") * t1[stop_type_top] / 1024 + \
get_image_params("TOFTPOFF")
tof[index_top] = t2[index_top] + xf[stop_type_top] * get_image_params("XFTTOF") / 32768
t2[index_top] = get_image_params("TOFSC") * t1[stop_type_top] + get_image_params(
"TOFTPOFF"
)
tof[index_top] = t2[index_top] + xf[stop_type_top] * get_image_params("XFTTOF")

index_bottom = indices[events_dataset["STOP_TYPE"].data[indices] == 2]
stop_type_bottom = events_dataset["STOP_TYPE"].data[indices] == 2
xb[index_bottom] = get_back_position(xb_index[stop_type_bottom], "XBkBt", "ultra45")
yb[index_bottom] = get_back_position(yb_index[stop_type_bottom], "YBkBt", "ultra45")

# Correction for the propagation delay of the start anode and other effects.
t2[index_bottom] = get_image_params("TOFSC") * t1[stop_type_bottom] / 1024 + \
get_image_params("TOFBTOFF")
tof[index_bottom] = t2[stop_type_bottom] + xf[stop_type_bottom] * get_image_params("XFTTOF") / 32768
t2[index_bottom] = get_image_params("TOFSC") * t1[
stop_type_bottom
] + get_image_params("TOFBTOFF")
tof[index_bottom] = t2[index_bottom] + xf[stop_type_bottom] * get_image_params(
"XFTTOF"
)

return tof, t2, xb, yb

Expand All @@ -134,7 +137,6 @@ def get_front_x_position(start_type: np.array, start_position_tdc: np.array):
xf : np.array
x front position (hundredths of a millimeter).
"""

if np.any((start_type != 1) & (start_type != 2)):
raise ValueError("Error: Invalid Start Type")

Expand All @@ -146,7 +148,7 @@ def get_front_x_position(start_type: np.array, start_position_tdc: np.array):
# Calculate xf and convert to hundredths of a millimeter
# Note FSW uses xft_off+1.8, but the lookup table uses xft_off
# Note FSW uses xft_off-.25, but the lookup table uses xft_off
xf = (xftsc * -start_position_tdc + xft_off)*100
xf = (xftsc * -start_position_tdc + xft_off) * 100

return xf

Expand Down Expand Up @@ -200,7 +202,9 @@ def get_front_y_position(events_dataset, yb: float):

yf_estimate_2 = -40 # front position of particle (mm)
# TODO: make certain yb units correct
dy_lut_2 = np.round((yb[start_type_right] / 100 - yf_estimate_2) * 256 / 81.92) # mm
dy_lut_2 = np.round(
(yb[start_type_right] / 100 - yf_estimate_2) * 256 / 81.92
) # mm
yadj_2 = get_y_adjust(dy_lut_2) / 100 # mm
yf[index_right] = (yf_estimate_2 + yadj_2) * 100
dadj_2 = np.sqrt(2) * df - yadj_2 # mm# hundredths of a millimeter
Expand Down Expand Up @@ -322,7 +326,7 @@ def get_ssd_index(index: int, events_dataset: xarray.Dataset):
return ssd_index


def get_ssd_positions(index: int, events_dataset: xarray.Dataset, xf: float):
def get_ssd_positions(indices, events_dataset: xarray.Dataset, xf: float):
"""
Calculate back xb, yb position for the SSDs.
Expand Down Expand Up @@ -360,6 +364,9 @@ def get_ssd_positions(index: int, events_dataset: xarray.Dataset, xf: float):
"""
xb = 0

index_left = indices[events_dataset["START_TYPE"].data[indices] == 1]
index_right = indices[events_dataset["START_TYPE"].data[indices] == 2]

# Start Type: 1=Left, 2=Right
if events_dataset["START_TYPE"].data[index] == 1: # Left
side = "Lt"
Expand Down Expand Up @@ -620,15 +627,13 @@ def get_particle_velocity(
v_z = d / tof

# Magnitude of the velocity vector
magnitude_v = math.sqrt(v_x**2 + v_y**2 + v_z**2)
magnitude_v = np.sqrt(v_x**2 + v_y**2 + v_z**2)

vhat_x = v_x / magnitude_v
vhat_y = v_y / magnitude_v
vhat_z = v_z / magnitude_v

velocity = (vhat_x, vhat_y, vhat_z)

return velocity
return vhat_x, vhat_y, vhat_z


def process_count_zero(data_dict: dict):
Expand Down
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
SHCOARSE,XFTSC,XFTLTOFF,XFTRTOFF,TOFSC,TOFTPOFF,TOFBTOFF,XFTTOF,XCOINTPSC,XCOINTPOFF,XCOINBTSC,XCOINBTOFF,ETOFSC,ETOFTPOFF,ETOFBTOFF,TOFDIFFTPMIN,TOFDIFFTPMAX,TOFDIFFBTMIN,TOFDIFFBTMAX,ETOFMIN,ETOFMAX,ETOFSLOPE1,ETOFOFF1,ETOFSLOPE2,ETOFOFF2,SPTPPHOFF,SPBTPHOFF,YBKSSD0,YBKSSD1,YBKSSD2,YBKSSD3,YBKSSD4,YBKSSD5,YBKSSD6,YBKSSD7,TOFSSDSC,TOFSSDLTOFF0,TOFSSDLTOFF1,TOFSSDLTOFF2,TOFSSDLTOFF3,TOFSSDLTOFF4,TOFSSDLTOFF5,TOFSSDLTOFF6,TOFSSDLTOFF7,TOFSSDRTOFF0,TOFSSDRTOFF1,TOFSSDRTOFF2,TOFSSDRTOFF3,TOFSSDRTOFF4,TOFSSDRTOFF5,TOFSSDRTOFF6,TOFSSDRTOFF7,TOFSSDTOTOFF,PATHSTEEPTHRESH,PATHMEDIUMTHRESH
445027647,0.172998046875,47.5,48.5,0.05,-53.0,-53.300000000000004,0.018310546875,0.068486328125,42.25,0.068486328125,-39.75,0.1,-44.5,-44.5,22.6,26.6,22.6,26.6,-40.0,9.0,0.66669921875,10.0,0.75,-5.0,540,540,29.3,37.300000000000004,7.1000000000000005,15.1,-15.1,-7.1000000000000005,-37.300000000000004,-29.3,0.19648437500000002,-6.0,-7.0,-3.5,-4.0,-3.8000000000000003,-3.5,-6.800000000000001,-5.5,-5.5,-6.800000000000001,-3.5,-3.8000000000000003,-4.0,-3.5,-7.0,-6.0,5.300000000000001,50.0,65.0
445027647,0.172998047,47.5,48.5,0.5,-528,-525,0.001831055,0.068486328,42.25,0.068486328,-39.75,0.1,-44.5,-44.5,22.6,26.6,22.6,26.6,-40,9,0.666699219,10,0.75,-5,540,540,29.3,37.300000000000004,7.1000000000000005,15.1,-15.1,-7.1,-37.3,-29.3,0.19648437500000002,-6,-7,-3.5,-4,-3.8,-3.5,-6.8,-5.5,-5.5,-6.8,-3.5,-3.8,-4,-3.5,-7,-6,5.300000000000001,50,65
Binary file not shown.

0 comments on commit 8fd16af

Please sign in to comment.