864 hi l1b compute coincidence type and time deltas #995

Merged
2 changes: 2 additions & 0 deletions docs/source/conf.py
@@ -107,6 +107,8 @@
(r"py:.*", r".*CoDICECompression.*"),
(r"py:.*", r".*.lo.l0.utils.*"),
(r"py:.*", r".*.lo.l0.data_classes.*"),
(r"py:.*", r".*.hi.l1b.hi_l1b.CoincidenceBitmap.*"),
(r"py:.*", r".*.hi.l1b.hi_l1b.TriggerId.*"),
(r"py:.*", r".*.hit.l0.utils.*"),
(r"py:.*", r".*.hit.l0.data_classes.*"),
(r"py:.*", r".*.hit.l1a.*"),
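
(For context: these (type, target) regex pairs presumably extend Sphinx's nitpick_ignore_regex so that the new TriggerId and CoincidenceBitmap enums do not raise unresolved cross-reference warnings during the docs build. A sketch of the assumed surrounding setting in conf.py:)

nitpick_ignore_regex = [
    # (domain regex, target regex) pairs to silence unresolved references
    (r"py:.*", r".*.hi.l1b.hi_l1b.CoincidenceBitmap.*"),
    (r"py:.*", r".*.hi.l1b.hi_l1b.TriggerId.*"),
]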
153 changes: 144 additions & 9 deletions imap_processing/hi/l1b/hi_l1b.py
@@ -1,14 +1,34 @@
"""IMAP-HI L1B processing module."""

import logging
from enum import IntEnum

import numpy as np
import xarray as xr

from imap_processing import imap_module_directory
from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
-from imap_processing.hi.utils import HIAPID, create_dataset_variables
+from imap_processing.hi.utils import HIAPID, HiConstants, create_dataset_variables
from imap_processing.utils import convert_raw_to_eu


class TriggerId(IntEnum):
"""IntEnum class for trigger id values."""

A = 1
B = 2
C = 3


class CoincidenceBitmap(IntEnum):
"""IntEnum class for coincidence type bitmap values."""

A = 2**3
B = 2**2
C1 = 2**1
C2 = 2**0
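
# Illustrative note (added comment): detector hits combine by OR-ing these
# bits, e.g. an event seen by A, B, and C1 but not C2 has coincidence type
# CoincidenceBitmap.A | CoincidenceBitmap.B | CoincidenceBitmap.C1 == 14.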


logger = logging.getLogger(__name__)
ATTR_MGR = ImapCdfAttributes()
ATTR_MGR.add_instrument_global_attrs("hi")
@@ -90,28 +110,143 @@ def annotate_direct_events(l1a_dataset: xr.Dataset) -> xr.Dataset:
l1b_dataset : xarray.Dataset
L1B direct event data.
"""
-n_epoch = l1a_dataset["epoch"].size
+l1b_dataset = compute_coincidence_type_and_time_deltas(l1a_dataset)
l1b_de_var_names = [
-"coincidence_type",
"esa_energy_step",
-"delta_t_ab",
-"delta_t_ac1",
-"delta_t_bc1",
-"delta_t_c1c2",
"spin_phase",
"hae_latitude",
"hae_longitude",
"quality_flag",
"nominal_bin",
]
new_data_vars = create_dataset_variables(
-l1b_de_var_names, (n_epoch,), att_manager_lookup_str="hi_de_{0}"
+l1b_de_var_names, l1a_dataset["epoch"].size, att_manager_lookup_str="hi_de_{0}"
)
-l1b_dataset = l1a_dataset.assign(new_data_vars)
+l1b_dataset = l1b_dataset.assign(new_data_vars)
l1b_dataset = l1b_dataset.drop_vars(
["tof_1", "tof_2", "tof_3", "de_tag", "ccsds_met", "meta_event_met"]
)

de_global_attrs = ATTR_MGR.get_global_attributes("imap_hi_l1b_de_attrs")
l1b_dataset.attrs.update(**de_global_attrs)
return l1b_dataset


def compute_coincidence_type_and_time_deltas(dataset: xr.Dataset) -> xr.Dataset:
"""
Compute coincidence type and time deltas.

Adds the new variables "coincidence_type", "delta_t_ab", "delta_t_ac1",
"delta_t_bc1", and "delta_t_c1c2" to the input xarray.Dataset and returns
the updated xarray.Dataset.

Parameters
----------
dataset : xarray.Dataset
The L1A/B dataset that results from reading in the L1A CDF and
allocating the new L1B DataArrays.

Returns
-------
xarray.Dataset
Updated xarray.Dataset with 5 new variables added.
"""
new_data_vars = create_dataset_variables(
[
"coincidence_type",
"delta_t_ab",
"delta_t_ac1",
"delta_t_bc1",
"delta_t_c1c2",
],
len(dataset.epoch),
"hi_de_{0}",
)
out_ds = dataset.assign(new_data_vars)

# compute masks needed for coincidence type and delta t calculations
a_first = out_ds.trigger_id.values == TriggerId.A
b_first = out_ds.trigger_id.values == TriggerId.B
c_first = out_ds.trigger_id.values == TriggerId.C

tof1_valid = np.isin(out_ds.tof_1.values, HiConstants.TOF1_BAD_VALUES, invert=True)
tof2_valid = np.isin(out_ds.tof_2.values, HiConstants.TOF2_BAD_VALUES, invert=True)
tof1and2_valid = tof1_valid & tof2_valid
tof3_valid = np.isin(out_ds.tof_3.values, HiConstants.TOF3_BAD_VALUES, invert=True)

# Table denoting how hit-first mask and valid TOF masks are used to set
# coincidence type bitmask
# -----------------------------------------------------------------------
# | Trigger ID | Hit First | TOF 1 Valid | TOF 2 Valid | TOF 3 Valid |
# -----------------------------------------------------------------------
# | 1 | A | A,B | A,C1 | C1,C2 |
# | 2 | B | A,B | B,C1 | C1,C2 |
# | 3 | C1 | A,C1 | B,C1 | C1,C2 |
# Set coincidence type bitmask
out_ds.coincidence_type[a_first | tof1_valid] |= CoincidenceBitmap.A
out_ds.coincidence_type[
b_first | (a_first & tof1_valid) | (c_first & tof2_valid)
] |= CoincidenceBitmap.B
out_ds.coincidence_type[c_first | tof2_valid] |= CoincidenceBitmap.C1
out_ds.coincidence_type[tof3_valid] |= CoincidenceBitmap.C2
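# Illustrative example (added comment): an A-first event with valid tof_1,
# tof_2, and tof_3 takes all four branches above, giving coincidence type
# 8 | 4 | 2 | 1 == 15.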

# Table denoting how TOF is interpreted for each Trigger ID
# -----------------------------------------------------------------------
# | Trigger ID | Hit First | TOF 1 | TOF 2 | TOF 3 |
# -----------------------------------------------------------------------
# | 1 | A | t_b - t_a | t_c1 - t_a | t_c2 - t_c1 |
# | 2 | B | t_a - t_b | t_c1 - t_b | t_c2 - t_c1 |
# | 3 | C | t_a - t_c1 | t_b - t_c1 | t_c2 - t_c1 |

# Prepare for delta_t calculations by converting TOF values to nanoseconds
tof_1_ns = (out_ds.tof_1.values * HiConstants.TOF1_TICK_DUR).astype(np.int32)
tof_2_ns = (out_ds.tof_2.values * HiConstants.TOF2_TICK_DUR).astype(np.int32)
tof_3_ns = (out_ds.tof_3.values * HiConstants.TOF3_TICK_DUR).astype(np.int32)

# ********** delta_t_ab = (t_b - t_a) **********
# Table: row 1, column 1
a_and_tof1 = a_first & tof1_valid
out_ds.delta_t_ab.values[a_and_tof1] = tof_1_ns[a_and_tof1]
# Table: row 2, column 1
b_and_tof1 = b_first & tof1_valid
out_ds.delta_t_ab.values[b_and_tof1] = -1 * tof_1_ns[b_and_tof1]
# Table: row 3, column 1 and 2
# delta_t_ab = (t_b - t_c1) - (t_a - t_c1) = (t_b - t_a)
c_and_tof1and2 = c_first & tof1and2_valid
out_ds.delta_t_ab.values[c_and_tof1and2] = (
tof_2_ns[c_and_tof1and2] - tof_1_ns[c_and_tof1and2]
)
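# Illustrative example (added comment): a B-first event with tof_1 == 10
# recorded (t_a - t_b) = 10 ns, so delta_t_ab = t_b - t_a = -10 ns, hence
# the -1 factor in the B-first branch above.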

# ********** delta_t_ac1 = (t_c1 - t_a) **********
# Table: row 1, column 2
a_and_tof2 = a_first & tof2_valid
out_ds.delta_t_ac1.values[a_and_tof2] = tof_2_ns[a_and_tof2]
# Table: row 2, column 1 and 2
# delta_t_ac1 = (t_c1 - t_b) - (t_a - t_b) = (t_c1 - t_a)
b_and_tof1and2 = b_first & tof1and2_valid
out_ds.delta_t_ac1.values[b_and_tof1and2] = (
tof_2_ns[b_and_tof1and2] - tof_1_ns[b_and_tof1and2]
)
# Table: row 3, column 1
c_and_tof1 = c_first & tof1_valid
out_ds.delta_t_ac1.values[c_and_tof1] = -1 * tof_1_ns[c_and_tof1]

# ********** delta_t_bc1 = (t_c1 - t_b) **********
# Table: row 1, column 1 and 2
# delta_t_bc1 = (t_c1 - t_a) - (t_b - t_a) = (t_c1 - t_b)
a_and_tof1and2 = a_first & tof1and2_valid
out_ds.delta_t_bc1.values[a_and_tof1and2] = (
tof_2_ns[a_and_tof1and2] - tof_1_ns[a_and_tof1and2]
)
# Table: row 2, column 2
b_and_tof2 = b_first & tof2_valid
out_ds.delta_t_bc1.values[b_and_tof2] = tof_2_ns[b_and_tof2]
# Table: row 3, column 2
c_and_tof2 = c_first & tof2_valid
out_ds.delta_t_bc1.values[c_and_tof2] = -1 * tof_2_ns[c_and_tof2]

# ********** delta_t_c1c2 = (t_c2 - t_c1) **********
# Table: all rows, column 3
out_ds.delta_t_c1c2.values[tof3_valid] = tof_3_ns[tof3_valid]

return out_ds
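
For reference, a minimal usage sketch of the new function on a hand-built dataset (a sketch only, assuming the packaged hi CDF attribute definitions behind create_dataset_variables are available; the event values are illustrative):

import numpy as np
import xarray as xr

from imap_processing.hi.l1b.hi_l1b import compute_coincidence_type_and_time_deltas

# Two synthetic events: A-first with all TOFs valid, B-first with none valid
ds = xr.Dataset(
    coords={"epoch": xr.DataArray(np.arange(2), dims=["epoch"])},
    data_vars={
        "trigger_id": xr.DataArray([1, 2], dims=["epoch"]),
        "tof_1": xr.DataArray([10, 1023], dims=["epoch"]),
        "tof_2": xr.DataArray([20, 1023], dims=["epoch"]),
        "tof_3": xr.DataArray([4, 1023], dims=["epoch"]),
    },
)
out = compute_coincidence_type_and_time_deltas(ds)
print(out.coincidence_type.values)  # expected [15, 4]: A|B|C1|C2, then B only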
33 changes: 33 additions & 0 deletions imap_processing/hi/utils.py
@@ -1,6 +1,7 @@
"""IMAP-Hi utils functions."""

from collections.abc import Sequence
from dataclasses import dataclass
from enum import IntEnum
from typing import Optional, Union

@@ -34,6 +35,38 @@ def sensor(self) -> str:
return self.name[1:3] + "sensor"


@dataclass(frozen=True)
class HiConstants:
"""
Constants for Hi instrument.

Attributes
----------
TOF1_TICK_DUR : int
Duration of Time-of-Flight 1 clock tick in nanoseconds.
TOF2_TICK_DUR : int
Duration of Time-of-Flight 2 clock tick in nanoseconds.
TOF3_TICK_DUR : float
Duration of Time-of-Flight 3 clock tick in nanoseconds.
TOF1_BAD_VALUES : tuple[int]
Tuple of values indicating TOF1 does not contain a valid time.
TOF2_BAD_VALUES : tuple[int]
Tuple of values indicating TOF2 does not contain a valid time.
TOF3_BAD_VALUES : tuple[int]
Tuple of values indicating TOF3 does not contain a valid time.
"""

TOF1_TICK_DUR = 1 # 1 ns
TOF2_TICK_DUR = 1 # 1 ns
TOF3_TICK_DUR = 0.5 # 0.5 ns

# These values are stored in the TOF telemetry when the TOF timer
# does not have valid data.
TOF1_BAD_VALUES = (511, 1023)
TOF2_BAD_VALUES = (1023,)
TOF3_BAD_VALUES = (1023,)


def full_dataarray(
name: str,
attrs: dict,
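
For reference, a minimal sketch of how these constants are intended to be used when converting raw TOF ticks to nanoseconds (illustrative values):

import numpy as np

from imap_processing.hi.utils import HiConstants

tof_3_ticks = np.array([4, 1023])
# Mask out sentinel values before trusting the timer reading
valid = np.isin(tof_3_ticks, HiConstants.TOF3_BAD_VALUES, invert=True)
tof_3_ns = (tof_3_ticks * HiConstants.TOF3_TICK_DUR).astype(np.int32)
print(tof_3_ns[valid])  # [2] -> 4 ticks at 0.5 ns per tick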
117 changes: 115 additions & 2 deletions imap_processing/tests/hi/test_hi_l1b.py
@@ -1,8 +1,16 @@
"""Test coverage for imap_processing.hi.l1b.hi_l1b.py"""

import numpy as np
import pytest
import xarray as xr

from imap_processing.hi.l1a.hi_l1a import hi_l1a
-from imap_processing.hi.l1b.hi_l1b import hi_l1b
-from imap_processing.hi.utils import HIAPID
+from imap_processing.hi.l1b.hi_l1b import (
+    CoincidenceBitmap,
+    compute_coincidence_type_and_time_deltas,
+    hi_l1b,
+)
+from imap_processing.hi.utils import HIAPID, HiConstants


def test_hi_l1b_hk(hi_l0_test_data_path):
@@ -29,3 +37,108 @@ def test_hi_l1b_de(create_de_data, tmp_path):
l1b_dataset = hi_l1b(processed_data[0], data_version=data_version)
assert l1b_dataset.attrs["Logical_source"] == "imap_hi_l1b_45sensor-de"
assert len(l1b_dataset.data_vars) == 14


@pytest.fixture()
def synthetic_trigger_id_and_tof_data():
"""Create synthetic minimum dataset for testing the
coincidence_type_and_time_deltas algorithm."""
# The following coincidence type table shows possible values to consider
Contributor (inline review): This table is really helpful! I should add something similar for Lo's coincidence types

# Value| # Exp | Requirements to get this value
# -----|-------|-------------------------------
# 0 | 0 | Non-event not recorded
# 1 | 0 | Can't trigger c2 only
# 2 | 2 | trigger_id = 3, tof_3 invalid
# 3 | 2 | trigger_id = 3, tof_3 valid
# 4 | 2 | trigger_id = 2, no valid tofs
# 5 | 0 | B and C2 not possible?
# 6 | 4 | trigger_id = 2 OR 3, tof_2 valid
# 7 | 4 | trigger_id = 2 OR 3, tof_2/3 valid
# 8 | 2 | trigger_id = 3, no valid tofs
# 9 | 0 | A and C2 not possible?
# 10 | 3 | trigger_id = 1, tof_2 OR trigger_id = 3, tof_1
# 11 | 3 | trigger_id = 1, tof_2/3, OR trigger_id = 3, tof_1/3
# 12 | 2 | trigger_id = 1 OR 2, tof_1
# 13 | 0 | A/B and C2 not possible?
# 14 | 3 | trigger_id = 1 OR 2 OR 3, tof_1/2
# 15 | 3 | trigger_id = 1, 2, 3, tof_1/2/3
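# Worked example (added comment) for value 14 (A|B|C1, no C2): trigger_id = 1
# with valid tof_1/tof_2 and invalid tof_3 sets bits A (8), B (4), and C1 (2),
# giving 8 + 4 + 2 == 14.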

# Use meshgrid to get all combinations of trigger_id and tof valid/invalid
# Note: this generates 6 impossible occurrences where C1 is not triggered
# but C2 is. Those are manually removed below.
ids = np.arange(3) + 1
tof1s = np.array(np.concatenate((HiConstants.TOF1_BAD_VALUES, [1])))
tof2s = np.array(np.concatenate((HiConstants.TOF2_BAD_VALUES, [2])))
tof3s = np.array(np.concatenate((HiConstants.TOF3_BAD_VALUES, [3])))
var_names = ["trigger_id", "tof_1", "tof_2", "tof_3"]
data = np.meshgrid(ids, tof1s, tof2s, tof3s)
Contributor (inline review): Great idea to more easily cover the combinations!
Author (reply): I can't quite take credit... found it on SO.

data = [arr.flatten() for arr in data]
# Remove impossible combinations
good_inds = np.nonzero(
np.logical_not(
np.logical_and(data[0] != 3, ((data[2] >= 511) & (data[3] < 511)))
)
)
data = [arr[good_inds] for arr in data]
data_vars = {
n: xr.DataArray(arr, dims=["epoch"]) for n, arr in zip(var_names, data)
}
synthetic_l1a_ds = xr.Dataset(
coords={
"epoch": xr.DataArray(
np.arange(data_vars["trigger_id"].size), name="epoch", dims=["epoch"]
)
},
data_vars=data_vars,
)
expected_histogram = np.array([0, 0, 2, 2, 2, 0, 4, 4, 2, 0, 3, 3, 2, 0, 3, 3])
return synthetic_l1a_ds, expected_histogram


def test_compute_coincidence_type_and_time_deltas(synthetic_trigger_id_and_tof_data):
"""Test coverage for
`imap_processing.hi.hi_l1b.compute_coincidence_type_and_time_deltas`."""
updated_dataset = compute_coincidence_type_and_time_deltas(
synthetic_trigger_id_and_tof_data[0]
)
for var_name in [
"coincidence_type",
"delta_t_ab",
"delta_t_ac1",
"delta_t_bc1",
"delta_t_c1c2",
]:
assert var_name in updated_dataset.data_vars
# verify coincidence type values
coincidence_hist, bins = np.histogram(
updated_dataset.coincidence_type, bins=np.arange(17)
)
np.testing.assert_array_equal(
coincidence_hist, synthetic_trigger_id_and_tof_data[1]
)
# verify delta_t values are valid in the correct locations
np.testing.assert_array_equal(
updated_dataset.delta_t_ab != updated_dataset.delta_t_ab.FILLVAL,
updated_dataset.coincidence_type >= 12,
)
np.testing.assert_array_equal(
updated_dataset.delta_t_ac1 != updated_dataset.delta_t_ac1.FILLVAL,
np.logical_and(
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.A),
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.C1),
),
)
np.testing.assert_array_equal(
updated_dataset.delta_t_bc1 != updated_dataset.delta_t_bc1.FILLVAL,
np.logical_and(
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.B),
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.C1),
),
)
np.testing.assert_array_equal(
updated_dataset.delta_t_c1c2 != updated_dataset.delta_t_c1c2.FILLVAL,
np.logical_and(
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.C1),
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.C2),
),
)
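
For reference, a small sketch of the bitmask logic the assertions above rely on (values chosen for illustration):

import numpy as np

from imap_processing.hi.l1b.hi_l1b import CoincidenceBitmap

coincidence_type = np.array([14, 4])  # an A|B|C1 event and a B-only event
has_a_and_c1 = np.logical_and(
    np.bitwise_and(coincidence_type, CoincidenceBitmap.A),
    np.bitwise_and(coincidence_type, CoincidenceBitmap.C1),
)
print(has_a_and_c1)  # [ True False]: only the first event carries delta_t_ac1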