864 hi l1b compute coincidence type and time deltas #995

Merged
2 changes: 2 additions & 0 deletions docs/source/conf.py
@@ -107,6 +107,8 @@
(r"py:.*", r".*CoDICECompression.*"),
(r"py:.*", r".*.lo.l0.utils.*"),
(r"py:.*", r".*.lo.l0.data_classes.*"),
(r"py:.*", r".*.hi.l1b.hi_l1b.CoincidenceBitmap.*"),
(r"py:.*", r".*.hi.l1b.hi_l1b.TriggerId.*"),
(r"py:.*", r".*.hit.l0.utils.*"),
(r"py:.*", r".*.hit.l0.data_classes.*"),
(r"py:.*", r".*.hit.l1a.*"),
159 changes: 150 additions & 9 deletions imap_processing/hi/l1b/hi_l1b.py
@@ -1,14 +1,34 @@
"""IMAP-HI L1B processing module."""

import logging
from enum import IntEnum

import numpy as np
import xarray as xr

from imap_processing import imap_module_directory
from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
-from imap_processing.hi.utils import HIAPID, create_dataset_variables
+from imap_processing.hi.utils import HIAPID, HiConstants, create_dataset_variables
from imap_processing.utils import convert_raw_to_eu


class TriggerId(IntEnum):
"""IntEnum class for trigger id values."""

A = 1
B = 2
C = 3


class CoincidenceBitmap(IntEnum):
"""IntEnum class for coincidence type bitmap values."""

A = 2**3
B = 2**2
C1 = 2**1
C2 = 2**0
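# A quick illustration of the bit layout (constructed example, not part of
# the diff): an event registered by detectors A, B, and C1, but not C2,
# carries all three of those bits:
#     CoincidenceBitmap.A | CoincidenceBitmap.B | CoincidenceBitmap.C1 == 0b1110 == 14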


logger = logging.getLogger(__name__)
ATTR_MGR = ImapCdfAttributes()
ATTR_MGR.add_instrument_global_attrs("hi")
@@ -90,28 +110,149 @@ def annotate_direct_events(l1a_dataset: xr.Dataset) -> xr.Dataset:
l1b_dataset : xarray.Dataset
L1B direct event data.
"""
-n_epoch = l1a_dataset["epoch"].size
+l1b_dataset = compute_coincidence_type_and_time_deltas(l1a_dataset)
l1b_de_var_names = [
"coincidence_type",
"esa_energy_step",
"delta_t_ab",
"delta_t_ac1",
"delta_t_bc1",
"delta_t_c1c2",
"spin_phase",
"hae_latitude",
"hae_longitude",
"quality_flag",
"nominal_bin",
]
new_data_vars = create_dataset_variables(
-l1b_de_var_names, (n_epoch,), att_manager_lookup_str="hi_de_{0}"
+l1b_de_var_names, l1a_dataset["epoch"].size, att_manager_lookup_str="hi_de_{0}"
)
-l1b_dataset = l1a_dataset.assign(new_data_vars)
+l1b_dataset = l1b_dataset.assign(new_data_vars)
l1b_dataset = l1b_dataset.drop_vars(
["tof_1", "tof_2", "tof_3", "de_tag", "ccsds_met", "meta_event_met"]
)

de_global_attrs = ATTR_MGR.get_global_attributes("imap_hi_l1b_de_attrs")
l1b_dataset.attrs.update(**de_global_attrs)
return l1b_dataset


def compute_coincidence_type_and_time_deltas(dataset: xr.Dataset) -> xr.Dataset:
"""
Compute coincidence type and time deltas.

Adds the new variables "coincidence_type", "delta_t_ab", "delta_t_ac1",
"delta_t_bc1", and "delta_t_c1c2" to the input xarray.Dataset and returns
the updated xarray.Dataset.

Parameters
----------
dataset : xarray.Dataset
The L1A/B dataset that results from reading in the L1A CDF and
allocating the new L1B DataArrays.

Returns
-------
xarray.Dataset
Updated xarray.Dataset with 5 new variables added.
"""
new_data_vars = create_dataset_variables(
[
"coincidence_type",
"delta_t_ab",
"delta_t_ac1",
"delta_t_bc1",
"delta_t_c1c2",
],
len(dataset.epoch),
"hi_de_{0}",
)
out_ds = dataset.assign(new_data_vars)

# compute masks needed for coincidence type and delta t calculations
a_mask = out_ds.trigger_id.values == TriggerId.A
b_mask = out_ds.trigger_id.values == TriggerId.B
c_mask = out_ds.trigger_id.values == TriggerId.C

tof_1_valid_mask = np.isin(
out_ds.tof_1.values, HiConstants.TOF1_BAD_VALUES, invert=True
)
tof_2_valid_mask = np.isin(
out_ds.tof_2.values, HiConstants.TOF2_BAD_VALUES, invert=True
)
tof_1and2_valid_mask = tof_1_valid_mask & tof_2_valid_mask
tof_3_valid_mask = np.isin(
out_ds.tof_3.values, HiConstants.TOF3_BAD_VALUES, invert=True
)

# Table denoting how hit-first mask and valid TOF masks are used to set
# coincidence type bitmask
# -----------------------------------------------------------------------
# | Trigger ID | Hit First | TOF 1 Valid | TOF 2 Valid | TOF 3 Valid |
# -----------------------------------------------------------------------
# | 1 | A | A,B | A,C1 | C1,C2 |
# | 2 | B | A,B | B,C1 | C1,C2 |
# | 3 | C1 | A,C1 | B,C1 | C1,C2 |
# Set coincidence type bitmask
out_ds.coincidence_type[a_mask | tof_1_valid_mask] |= CoincidenceBitmap.A
Contributor:
For me personally I would really appreciate a docstring on each bitmask. Maybe it's overkill for some people, but it helps me to immediately understand the purpose.

Example:
Set the 'A' bit in the 'coincidence_type' variable where:

• The trigger is 'A' (using a_mask), OR
• tof_1 is valid (using tof_1_valid_mask).

Contributor Author:
My philosophy is that if you need a bunch of comments to explain your code, then the code is not written in a way that it can be clearly understood. IMO, the comments you wrote are exactly what the code says with a slightly different order. For this instance, I think the comment is redundant.
out_ds.coincidence_type[
b_mask | (a_mask & tof_1_valid_mask) | (c_mask & tof_2_valid_mask)
] |= CoincidenceBitmap.B
out_ds.coincidence_type[c_mask | tof_2_valid_mask | tof_3_valid_mask] |= (
CoincidenceBitmap.C1
)
out_ds.coincidence_type[tof_3_valid_mask] |= CoincidenceBitmap.C2

# Table denoting how TOF is interpreted for each Trigger ID
# -----------------------------------------------------------------------
# | Trigger ID | Hit First | TOF 1 | TOF 2 | TOF 3 |
# -----------------------------------------------------------------------
# | 1 | A | t_b - t_a | t_c1 - t_a | t_c2 - t_c1 |
# | 2 | B | t_a - t_b | t_c1 - t_b | t_c2 - t_c1 |
# | 3 | C | t_a - t_c1 | t_b - t_c1 | t_c2 - t_c1 |
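# Note the sign convention implied by the two tables: delta_t_ab is always
# t_b - t_a, delta_t_ac1 is always t_c1 - t_a, and delta_t_bc1 is always
# t_c1 - t_b, regardless of which detector was hit first. That is why a TOF
# is negated (or two TOFs differenced) below whenever the first-hit detector
# is not the reference detector for that delta.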
# delta_t_ab
out_ds.delta_t_ab.values[a_mask & tof_1_valid_mask] = (
out_ds.tof_1.values[a_mask & tof_1_valid_mask].astype(np.float32)
* HiConstants.TOF1_TICK_PER_NS
)
out_ds.delta_t_ab.values[b_mask & tof_1_valid_mask] = (
-out_ds.tof_1.values[b_mask & tof_1_valid_mask].astype(np.float32)
* HiConstants.TOF1_TICK_PER_NS
)
out_ds.delta_t_ab.values[c_mask & tof_1and2_valid_mask] = (
out_ds.tof_2.values[c_mask & tof_1and2_valid_mask].astype(np.float32)
* HiConstants.TOF2_TICK_PER_NS
- out_ds.tof_1.values[c_mask & tof_1and2_valid_mask].astype(np.float32)
* HiConstants.TOF1_TICK_PER_NS
)

Contributor:
I think that this could be split into two separate steps for clarity:

delta_t_2 = out_ds.tof_2.values[c_mask & tof_1and2_valid_mask].astype(np.float32) * HiConstants.TOF2_TICK_PER_NS
delta_t_1 = out_ds.tof_1.values[c_mask & tof_1and2_valid_mask].astype(np.float32) * HiConstants.TOF1_TICK_PER_NS
out_ds.delta_t_ab.values[c_mask & tof_1and2_valid_mask] = delta_t_2 - delta_t_1

# delta_t_ac1
out_ds.delta_t_ac1.values[a_mask & tof_2_valid_mask] = (
out_ds.tof_2.values[a_mask & tof_2_valid_mask].astype(np.float32)
* HiConstants.TOF2_TICK_PER_NS
)
out_ds.delta_t_ac1.values[b_mask & tof_1and2_valid_mask] = (
out_ds.tof_2.values[b_mask & tof_1and2_valid_mask].astype(np.float32)
* HiConstants.TOF2_TICK_PER_NS
- out_ds.tof_1.values[b_mask & tof_1and2_valid_mask].astype(np.float32)
* HiConstants.TOF1_TICK_PER_NS
)

Contributor:
I think this could be split for clarity:

delta_t_2 = out_ds.tof_2.values[b_mask & tof_1and2_valid_mask] * HiConstants.TOF2_TICK_PER_NS
delta_t_1 = out_ds.tof_1.values[b_mask & tof_1and2_valid_mask] * HiConstants.TOF1_TICK_PER_NS
out_ds.delta_t_ac1.values[b_mask & tof_1and2_valid_mask] = delta_t_2 - delta_t_1
out_ds.delta_t_ac1.values[c_mask & tof_1_valid_mask] = (
# cast to float32 before negating; negating an unsigned TOF array would wrap
-out_ds.tof_1.values[c_mask & tof_1_valid_mask].astype(np.float32)
* HiConstants.TOF1_TICK_PER_NS
)

# delta_t_bc1
out_ds.delta_t_bc1.values[a_mask & tof_1and2_valid_mask] = (
out_ds.tof_2.values[a_mask & tof_1and2_valid_mask].astype(np.float32)
* HiConstants.TOF2_TICK_PER_NS
- out_ds.tof_1.values[a_mask & tof_1and2_valid_mask].astype(np.float32)
* HiConstants.TOF1_TICK_PER_NS
)

Contributor:
Same here I would split it up:

delta_t_2 = out_ds.tof_2.values[a_mask & tof_1and2_valid_mask] * HiConstants.TOF2_TICK_PER_NS
delta_t_1 = out_ds.tof_1.values[a_mask & tof_1and2_valid_mask] * HiConstants.TOF1_TICK_PER_NS
out_ds.delta_t_bc1.values[a_mask & tof_1_valid_mask & tof_2_valid_mask] = delta_t_2 - delta_t_1
out_ds.delta_t_bc1.values[b_mask & tof_2_valid_mask] = (
out_ds.tof_2.values[b_mask & tof_2_valid_mask] * HiConstants.TOF2_TICK_PER_NS
)
out_ds.delta_t_bc1.values[c_mask & tof_2_valid_mask] = (
-out_ds.tof_2.values[c_mask & tof_2_valid_mask].astype(np.float32)
* HiConstants.TOF2_TICK_PER_NS
)

# delta_t_c1c2
out_ds.delta_t_c1c2.values[tof_3_valid_mask] = (
out_ds.tof_3.values[tof_3_valid_mask] * HiConstants.TOF3_TICK_PER_NS
)

return out_ds
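
As a concrete trace of compute_coincidence_type_and_time_deltas (a constructed example, not data from the PR): a single trigger_id = 2 (B hit first) event with tof_1 = 10, tof_2 = 20, tof_3 = 1023, and 1 ns TOF1/TOF2 ticks would produce

coincidence_type = A | B | C1 = 14
delta_t_ab = -10.0 ns (t_b - t_a)
delta_t_ac1 = 20.0 - 10.0 = 10.0 ns (t_c1 - t_a)
delta_t_bc1 = 20.0 ns (t_c1 - t_b)
delta_t_c1c2 = FILLVAL (tof_3 = 1023 is a TOF3 bad value)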
33 changes: 33 additions & 0 deletions imap_processing/hi/utils.py
@@ -1,6 +1,7 @@
"""IMAP-Hi utils functions."""

from collections.abc import Sequence
from dataclasses import dataclass
from enum import IntEnum
from typing import Optional, Union

@@ -34,6 +35,38 @@ def sensor(self) -> str:
return self.name[1:3] + "sensor"


@dataclass(frozen=True)
class HiConstants:
"""
Constants for Hi instrument.

Attributes
----------
TOF1_TICK_PER_NS : int
Duration of Time-of-Flight 1 clock tick in nanoseconds.
TOF2_TICK_PER_NS : int
Duration of Time-of-Flight 2 clock tick in nanoseconds.
TOF3_TICK_PER_NS : int
Duration of Time-of-Flight 3 clock tick in nanoseconds.
TOF1_BAD_VALUES : tuple[int, ...]
Tuple of values indicating TOF1 does not contain a valid time.
TOF2_BAD_VALUES : tuple[int, ...]
Tuple of values indicating TOF2 does not contain a valid time.
TOF3_BAD_VALUES : tuple[int, ...]
Tuple of values indicating TOF3 does not contain a valid time.
"""

TOF1_TICK_PER_NS = 1 # 1 ns
TOF2_TICK_PER_NS = 1 # 1 ns
TOF3_TICK_PER_NS = 2 # 0.5 ns

# These values are stored in the TOF telemetry when the TOF timer
# does not have valid data.
TOF1_BAD_VALUES = (511, 1023)
TOF2_BAD_VALUES = (1023,)
TOF3_BAD_VALUES = (1023,)
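
# Minimal usage sketch (illustration only; mirrors the np.isin calls in
# hi_l1b.compute_coincidence_type_and_time_deltas):
#     tof_1 = np.array([5, 511, 42, 1023])
#     np.isin(tof_1, HiConstants.TOF1_BAD_VALUES, invert=True)
#     -> array([ True, False,  True, False])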


def full_dataarray(
name: str,
attrs: dict,
118 changes: 116 additions & 2 deletions imap_processing/tests/hi/test_hi_l1b.py
@@ -1,8 +1,16 @@
"""Test coverage for imap_processing.hi.l1b.hi_l1b.py"""

import numpy as np
import pytest
import xarray as xr

from imap_processing.hi.l1a.hi_l1a import hi_l1a
-from imap_processing.hi.l1b.hi_l1b import hi_l1b
-from imap_processing.hi.utils import HIAPID
+from imap_processing.hi.l1b.hi_l1b import (
+    CoincidenceBitmap,
+    compute_coincidence_type_and_time_deltas,
+    hi_l1b,
+)
+from imap_processing.hi.utils import HIAPID, HiConstants


def test_hi_l1b_hk(hi_l0_test_data_path):
@@ -29,3 +37,109 @@ def test_hi_l1b_de(create_de_data, tmp_path):
l1b_dataset = hi_l1b(processed_data[0], data_version=data_version)
assert l1b_dataset.attrs["Logical_source"] == "imap_hi_l1b_45sensor-de"
assert len(l1b_dataset.data_vars) == 14


@pytest.fixture()
def synthetic_trigger_id_and_tof_data():
"""Create synthetic minimum dataset for testing the
coincidence_type_and_time_deltas algorithm."""
# The following coincidence type table shows possible values to consider
Contributor:
This table is really helpful! I should add something similar for Lo's coincidence types.
# Value| # Exp | Requirements to get this value
# -----|-------|-------------------------------
# 0 | 0 | Non-event not recorded
# 1 | 0 | Can't trigger c2 only
# 2 | 2 | trigger_id = 3, tof_3 invalid
# 3 | 2 | trigger_id = 3, tof_3 valid
# 4 | 2 | trigger_id = 2, no valid tofs
# 5 | 0 | B and C2 not possible?
# 6 | 4 | trigger_id = 2 OR 3, tof_2 valid
# 7 | 4 | trigger_id = 2 OR 3, tof_2/3 valid
# 8 | 2 | trigger_id = 3, no valid tofs
# 9 | 0 | A and C2 not possible?
# 10 | 3 | trigger_id = 1, tof_2 OR trigger_id = 3, tof_1
# 11 | 3 | trigger_id = 1, tof_2/3, OR trigger_id = 3, tof_1/3
# 12 | 2 | trigger_id = 1 OR 2, tof_1
# 13 | 0 | A/B and C2 not possible?
# 14 | 3 | trigger_id = 1 OR 2 OR 3, tof_1/2
# 15 | 3 | trigger_id = 1, 2, 3, tof_1/2/3
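# Reading one row as a sanity check: value 14 (A|B|C1 set, C2 clear) requires
# tof_1 and tof_2 valid with tof_3 invalid; the grid built below contains
# exactly one such combination per trigger ID, hence the expected count of 3.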

# Use meshgrid to get all combinations of trigger_id and tof valid/invalid
# Note: this generates 6 impossible occurrences where C1 is not triggered
# but C2 is. Those are manually removed below.
ids = np.arange(3) + 1
tof1s = np.array(np.concatenate((HiConstants.TOF1_BAD_VALUES, [1])))
tof2s = np.array(np.concatenate((HiConstants.TOF2_BAD_VALUES, [2])))
tof3s = np.array(np.concatenate((HiConstants.TOF3_BAD_VALUES, [3])))
var_names = ["trigger_id", "tof_1", "tof_2", "tof_3"]
data = np.meshgrid(ids, tof1s, tof2s, tof3s)
Contributor:
Great idea to more easily cover the combinations!

Contributor Author:
I can't quite take credit... found it on SO.

data = [arr.flatten() for arr in data]
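# A standalone toy illustration of the meshgrid flattening trick used above
# (arbitrary values, not the fixture's):
#     a_grid, b_grid = np.meshgrid([1, 2, 3], [10, 20])
#     a_grid.flatten()  # array([1, 2, 3, 1, 2, 3])
#     b_grid.flatten()  # array([10, 10, 10, 20, 20, 20])
# Every (a, b) combination appears exactly once across the parallel arrays.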
# Remove impossible combinations
good_inds = np.nonzero(
np.logical_not(
np.logical_and(data[0] != 3, ((data[2] >= 511) & (data[3] < 511)))
)
)
data = [arr[good_inds] for arr in data]
data_vars = {
n: xr.DataArray(arr, dims=["epoch"]) for n, arr in zip(var_names, data)
}
synthetic_l1a_ds = xr.Dataset(
coords={
"epoch": xr.DataArray(
np.arange(data_vars["trigger_id"].size), name="epoch", dims=["epoch"]
)
},
data_vars=data_vars,
)
expected_histogram = np.array([0, 0, 2, 2, 2, 0, 4, 4, 2, 0, 3, 3, 2, 0, 3, 3])
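# expected_histogram mirrors the '# Exp' column of the coincidence table above.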
return synthetic_l1a_ds, expected_histogram


def test_compute_coincidence_type_and_time_deltas(synthetic_trigger_id_and_tof_data):
"""Test coverage for
`imap_processing.hi.hi_l1b.compute_coincidence_type_and_time_deltas`."""
updated_dataset = compute_coincidence_type_and_time_deltas(
synthetic_trigger_id_and_tof_data[0]
)
for var_name in [
"coincidence_type",
"delta_t_ab",
"delta_t_ac1",
"delta_t_bc1",
"delta_t_c1c2",
]:
assert var_name in updated_dataset.data_vars
# verify coincidence type values
coincidence_hist, bins = np.histogram(
updated_dataset.coincidence_type, bins=np.arange(17)
)
np.testing.assert_array_equal(
coincidence_hist, synthetic_trigger_id_and_tof_data[1]
)
# verify delta_t values are valid in the correct locations
np.testing.assert_array_equal(
updated_dataset.delta_t_ab != updated_dataset.delta_t_ab.FILLVAL,
updated_dataset.coincidence_type >= 12,
)
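# Note: coincidence_type >= 12 is exactly the set {12, 13, 14, 15}, i.e. the
# types with both the A (8) and B (4) bits set, which is when delta_t_ab is
# computed.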
np.testing.assert_array_equal(
updated_dataset.delta_t_ac1 != updated_dataset.delta_t_ac1.FILLVAL,
np.logical_and(
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.A),
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.C1),
),
)
np.testing.assert_array_equal(
updated_dataset.delta_t_bc1 != updated_dataset.delta_t_bc1.FILLVAL,
np.logical_and(
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.B),
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.C1),
),
)
np.testing.assert_array_equal(
updated_dataset.delta_t_c1c2 != updated_dataset.delta_t_c1c2.FILLVAL,
np.logical_and(
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.C1),
np.bitwise_and(updated_dataset.coincidence_type, CoincidenceBitmap.C2),
),
)