Commit
Merge pull request IMAP-Science-Operations-Center#288 from bourque/initial-codice-l1a

Initial CoDICE L1A Processing Algorithm
bourque authored Nov 30, 2023
2 parents d7a8dfb + 5ea5472 commit 39e3dae
Showing 19 changed files with 824 additions and 408 deletions.
2 changes: 1 addition & 1 deletion docs/Makefile
@@ -3,7 +3,7 @@

# You can set these variables from the command line, and also
# from the environment for the first two.
SPHINXOPTS ?=
SPHINXOPTS ?= -W --keep-going -n
SPHINXBUILD ?= sphinx-build
SOURCEDIR = source
BUILDDIR = build
23 changes: 19 additions & 4 deletions docs/source/conf.py
@@ -76,6 +76,8 @@

# Autosummary
autosummary_generate = True
# Do not generate separate pages for class methods
numpydoc_show_class_members = False

intersphinx_mapping = {
"numpy": ("https://numpy.org/doc/stable/", None),
@@ -87,9 +89,22 @@

# Reference targets not found
nitpicky = True
# IntEnum inherited method targets aren't found through intersphinx
nitpick_ignore_regex = [(r"py:.*", r".*APID\..*")]
# Ignore the inherited members from the HitAPID IntEnum class

# Some inherited method targets aren't found through intersphinx
nitpick_ignore_regex = [
(r"py:.*", r".*APID\..*"),
(r"py:.*", r".*IntEnum.*"),
(r"py:.*", r".*space_packet_parser.*"),
(r"py:.*", r".*CoDICECompression.*"),
]

# Ignore the inherited members from the <instrument>APID IntEnum class
numpydoc_show_inherited_class_members = {
"imap_processing.hit.l0.hit_l1a_decom.HitAPID": False
"imap_processing.hit.l0.hit_l1a_decom.HitAPID": False,
"imap_processing.codice.utils.CODICEAPID": False,
}

# Suppress specific warnings
suppress_warnings = [
"autosectionlabel.*"
] # Duplicate label use (e.g. imap_processing.codice.codice_l0)
13 changes: 8 additions & 5 deletions docs/source/reference/codice.rst
@@ -8,21 +8,24 @@ CoDICE (Compact Dual Ion Composition Experiment)
This is the CoDICE Instrument module, which contains the code for processing
data from the CoDICE instrument.

The L0 code to decommutate the CCSDS packet data can be found below:
The processing code to decommutate the CCSDS packets (L0) and create L1a data
products can be found below:

.. autosummary::
:toctree: generated/
:template: autosummary.rst
:recursive:

l0.decom_codice
codice_l0
codice_l1a

The code below is used to decompress an 8 bit science value to a 24- or 32-bit
uncompressed value:
The modules below contain various utility classes and functions to support L0
and L1a processing:

.. autosummary::
:toctree: generated/
:template: autosummary.rst
:recursive:

l0.decompress_codice
utils
decompress
2 changes: 1 addition & 1 deletion imap_processing/cdf/utils.py
@@ -63,7 +63,7 @@ def write_cdf(
file_start_date = None
if "idex" in data.attrs["Logical_source"]:
file_start_date = data["Epoch"][0].data
elif "swe" in data.attrs["Logical_source"]:
else:
start_time = data["Epoch"].data[0]
file_start_date = calc_start_time(start_time)
if file_start_date is None:
1 change: 1 addition & 0 deletions imap_processing/codice/__init__.py
@@ -0,0 +1 @@
__version__ = "01"
116 changes: 116 additions & 0 deletions imap_processing/codice/cdf_attrs.py
@@ -0,0 +1,116 @@
"""CDF attrs for CoDICE.
This website provides information about what variables are required and what
their value should be:
https://spdf.gsfc.nasa.gov/istp_guide/istp_guide.html
For further details, see the documentation provided at
https://imap-processing.readthedocs.io/en/latest/development/CDFs/cdf_requirements.html
"""


from imap_processing.cdf.defaults import GlobalConstants
from imap_processing.cdf.global_attrs import (
AttrBase,
GlobalDataLevelAttrs,
GlobalInstrumentAttrs,
ScienceAttrs,
StringAttrs,
)
from imap_processing.codice import __version__

descriptor = "CoDICE>Compact Dual Ion Composition Experiment"
codice_description_text = (
"The Compact Dual Ion Composition Experiment (CoDICE) will measure the "
"distributions and composition of interstellar pickup ions (PUIs), "
"particles that make it through the heliosheath into the heliosphere. "
"CoDICE also collects and characterizes solar wind ions including the "
"mass and composition of highly energized particles (called suprathermal) "
"from the Sun. CoDICE combines an electrostatic analyzer(ESA) with a "
"Time-Of-Flight versus Energy (TOF / E) subsystem to simultaneously "
"measure the velocity, arrival direction, ionic charge state, and mass of "
"specific species of ions in the LISM. CoDICE also has a path for higher "
"energy particles to skip the ESA but still get measured by the common "
"TOF / E system. These measurements are critical in determining the Local "
"Interstellar Medium (LISM) composition and flow properties, the origin of "
"the enigmatic suprathermal tails on the solar wind distributions and "
"advance understanding of the acceleration of particles in the heliosphere."
)

codice_base = GlobalInstrumentAttrs(
version=__version__, descriptor=descriptor, text=codice_description_text
)

codice_l1a_global_attrs = GlobalDataLevelAttrs(
data_type="L1A->Level-1A",
logical_source="imap_codice_l1a",
logical_source_desc="IMAP Mission CoDICE Instrument Level-1A Data",
instrument_base=codice_base,
)

codice_l1b_global_attrs = GlobalDataLevelAttrs(
data_type="L1B->Level-1B",
logical_source="imap_cpdice_l1b",
logical_source_desc="IMAP Mission CoDICE Instrument Level-1B Data",
instrument_base=codice_base,
)

int_base = AttrBase(
validmin=0,
validmax=GlobalConstants.INT_MAXVAL,
format="I12",
var_type="support_data",
display_type="no_plot",
)

float_base = AttrBase(
validmin=0,
validmax=GlobalConstants.INT_MAXVAL,
format="I12",
var_type="support_data",
display_type="no_plot",
)

string_base = StringAttrs(
depend_0="Epoch",
)

codice_metadata_attrs = ScienceAttrs(
validmin=0,
validmax=GlobalConstants.INT_MAXVAL,
display_type="no_plot",
depend_0="Epoch",
format="I12",
units="dN",
var_type="support_data",
variable_purpose="PRIMARY",
)

# TODO: ask CoDICE team about valid min and max values of these data
l1a_science_attrs = ScienceAttrs(
validmin=0,
validmax=GlobalConstants.INT_MAXVAL,
display_type="spectrogram",
depend_0="Epoch",
depend_1="Energy",
depend_2="Counts",
format="I12",
units="dN",
var_type="data",
variable_purpose="PRIMARY",
)

l1b_science_attrs = ScienceAttrs(
validmin=0,
validmax=GlobalConstants.INT_MAXVAL,
display_type="spectrogram",
depend_0="Epoch",
depend_1="Energy",
depend_2="Angle",
depend_3="Rates",
format="I12",
units="dN",
var_type="data",
variable_purpose="PRIMARY",
)
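
These attribute objects are consumed later in this changeset; the sketch below is not part of the diff but mirrors how imap_processing/codice/utils.py attaches codice_metadata_attrs to an xarray variable via dataclasses.replace(...).output(). The field name "NHK_TEMP" is a hypothetical placeholder.

# Sketch only: mirrors the pattern used in imap_processing/codice/utils.py.
# "NHK_TEMP" is a made-up field name used purely for illustration.
import dataclasses

import xarray as xr

from imap_processing.codice import cdf_attrs

variable = xr.DataArray(
    [1, 2, 3],  # raw values pulled from decommutated packets
    dims=["Epoch"],
    attrs=dataclasses.replace(
        cdf_attrs.codice_metadata_attrs,
        catdesc="NHK_TEMP",
        fieldname="NHK_TEMP",
        label_axis="NHK_TEMP",
        depend_0="Epoch",
    ).output(),
)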
35 changes: 35 additions & 0 deletions imap_processing/codice/codice_l0.py
@@ -0,0 +1,35 @@
"""Perform CoDICE L0 processing.
This module contains a function to decommutate CoDICE CCSDS packets using
XTCE packet definitions.
For more information on this process and the latest versions of the packet
definitions, see https://lasp.colorado.edu/galaxy/display/IMAP/CoDICE.
Use
---
from imap_processing.codice.codice_l0 import decom_packets
packet_file = '/path/to/raw_ccsds_20230822_122700Z_idle.bin'
xtce_document = '/path/to/P_COD_NHK.xml'
packet_list = decom_packets(packet_file, xtce_document)
"""

from imap_processing import decom, imap_module_directory


def decom_packets(packet_file: str) -> list:
"""Decom CoDICE data packets using CoDICE packet definition.
Parameters
----------
packet_file : str
Path to data packet path with filename.
Returns
-------
list : list
all the unpacked data.
"""
xtce_document = f"{imap_module_directory}/codice/packet_definitions/P_COD_NHK.xml"
return decom.decom_packets(packet_file, xtce_document)
59 changes: 59 additions & 0 deletions imap_processing/codice/codice_l1a.py
@@ -0,0 +1,59 @@
"""Perform CoDICE l1a processing.
This module contains functions to process decommutated CoDICE packets and create
L1a data products.
Use
---
from imap_processing.codice.codice_l0 import decom_packets
from imap_processing.codice.codice_l1a import codice_l1a
packets = decom_packets(packet_file, xtce_document)
cdf_filename = codice_l1a(packets)
"""

import logging

import space_packet_parser

from imap_processing.cdf.utils import write_cdf
from imap_processing.codice.utils import CODICEAPID, create_dataset
from imap_processing.utils import group_by_apid, sort_by_time


def codice_l1a(
packets: list[space_packet_parser.parser.Packet], cdf_directory: str
) -> str:
"""Process CoDICE l0 data to create l1a data products.
Parameters
----------
packets : list[space_packet_parser.parser.Packet]
Decom data list that contains all APIDs
cdf_directory : str
The directory in which to write the output CDF file.
Returns
-------
cdf_filename : str
The path to the CDF file that was created
"""
# Group data by APID and sort by time
grouped_data = group_by_apid(packets)

for apid in grouped_data.keys():
if apid == CODICEAPID.COD_NHK:
sorted_packets = sort_by_time(grouped_data[apid], "SHCOARSE")
data = create_dataset(packets=sorted_packets)
else:
logging.debug(f"{apid} is currently not supported")

# Write data to CDF
cdf_filename = write_cdf(
data,
mode="",
description="hk",
directory=cdf_directory,
)

return cdf_filename
File renamed without changes.
@@ -28,11 +28,11 @@
import lzma
from enum import IntEnum

from imap_processing.codice.utils.codice_utils import CoDICECompression
from imap_processing.codice.utils.constants import LOSSY_A_TABLE, LOSSY_B_TABLE
from imap_processing.codice.constants import LOSSY_A_TABLE, LOSSY_B_TABLE
from imap_processing.codice.utils import CoDICECompression


def _apply_lossy_a(compressed_value: int):
def _apply_lossy_a(compressed_value: int) -> int:
"""Apply 8-bit to 32-bit Lossy A decompression algorithm.
The Lossy A algorithm uses a lookup table imported into this module.
@@ -50,7 +50,7 @@ def _apply_lossy_a(compressed_value: int):
return LOSSY_A_TABLE[compressed_value]


def _apply_lossy_b(compressed_value: int):
def _apply_lossy_b(compressed_value: int) -> int:
"""Apply 8-bit to 32-bit Lossy B decompression algorithm.
The Lossy B algorithm uses a lookup table imported into this module.
@@ -68,7 +68,7 @@ def _apply_lossy_b(compressed_value: int):
return LOSSY_B_TABLE[compressed_value]


def _apply_lzma_lossless(compressed_value: int):
def _apply_lzma_lossless(compressed_value: int) -> int:
"""Apply LZMA lossless decompression algorithm.
Parameters
@@ -87,7 +87,7 @@ def _apply_lzma_lossless(compressed_value: int):
return decompressed_value


def decompress(compressed_value: int, algorithm: IntEnum):
def decompress(compressed_value: int, algorithm: IntEnum) -> int:
"""Decompress the value.
Apply the appropriate decompression algorithm(s) based on the value
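
For reference, a minimal usage sketch (not part of this commit) of the updated import paths and the decompress entry point, using the example value exercised in the test suite:

# Minimal sketch, assuming the enum members defined in
# imap_processing/codice/utils.py; 234 is the example value used in the tests.
from imap_processing.codice.decompress import decompress
from imap_processing.codice.utils import CoDICECompression

uncompressed = decompress(234, CoDICECompression.LOSSY_A)  # lookup-table decompression
print(uncompressed)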
Empty file.
18 changes: 0 additions & 18 deletions imap_processing/codice/l0/decom_codice.py

This file was deleted.

@@ -4,20 +4,28 @@

import pandas as pd
import pytest
import space_packet_parser

from imap_processing import imap_module_directory
from imap_processing.codice.l0 import decom_codice
from imap_processing.codice import codice_l0


@pytest.fixture(scope="session")
def decom_test_data():
"""Read test data from file"""
def decom_test_data() -> list:
"""Read test data from file
Returns
-------
data_packet_list : list[space_packet_parser.parser.Packet]
The list of decommutated packets
"""

packet_file = Path(
f"{imap_module_directory}/codice/tests/data/"
f"raw_ccsds_20230822_122700Z_idle.bin"
)
Path(f"{imap_module_directory}/codice/packet_definitions/P_COD_NHK.xml")
data_packet_list = decom_codice.decom_packets(packet_file)
data_packet_list = codice_l0.decom_packets(packet_file)
data_packet_list = [
packet
for packet in data_packet_list
@@ -28,12 +36,12 @@ def decom_test_data():


@pytest.fixture(scope="session")
def validation_data():
def validation_data() -> pd.core.frame.DataFrame:
"""Read in validation data from the CSV file
Returns
-------
validation_data : pandas DataFrame
validation_data : pandas.core.frame.DataFrame
The validation data read from the CSV, cleaned up and ready to compare
the decommutated packet with
"""
@@ -52,14 +60,17 @@ def validation_data():
return validation_data


def test_housekeeping_data(decom_test_data, validation_data):
def test_housekeeping_data(
decom_test_data: list[space_packet_parser.parser.Packet],
validation_data: pd.core.frame.DataFrame,
):
"""Compare the decommutated housekeeping data to the validation data.
Parameters
----------
decom_test_data : List[Packet]
The decommuted housekeeping packet data
validation_data : pandas DataFrame
decom_test_data : list[space_packet_parser.parser.Packet]
The decommutated housekeeping packet data
validation_data : pandas.core.frame.DataFrame
The validation data to compare against
"""

@@ -79,18 +90,35 @@ def test_housekeeping_data(decom_test_data, validation_data):
assert value.raw_value == validation_row[key]


def test_total_packets_in_data_file(decom_test_data):
"""Test if total packets in data file is correct"""
def test_total_packets_in_data_file(
decom_test_data: list[space_packet_parser.parser.Packet],
):
"""Test if total packets in data file is correct
Parameters
----------
decom_test_data : list[space_packet_parser.parser.Packet]
The decommutated housekeeping packet data
"""

total_packets = 99
assert len(decom_test_data) == total_packets


def test_ways_to_get_data(decom_test_data):
"""Test if data can be retrieved using different ways"""
def test_ways_to_get_data(decom_test_data: list[space_packet_parser.parser.Packet]):
"""Test if data can be retrieved using different ways
Parameters
----------
decom_test_data : list[space_packet_parser.parser.Packet]
The decommutated housekeeping packet data
"""

# First way to get data
data_value_using_key = decom_test_data[0].data

# Second way to get data
data_value_using_list = decom_test_data[0][1]

# Check if data is same
assert data_value_using_key == data_value_using_list
48 changes: 48 additions & 0 deletions imap_processing/codice/tests/test_codice_l1a.py
@@ -0,0 +1,48 @@
"""Tests the L1a processing for decommutated CoDICE data"""

from pathlib import Path

import pytest
import space_packet_parser

from imap_processing import imap_module_directory
from imap_processing.codice.codice_l0 import decom_packets
from imap_processing.codice.codice_l1a import codice_l1a


@pytest.fixture(scope="session")
def l0_test_data() -> list:
"""Decom some packets to be used for testing
Returns
-------
packets : list[space_packet_parser.parser.Packet]
A list of decommutated packets for testing
"""

packet_file = Path(
f"{imap_module_directory}/codice/tests/data/"
f"raw_ccsds_20230822_122700Z_idle.bin"
)
packets = decom_packets(packet_file)

return packets


def test_codice_l1a(
l0_test_data: list[space_packet_parser.parser.Packet], tmp_path: Path
):
"""Tests the ``codice_l1a`` function and ensured that a proper CDF file
was created
Parameters
----------
l0_test_data : list[space_packet_parser.parser.Packet]
A list of packets to process
tmp_path : pathlib.PosixPath
pytest fixture used to provide a temporary directory during testing
"""

cdf_filename = codice_l1a(l0_test_data, tmp_path)

assert Path(cdf_filename).name == "imap_codice_l1a_hk_20100101_v01.cdf"
@@ -5,8 +5,8 @@

import pytest

from imap_processing.codice.l0.decompress_codice import decompress
from imap_processing.codice.utils.codice_utils import CoDICECompression
from imap_processing.codice.decompress import decompress
from imap_processing.codice.utils import CoDICECompression

# Test the algorithms using input value of 234 (picked randomly)
LZMA_EXAMPLE = lzma.compress((234).to_bytes(1, byteorder="big"))
142 changes: 142 additions & 0 deletions imap_processing/codice/utils.py
@@ -0,0 +1,142 @@
"""Various classes and functions used throughout CoDICE processing.
This module contains utility classes and functions that are used by various
other CoDICE processing modules.
"""

import collections
import dataclasses
from enum import IntEnum

import space_packet_parser
import xarray as xr

from imap_processing.cdf.global_attrs import ConstantCoordinates
from imap_processing.codice import cdf_attrs


class CODICEAPID(IntEnum):
"""Create ENUM for CoDICE APIDs.
Parameters
----------
IntEnum : IntEnum
"""

COD_AUT = 1120
COD_NHK = 1136
COD_EVTMSG = 1137
COD_MEMDMP = 1138
COD_SHK = 1139
COD_RTS = 1141
COD_DIAG_SNSR_HV = 1145
COD_DIAG_OPTC_HV = 1146
COD_DIAG_APDFPGA = 1147
COD_DIAG_SSDFPGA = 1148
COD_DIAG_FSW = 1149
COD_DIAG_SYSVARS = 1150
COD_LO_IAL = 1152
COD_LO_PHA = 1153
COD_LO_INSTRUMENT_COUNTERS = 1154
COD_LO_PRIORITY_COUNTS = 1155
COD_LO_SW_SPECIES_COUNTS = 1156
COD_LO_NSW_SPECIES_COUNTS = 1157
COD_LO_SW_ANGULAR_COUNTS = 1158
COD_LO_NSW_ANGULAR_COUNTS = 1159
COD_HI_IAL = 1168
COD_HI_PHA = 1169
COD_HI_INSTRUMENTCOUNTERS = 1170
COD_HI_OMNI_SPECIES_COUNTS = 1172
COD_HI_SECT_SPECIES_COUNTS = 1173
COD_CSTOL_CONFIG = 2457


class CoDICECompression(IntEnum):
"""Create ENUM for CoDICE compression algorithms.
Parameters
----------
IntEnum : IntEnum
"""

NO_COMPRESSION = 1
LOSSY_A = 2
LOSSY_B = 3
LOSSLESS = 4
LOSSY_A_LOSSLESS = 5
LOSSY_B_LOSSLESS = 6


def add_metadata_to_array(
packet: space_packet_parser.parser.Packet, metadata_arrays: dict
) -> dict:
"""Add metadata to the metadata_arrays.
Parameters
----------
packet : space_packet_parser.parser.Packet
CODICE data packet
metadata_arrays : dict
Metadata arrays
Returns
-------
metadata_arrays : dict
Updated metadata arrays with values
"""
for key, value in packet.header.items():
metadata_arrays.setdefault(key, []).append(value.raw_value)

for key, value in packet.data.items():
metadata_arrays.setdefault(key, []).append(value.raw_value)

return metadata_arrays


def create_dataset(packets: list[space_packet_parser.parser.Packet]) -> xr.Dataset:
"""Create dataset for each metadata field.
Parameters
----------
packets : list[space_packet_parser.parser.Packet]
The list of packets to process
Returns
-------
xarray.Dataset
xarray dataset containing the metadata
"""
metadata_arrays = collections.defaultdict(list)

for packet in packets:
add_metadata_to_array(packet, metadata_arrays)

epoch_time = xr.DataArray(
metadata_arrays["SHCOARSE"],
name="Epoch",
dims=["Epoch"],
attrs=ConstantCoordinates.EPOCH,
)

dataset = xr.Dataset(
coords={"Epoch": epoch_time},
attrs=cdf_attrs.codice_l1a_global_attrs.output(),
)

for key, value in metadata_arrays.items():
if key == "SHCOARSE":
continue
else:
dataset[key] = xr.DataArray(
value,
dims=["Epoch"],
attrs=dataclasses.replace(
cdf_attrs.codice_metadata_attrs,
catdesc=key,
fieldname=key,
label_axis=key,
depend_0="Epoch",
).output(),
)

return dataset
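
A minimal end-to-end sketch (not part of the diff) showing how create_dataset fits between packet decommutation and CDF writing, mirroring codice_l1a.py; the file paths are placeholders:

# Sketch only: mirrors the flow in imap_processing/codice/codice_l1a.py.
# Paths below are placeholders, not files shipped with this commit.
from imap_processing.cdf.utils import write_cdf
from imap_processing.codice.codice_l0 import decom_packets
from imap_processing.codice.utils import CODICEAPID, create_dataset
from imap_processing.utils import group_by_apid, sort_by_time

packets = decom_packets("/path/to/raw_ccsds_20230822_122700Z_idle.bin")
grouped = group_by_apid(packets)
sorted_packets = sort_by_time(grouped[CODICEAPID.COD_NHK], "SHCOARSE")
dataset = create_dataset(packets=sorted_packets)
cdf_filename = write_cdf(dataset, mode="", description="hk", directory="/tmp")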
Empty file.
17 changes: 0 additions & 17 deletions imap_processing/codice/utils/codice_utils.py

This file was deleted.

684 changes: 344 additions & 340 deletions poetry.lock

Large diffs are not rendered by default.
