
Add numpydoc Pre-Commit For Codice & Glows Pt.7 Last One (#652)
* numpydoc precommit change

* numpydoc precommit change

* numpydoc update for glows

* numpydoc update codice

* Apply suggestions from code review

Co-authored-by: Maxine Hartnett <117409426+maxinelasp@users.noreply.github.com>

* final

---------

Co-authored-by: Maxine Hartnett <117409426+maxinelasp@users.noreply.github.com>
daralynnrhode and maxinelasp authored Jun 21, 2024
1 parent 19e48ba commit 2437799
Showing 14 changed files with 286 additions and 191 deletions.
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -36,5 +36,5 @@ repos:
rev: 'v1.7.0'
hooks:
- id: numpydoc-validation
exclude: '^imap_processing/tests/|.*test.*|^imap_processing/glows|^imap_processing/codice'
# Will need to remove exclude section once all numpydoc changes are made.
exclude: '^imap_processing/tests/|.*test.*'
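
For reference, the docstring style that the numpydoc-validation hook enforces is the pattern applied throughout this commit: the summary sentence on its own line below the opening quotes, parameter and return descriptions ending in periods, and named return values. The hook can typically be run locally with ``pre-commit run numpydoc-validation --all-files``. A minimal illustrative sketch of a compliant docstring (not taken from the repository):

def add_numbers(a: int, b: int) -> int:
    """
    Add two integers.

    Parameters
    ----------
    a : int
        The first addend.
    b : int
        The second addend.

    Returns
    -------
    total : int
        The sum of the two inputs.
    """
    total = a + b
    return total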

13 changes: 7 additions & 6 deletions imap_processing/codice/codice_l0.py
@@ -1,14 +1,14 @@
"""Perform CoDICE L0 processing.
"""
Perform CoDICE L0 processing.
This module contains a function to decommutate CoDICE CCSDS packets using
XTCE packet definitions.
For more information on this process and the latest versions of the packet
definitions, see https://lasp.colorado.edu/galaxy/display/IMAP/CoDICE.
Use
---
Notes
-----
from imap_processing.codice.codice_l0 import decom_packets
packet_file = '/path/to/raw_ccsds_20230822_122700Z_idle.bin'
packet_list = decom_packets(packet_file)
@@ -20,7 +20,8 @@


def decom_packets(packet_file: Path) -> list:
"""Decom CoDICE data packets using CoDICE packet definition.
"""
Decom CoDICE data packets using CoDICE packet definition.
Parameters
----------
@@ -30,7 +31,7 @@ def decom_packets(packet_file: Path) -> list:
Returns
-------
list : list
all the unpacked data.
All the unpacked data.
"""
packet_to_xtce_mapping = {
"imap_codice_l0_hi-counters-aggregated_20240429_v001.pkts": "P_COD_HI_INST_COUNTS_AGGREGATED.xml", # noqa
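
Based on the module docstring and the decom_packets signature shown in the diff above, a minimal end-to-end sketch of L0 decommutation might look like the following; the packet file path is the placeholder from the docstring, not a real file:

from pathlib import Path

from imap_processing.codice.codice_l0 import decom_packets

# Placeholder path from the module docstring; point this at an actual CCSDS packet file.
packet_file = Path("/path/to/raw_ccsds_20230822_122700Z_idle.bin")

# Decommutate the packets using the XTCE definition mapped to the file name.
packet_list = decom_packets(packet_file)
print(f"Unpacked {len(packet_list)} packets")
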
69 changes: 38 additions & 31 deletions imap_processing/codice/codice_l1a.py
@@ -1,10 +1,10 @@
"""Perform CoDICE l1a processing.
"""
Perform CoDICE l1a processing.
This module processes decommutated CoDICE packets and creates L1a data products.
Use
---
Notes
-----
from imap_processing.codice.codice_l0 import decom_packets
from imap_processing.codice.codice_l1a import process_codice_l1a
packets = decom_packets(packet_file)
@@ -38,23 +38,24 @@


class CoDICEL1aPipeline:
"""Contains methods for processing L0 data and creating L1a data products.
"""
Contains methods for processing L0 data and creating L1a data products.
Attributes
Parameters
----------
table_id : int
A unique ID assigned to a specific table configuration. This field is
used to link the overall acquisition and processing settings to a
specific table configuration
specific table configuration.
plan_id : int
The plan table that was in use. In conjunction with ``plan_step``,
describes which counters are included in the data packet
describes which counters are included in the data packet.
plan_step : int
Plan step that was active when the data was acquired and processed. In
conjunction with ``plan_id``, describes which counters are included
in the data packet
in the data packet.
view_id : int
Provides information about how data was collapsed and/or compressed
Provides information about how data was collapsed and/or compressed.
Methods
-------
@@ -80,22 +81,22 @@ def __init__(self, table_id: int, plan_id: int, plan_step: int, view_id: int):
def create_science_dataset(
self, start_time: np.datetime64, data_version: str
) -> xr.Dataset:
"""Create an ``xarray`` dataset for the unpacked science data.
"""
Create an ``xarray`` dataset for the unpacked science data.
The dataset can then be written to a CDF file.
Parameters
----------
start_time : numpy.datetime64
The start time of the packet, used to determine epoch data variable
The start time of the packet, used to determine epoch data variable.
data_version : str
Version of the data product being created
Version of the data product being created.
Returns
-------
xarray.Dataset
``xarray`` dataset containing the science data and supporting metadata
dataset : xarray.Dataset
The ``xarray`` dataset containing the science data and supporting metadata.
"""
# Set the CDF attrs
cdf_attrs = ImapCdfAttributes()
@@ -173,7 +174,8 @@ def create_science_dataset(
return dataset

def get_acquisition_times(self):
"""Retrieve the acquisition times via the Lo stepping table.
"""
Retrieve the acquisition times via the Lo stepping table.
Get the acquisition times from the data file based on the values of
``plan_id`` and ``plan_step``
@@ -216,7 +218,8 @@ def get_acquisition_times(self):
self.acquisition_times.append(lo_stepping_values.acq_time[row_number])

def get_data_products(self, apid: int):
"""Retrieve various settings for defining the data products.
"""
Retrieve various settings for defining the data products.
Parameters
----------
@@ -230,7 +233,8 @@ def get_data_products(self, apid: int):
self.dataset_name = config["dataset_name"]

def get_esa_sweep_values(self):
"""Retrieve the ESA sweep values.
"""
Retrieve the ESA sweep values.
Get the ElectroStatic Analyzer (ESA) sweep values from the data file
based on the values of ``plan_id`` and ``plan_step``
@@ -260,7 +264,8 @@ def get_esa_sweep_values(self):
self.esa_sweep_values = sweep_table["esa_v"].values

def unpack_science_data(self, science_values: str):
"""Unpack the science data from the packet.
"""
Unpack the science data from the packet.
For LO SW Species Counts data, the science data within the packet is a
blob of compressed values of length 2048 bits (16 species * 128 energy
@@ -270,7 +275,7 @@ def unpack_science_data(self, science_values: str):
Parameters
----------
science_values : str
A string of binary data representing the science values of the data
A string of binary data representing the science values of the data.
"""
self.compression_algorithm = constants.LO_COMPRESSION_ID_LOOKUP[self.view_id]
self.collapse_table_id = constants.LO_COLLAPSE_TABLE_ID_LOOKUP[self.view_id]
@@ -288,7 +293,8 @@


def get_params(packet) -> tuple[int, int, int, int]:
"""Return the four 'main' parameters used for l1a processing.
"""
Return the four 'main' parameters used for l1a processing.
The combination of these parameters largely determines what steps/values
are used to create CoDICE L1a data products and what steps are needed in
@@ -297,23 +303,23 @@ def get_params(packet) -> tuple[int, int, int, int]:
Parameters
----------
packet : space_packet_parser.parser.Packet
A packet for the APID of interest
A packet for the APID of interest.
Returns
-------
table_id : int
A unique ID assigned to a specific table configuration. This field is
used to link the overall acquisition and processing settings to a
specific table configuration
specific table configuration.
plan_id : int
The plan table that was in use. In conjunction with ``plan_step``,
describes which counters are included in the data packet
describes which counters are included in the data packet.
plan_step : int
Plan step that was active when the data was acquired and processed. In
conjunction with ``plan_id``, describes which counters are included
in the data packet
in the data packet.
view_id : int
Provides information about how data was collapsed and/or compressed
Provides information about how data was collapsed and/or compressed.
"""
table_id = packet.data["TABLE_ID"].raw_value
plan_id = packet.data["PLAN_ID"].raw_value
@@ -324,19 +330,20 @@


def process_codice_l1a(file_path: Path | str, data_version: str) -> xr.Dataset:
"""Process CoDICE l0 data to create l1a data products.
"""
Will process CoDICE l0 data to create l1a data products.
Parameters
----------
file_path : pathlib.Path | str
Path to the CoDICE L0 file to process
Path to the CoDICE L0 file to process.
data_version : str
Version of the data product being created
Version of the data product being created.
Returns
-------
dataset : xarray.Dataset
``xarray`` dataset containing the science data and supporting metadata
The ``xarray`` dataset containing the science data and supporting metadata.
"""
# TODO: Once simulated data for codice-hi is acquired, there shouldn't be a
# need to split the processing based on the file_path, so this function can
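
A hedged usage sketch of the L1a entry point, based only on the process_codice_l1a signature shown above; the input file name is taken from the packet-to-XTCE mapping in codice_l0.py, and the data_version format is an assumption:

from pathlib import Path

from imap_processing.codice.codice_l1a import process_codice_l1a

# File name taken from the packet-to-XTCE mapping in codice_l0.py; data_version format is assumed.
l0_file = Path("imap_codice_l0_hi-counters-aggregated_20240429_v001.pkts")
l1a_dataset = process_codice_l1a(l0_file, data_version="001")

# The returned xarray.Dataset holds the science data and supporting metadata.
print(l1a_dataset)
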
49 changes: 26 additions & 23 deletions imap_processing/codice/codice_l1b.py
@@ -1,13 +1,13 @@
"""Perform CoDICE l1b processing.
"""
Perform CoDICE l1b processing.
This module processes CoDICE l1a files and creates L1a data products.
Use
---
from imap_processing.codice.codice_l0 import decom_packets
from imap_processing.codice.codice_l1b import process_codice_l1b
dataset = process_codice_l1b(l1a_file)
Notes
-----
from imap_processing.codice.codice_l0 import decom_packets
from imap_processing.codice.codice_l1b import process_codice_l1b
dataset = process_codice_l1b(l1a_file)
"""

import logging
@@ -25,21 +25,22 @@


def create_hskp_dataset(l1a_dataset, cdf_attrs) -> xr.Dataset:
"""Create an ``xarray`` dataset for the housekeeping data.
"""
Create an ``xarray`` dataset for the housekeeping data.
The dataset can then be written to a CDF file.
Parameters
----------
l1a_dataset : xr.Dataset
The L1a dataset that is being processed
The L1a dataset that is being processed.
cdf_attrs : ImapCdfAttributes
The CDF attributes for the dataset
The CDF attributes for the dataset.
Returns
-------
xarray.Dataset
``xarray`` dataset containing the science data and supporting metadata
l1b_dataset : xarray.Dataset
The ``xarray`` dataset containing the science data and supporting metadata.
"""
epoch = l1a_dataset.coords["epoch"]
l1b_dataset = xr.Dataset(
@@ -82,24 +83,25 @@ def create_hskp_dataset(l1a_dataset, cdf_attrs) -> xr.Dataset:
def create_science_dataset(
l1a_dataset: xr.Dataset, cdf_attrs, dataset_name
) -> xr.Dataset:
"""Create an ``xarray`` dataset for the science data.
"""
Create an ``xarray`` dataset for the science data.
The dataset can then be written to a CDF file.
Parameters
----------
l1a_dataset : xr.Dataset
The L1a dataset that is being processed
The L1a dataset that is being processed.
cdf_attrs : ImapCdfAttributes
The CDF attributes for the dataset
The CDF attributes for the dataset.
dataset_name : str
The name that is used to construct the data variable name and reference
the CDF attributes (e.g. ``imap_codice_l1b_hi_omni``)
the CDF attributes (e.g. ``imap_codice_l1b_hi_omni``).
Returns
-------
xarray.Dataset
``xarray`` dataset containing the science data and supporting metadata
l1b_dataset : xarray.Dataset
The ``xarray`` dataset containing the science data and supporting metadata.
"""
# Retrieve the coordinates from the l1a dataset
epoch = l1a_dataset.coords["epoch"]
@@ -153,19 +155,20 @@


def process_codice_l1b(file_path: Path, data_version: str) -> xr.Dataset:
"""Process CoDICE l1a data to create l1b data products.
"""
Will process CoDICE l1a data to create l1b data products.
Parameters
----------
file_path : pathlib.Path | str
Path to the CoDICE L1a file to process
Path to the CoDICE L1a file to process.
data_version : str
Version of the data product being created
Version of the data product being created.
Returns
-------
dataset : xarray.Dataset
``xarray`` dataset containing the science data and supporting metadata
l1b_dataset : xarray.Dataset
The ``xarray`` dataset containing the science data and supporting metadata.
"""
logger.info(f"\nProcessing {file_path.name} file.")

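
Chaining into L1b, a sketch based on the process_codice_l1b signature above; the L1a file name is hypothetical, and the data_version format is again assumed:

from pathlib import Path

from imap_processing.codice.codice_l1b import process_codice_l1b

# Hypothetical L1a CDF file name; the function expects a pathlib.Path, per the signature and logging call above.
l1a_file = Path("imap_codice_l1a_hskp_20240429_v001.cdf")
l1b_dataset = process_codice_l1b(l1a_file, data_version="001")
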
7 changes: 4 additions & 3 deletions imap_processing/codice/constants.py
@@ -1,10 +1,11 @@
"""Contains constants variables to support CoDICE processing.
"""
Contains constants variables to support CoDICE processing.
The ``plan_id``, ``plan_step``, and ``view_id`` mentioned in this module are
derived from the packet data.
Acronyms
--------
Notes
-----
SW = SunWard
NSW = Non-SunWard
PUI = PickUp Ion
(The remaining 9 changed files, including the GLOWS docstring updates, are not shown here.)
