MRG: Add support for NIRSport2 devices #9401

Merged · 13 commits · May 21, 2021
doc/changes/latest.inc (2 changes: 1 addition & 1 deletion)
@@ -35,7 +35,7 @@ Enhancements

- New function :func:`mne.chpi.get_chpi_info` to retrieve basic information about the cHPI system used when recording MEG data (:gh:`9369` by `Richard Höchenberger`_)

- Add support for NIRSport devices to `mne.io.read_raw_nirx` (:gh:`9348` **by new contributor** |David Julien|_, **new contributor** |Romain Derollepot|_, `Robert Luke`_, and `Eric Larson`_)
- Add support for NIRSport and NIRSport2 devices to `mne.io.read_raw_nirx` (:gh:`9348` and :gh:`9401` **by new contributor** |David Julien|_, **new contributor** |Romain Derollepot|_, `Robert Luke`_, and `Eric Larson`_)

- New function :func:`mne.label.find_pos_in_annot` to get atlas label for MRI coordinates. (:gh:`9376` **by new contributor** |Marian Dovgialo|_)

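For context, a minimal usage sketch of the reader this changelog entry describes. It is not part of the PR; the directory name below is hypothetical, and an Aurora/NIRSport2 measurement is assumed to be a folder containing the *.hdr, *config.json, description.json, *.wl1, *.wl2, probeInfo.mat, and *.tri files checked for in nirx.py below.

```python
# Minimal sketch (assumed paths): read a NIRSport2/Aurora recording with the
# extended reader. read_raw_nirx() takes the measurement *directory*, not a
# single file.
import mne

raw = mne.io.read_raw_nirx('2021-05-21_001', preload=True)  # hypothetical folder

print(raw.info['sfreq'])   # sampling rate parsed from the Aurora .hdr file
print(raw.ch_names[:4])    # e.g. 'S1_D1 760', 'S1_D1 850', one name per wavelength
print(raw.annotations)     # triggers read from the .tri event file
```

For Aurora recordings the two wavelengths (760 nm and 850 nm) are hard-coded in the changes below.
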
mne/datasets/utils.py (4 changes: 2 additions & 2 deletions)
@@ -254,7 +254,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True,
path = _get_path(path, key, name)
# To update the testing or misc dataset, push commits, then make a new
# release on GitHub. Then update the "releases" variable:
releases = dict(testing='0.119', misc='0.9')
releases = dict(testing='0.121', misc='0.9')
# And also update the "md5_hashes['testing']" variable below.
# To update any other dataset, update the data archive itself (upload
# an updated version) and update the md5 hash.
@@ -349,7 +349,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True,
sample='12b75d1cb7df9dfb4ad73ed82f61094f',
somato='32fd2f6c8c7eb0784a1de6435273c48b',
spm='9f43f67150e3b694b523a21eb929ea75',
testing='2e7c60a055228928bd39f68892b3d488',
testing='570186827dfdc1d454675dab552441c6',
multimodal='26ec847ae9ab80f58f204d09e2c08367',
fnirs_motor='c4935d19ddab35422a69f3326a01fef8',
opm='370ad1dcfd5c47e029e692c85358a374',
mne/io/egi/tests/test_egi.py (1 change: 0 additions & 1 deletion)
@@ -369,7 +369,6 @@ def test_io_egi_evokeds_mff(idx, cond, tmax, signals, bads):
assert evoked_cond.info['nchan'] == 259
assert evoked_cond.info['sfreq'] == 250.0
assert not evoked_cond.info['custom_ref_applied']
assert evoked_cond.info['dig'] is None


@requires_version('mffpy', '0.5.7')
mne/io/nirx/nirx.py (166 changes: 112 additions & 54 deletions)
@@ -7,6 +7,7 @@
import re as re
import os.path as op
import datetime as dt
import json

import numpy as np

@@ -87,10 +88,29 @@ def __init__(self, fname, saturated, preload=False, verbose=None):

fname = _check_fname(fname, 'read', True, 'fname', need_dir=True)

json_config = glob.glob('%s/*%s' % (fname, "config.json"))
if len(json_config):
is_aurora = True
else:
is_aurora = False

if is_aurora:
# NIRSport2 devices using Aurora software
keys = ('hdr', 'config.json', 'description.json',
'wl1', 'wl2', 'probeInfo.mat', 'tri')
else:
# NIRScout devices and NIRSport1 devices
keys = ('hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2',
'config.txt', 'probeInfo.mat')
n_dat = len(glob.glob('%s/*%s' % (fname, 'dat')))
if n_dat != 1:
warn("A single dat file was expected in the specified path, "
f"but got {n_dat}. This may indicate that the file "
"structure has been modified since the measurement "
"was saved.")

# Check if required files exist and store names for later use
files = dict()
keys = ('hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2',
'config.txt', 'probeInfo.mat')
nan_mask = dict()
for key in keys:
files[key] = glob.glob('%s/*%s' % (fname, key))
@@ -119,11 +139,6 @@ def __init__(self, fname, saturated, preload=False, verbose=None):
fidx = noidx
nan_mask[key] = files[key][0 if noidx == 1 else 1]
files[key] = files[key][fidx]
if len(glob.glob('%s/*%s' % (fname, 'dat'))) != 1:
warn("A single dat file was expected in the specified path, but "
"got %d. This may indicate that the file structure has been "
"modified since the measurement was saved." %
(len(glob.glob('%s/*%s' % (fname, 'dat')))))

# Read number of rows/samples of wavelength data
with _open(files['wl1']) as fid:
@@ -133,25 +148,40 @@ def __init__(self, fname, saturated, preload=False, verbose=None):
# The header file isn't compliant with the configparser. So all the
# text between comments must be removed before passing to parser
with _open(files['hdr']) as f:
hdr_str = f.read()
hdr_str = re.sub('#.*?#', '', hdr_str, flags=re.DOTALL)
hdr_str_all = f.read()
hdr_str = re.sub('#.*?#', '', hdr_str_all, flags=re.DOTALL)
if is_aurora:
hdr_str = re.sub('(\\[DataStructure].*)', '',
hdr_str, flags=re.DOTALL)
hdr = RawConfigParser()
hdr.read_string(hdr_str)

# Check that the file format version is supported
if hdr['GeneralInfo']['NIRStar'] not in ['"15.0"', '"15.2"', '"15.3"']:
raise RuntimeError('MNE does not support this NIRStar version'
' (%s)' % (hdr['GeneralInfo']['NIRStar'],))
if "NIRScout" not in hdr['GeneralInfo']['Device'] \
and "NIRSport" not in hdr['GeneralInfo']['Device']:
warn("Only import of data from NIRScout devices have been "
"thoroughly tested. You are using a %s device. " %
hdr['GeneralInfo']['Device'])
if is_aurora:
# We may need to ease this requirement back
if hdr['GeneralInfo']['Version'] not in ['2021.4.0-34-ge9fdbbc8']:
warn("MNE has not been tested with Aurora version "
f"{hdr['GeneralInfo']['Version']}")
else:
if hdr['GeneralInfo']['NIRStar'] not in ['"15.0"', '"15.2"',
'"15.3"']:
raise RuntimeError('MNE does not support this NIRStar version'
' (%s)' % (hdr['GeneralInfo']['NIRStar'],))
if "NIRScout" not in hdr['GeneralInfo']['Device'] \
and "NIRSport" not in hdr['GeneralInfo']['Device']:
warn("Only import of data from NIRScout devices have been "
"thoroughly tested. You are using a %s device. " %
hdr['GeneralInfo']['Device'])

# Parse required header fields

# Extract measurement date and time
datetime_str = hdr['GeneralInfo']['Date'] + hdr['GeneralInfo']['Time']
if is_aurora:
datetime_str = hdr['GeneralInfo']['Date']
else:
datetime_str = hdr['GeneralInfo']['Date'] + \
hdr['GeneralInfo']['Time']

meas_date = None
# Several formats have been observed so we try each in turn
for dt_code in ['"%a, %b %d, %Y""%H:%M:%S.%f"',
@@ -172,44 +202,65 @@ def __init__(self, fname, saturated, preload=False, verbose=None):
tzinfo=dt.timezone.utc)

# Extract frequencies of light used by machine
fnirs_wavelengths = [int(s) for s in
re.findall(r'(\d+)',
hdr['ImagingParameters'][
'Wavelengths'])]
if is_aurora:
fnirs_wavelengths = [760, 850]
else:
fnirs_wavelengths = [int(s) for s in
re.findall(r'(\d+)',
hdr['ImagingParameters'][
'Wavelengths'])]

# Extract source-detectors
sources = np.asarray([int(s) for s in re.findall(r'(\d+)-\d+:\d+',
hdr['DataStructure'][
'S-D-Key'])], int)
detectors = np.asarray([int(s) for s in re.findall(r'\d+-(\d+):\d+',
hdr['DataStructure']
['S-D-Key'])],
int)
if is_aurora:
sources = re.findall(r'(\d+)-\d+', hdr_str_all.split("\n")[-2])
detectors = re.findall(r'\d+-(\d+)', hdr_str_all.split("\n")[-2])
sources = [int(s) + 1 for s in sources]
detectors = [int(d) + 1 for d in detectors]

else:
sources = np.asarray([int(s) for s in
re.findall(r'(\d+)-\d+:\d+',
hdr['DataStructure']
['S-D-Key'])], int)
detectors = np.asarray([int(s) for s in
re.findall(r'\d+-(\d+):\d+',
hdr['DataStructure']
['S-D-Key'])], int)

# Extract sampling rate
samplingrate = float(hdr['ImagingParameters']['SamplingRate'])
if is_aurora:
samplingrate = float(hdr['GeneralInfo']['Sampling rate'])
else:
samplingrate = float(hdr['ImagingParameters']['SamplingRate'])

# Read participant information file
inf = ConfigParser(allow_no_value=True)
inf.read(files['inf'])
inf = inf._sections['Subject Demographics']
if is_aurora:
with open(files['description.json']) as f:
inf = json.load(f)
else:
inf = ConfigParser(allow_no_value=True)
inf.read(files['inf'])
inf = inf._sections['Subject Demographics']

# Store subject information from inf file in mne format
# Note: NIRX also records "Study Type", "Experiment History",
# "Additional Notes", "Contact Information" and this information
# is currently discarded
# NIRStar does not record an id, or handedness by default
subject_info = {}
names = inf['name'].split()
if is_aurora:
names = inf["subject"].split()
else:
names = inf['name'].split()
if len(names) > 0:
subject_info['first_name'] = \
inf['name'].split()[0].replace("\"", "")
names[0].replace("\"", "")
if len(names) > 1:
subject_info['last_name'] = \
inf['name'].split()[-1].replace("\"", "")
names[-1].replace("\"", "")
if len(names) > 2:
subject_info['middle_name'] = \
inf['name'].split()[-2].replace("\"", "")
names[-2].replace("\"", "")
subject_info['sex'] = inf['gender'].replace("\"", "")
# Recode values
if subject_info['sex'] in {'M', 'Male', '1'}:
@@ -237,10 +288,8 @@ def __init__(self, fname, saturated, preload=False, verbose=None):

# These are all in MNI coordinates, so let's transform them to
# the Neuromag head coordinate frame
mri_head_t, _ = _get_trans('fsaverage', 'mri', 'head')
src_locs = apply_trans(mri_head_t, src_locs)
det_locs = apply_trans(mri_head_t, det_locs)
ch_locs = apply_trans(mri_head_t, ch_locs)
src_locs, det_locs, ch_locs, mri_head_t = _convert_fnirs_to_head(
'fsaverage', 'mri', 'head', src_locs, det_locs, ch_locs)

# Set up digitization
dig = get_mni_fiducials('fsaverage', verbose=False)
@@ -268,14 +317,8 @@ def __init__(self, fname, saturated, preload=False, verbose=None):
req_ind = np.concatenate((req_ind, sd_idx[0]))
req_ind = req_ind.astype(int)

# Generate meaningful channel names
def prepend(li, str):
str += '{0}'
li = [str.format(i) for i in li]
return li

snames = prepend(sources[req_ind], 'S')
dnames = prepend(detectors[req_ind], '_D')
snames = [f"S{sources[idx]}" for idx in req_ind]
dnames = [f"_D{detectors[idx]}" for idx in req_ind]
sdnames = [m + str(n) for m, n in zip(snames, dnames)]
sd1 = [s + ' ' + str(fnirs_wavelengths[0]) for s in sdnames]
sd2 = [s + ' ' + str(fnirs_wavelengths[1]) for s in sdnames]
@@ -366,15 +409,22 @@ def prepend(li, str):
ch_names.extend([self.ch_names[2 * ci:2 * ci + 2]] * len(on))

# Read triggers from event file
if op.isfile(files['hdr'][:-3] + 'evt'):
with _open(files['hdr'][:-3] + 'evt') as fid:
if not is_aurora:
files['tri'] = files['hdr'][:-3] + 'evt'
if op.isfile(files['tri']):
with _open(files['tri']) as fid:
t = [re.findall(r'(\d+)', line) for line in fid]
for t_ in t:
binary_value = ''.join(t_[1:])[::-1]
trigger_frame = float(t_[0])
if is_aurora:
trigger_frame = float(t_[7])
desc = float(t_[8])
else:
binary_value = ''.join(t_[1:])[::-1]
desc = float(int(binary_value, 2))
trigger_frame = float(t_[0])
onset.append(trigger_frame / samplingrate)
duration.append(1.) # No duration info stored in files
description.append(float(int(binary_value, 2)))
description.append(desc)
ch_names.append(list())
annot = Annotations(onset, duration, description, ch_names=ch_names)
self.set_annotations(annot)
@@ -419,3 +469,11 @@ def _read_csv_rows_cols(fname, start, stop, cols, bounds):
x.shape = (stop - start, -1)
x = x[:, cols]
return x


def _convert_fnirs_to_head(trans, fro, to, src_locs, det_locs, ch_locs):
mri_head_t, _ = _get_trans(trans, fro, to)
src_locs = apply_trans(mri_head_t, src_locs)
det_locs = apply_trans(mri_head_t, det_locs)
ch_locs = apply_trans(mri_head_t, ch_locs)
return src_locs, det_locs, ch_locs, mri_head_t