[REF, DOC] Use logging to generate reports #424

Merged: 5 commits, Nov 8, 2019
Changes from 3 commits
20 changes: 20 additions & 0 deletions tedana/combine.py
@@ -7,6 +7,8 @@
from tedana.due import due, Doi

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


@due.dcite(Doi('10.1002/(SICI)1522-2594(199907)42:1<87::AID-MRM13>3.0.CO;2-O'),
@@ -31,6 +33,14 @@ def _combine_t2s(data, tes, ft2s):
combined : (M x T) :obj:`numpy.ndarray`
Data combined across echoes according to T2* estimates.
"""
RepLGR.info("Multi-echo data were then optimally combined using the "
"T2* combination method (Posse et al., 1999).")
RefLGR.info("Posse, S., Wiese, S., Gembris, D., Mathiak, K., Kessler, "
"C., Grosse‐Ruyken, M. L., ... & Kiselev, V. G. (1999). "
"Enhancement of BOLD‐contrast sensitivity by single‐shot "
"multi‐echo functional MR imaging. Magnetic Resonance in "
"Medicine: An Official Journal of the International Society "
"for Magnetic Resonance in Medicine, 42(1), 87-97.")
n_vols = data.shape[-1]
alpha = tes * np.exp(-tes / ft2s)
if alpha.ndim == 2:
@@ -70,6 +80,16 @@ def _combine_paid(data, tes):
combined : (M x T) :obj:`numpy.ndarray`
Data combined across echoes according to SNR/signal.
"""
RepLGR.info("Multi-echo data were then optimally combined using the "
"parallel-acquired inhomogeneity desensitized (PAID) "
"combination method.")
RefLGR.info("Poser, B. A., Versluis, M. J., Hoogduin, J. M., & Norris, "
"D. G. (2006). BOLD contrast sensitivity enhancement and "
"artifact reduction with multiecho EPI: parallel‐acquired "
"inhomogeneity‐desensitized fMRI. "
"Magnetic Resonance in Medicine: An Official Journal of the "
"International Society for Magnetic Resonance in Medicine, "
"55(6), 1227-1235.")
n_vols = data.shape[-1]
alpha = data.mean(axis=-1) * tes
alpha = np.tile(alpha[:, :, np.newaxis], (1, 1, n_vols))
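The RepLGR ('REPORT') and RefLGR ('REFERENCES') loggers added above, and mirrored in the modules below, only emit messages; turning them into a methods report and a reference list requires handlers configured elsewhere in the workflow. A minimal sketch of one possible wiring, with the file names and formatter as assumptions rather than code from this PR:

import logging

# Hypothetical handler setup; paths and formatting are illustrative only.
for name, path in [('REPORT', 'report.txt'), ('REFERENCES', 'references.txt')]:
    handler = logging.FileHandler(path, mode='w')
    handler.setFormatter(logging.Formatter('%(message)s'))
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)

# After this, every RepLGR.info(...) call appends a sentence to report.txt
# and every RefLGR.info(...) call appends a citation to references.txt.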
8 changes: 8 additions & 0 deletions tedana/decay.py
@@ -7,6 +7,8 @@
from tedana import utils

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


def mono_exp(tes, s0, t2star):
@@ -84,6 +86,12 @@ def fit_decay(data, tes, mask, masksum, fittype):
in :math:`S_0` map with 0.
3. Generate limited :math:`T_2^*` and :math:`S_0` maps by doing something.
"""
RepLGR.info("A monoexponential model was fit to the data at each voxel "
"using log-linear regression in order to estimate T2* and S0 "
"maps. For each voxel, the value from the adaptive mask was "
"used to determine which echoes would be used to estimate T2* "
"and S0.")

if data.shape[1] != len(tes):
raise ValueError('Second dimension of data ({0}) does not match number '
'of echoes provided (tes; {1})'.format(data.shape[1], len(tes)))
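The report sentence added to fit_decay describes a log-linear fit of a monoexponential decay model. As a hedged illustration of that idea (not the fit_decay implementation itself), taking logarithms of S(TE) = S0 * exp(-TE / T2*) gives a line in TE whose slope and intercept yield T2* and S0:

import numpy as np

def loglinear_monoexp(signal, tes):
    """Fit log(S) = log(S0) - TE / T2* by least squares for one voxel.

    Sketch only; assumes positive signal values and does not handle the
    adaptive-mask logic described in the report text.
    """
    tes = np.asarray(tes, dtype=float)
    log_s = np.log(np.asarray(signal, dtype=float))
    slope, intercept = np.polyfit(tes, log_s, deg=1)
    return -1.0 / slope, np.exp(intercept)  # (T2*, S0)

# Example with echo times in ms and one voxel's mean signal per echo.
t2star, s0 = loglinear_monoexp([5000.0, 4000.0, 3200.0], [14.0, 28.0, 42.0])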
2 changes: 2 additions & 0 deletions tedana/decomposition/_utils.py
@@ -7,6 +7,8 @@
from scipy import stats

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


def eimask(dd, ees=None):
5 changes: 4 additions & 1 deletion tedana/decomposition/ica.py
@@ -9,6 +9,8 @@
from sklearn.decomposition import FastICA

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


def tedica(data, n_components, fixed_seed, maxit=500, maxrestart=10):
@@ -41,9 +43,10 @@ def tedica(data, n_components, fixed_seed, maxit=500, maxrestart=10):
-----
Uses `sklearn` implementation of FastICA for decomposition
"""

warnings.filterwarnings(action='ignore', module='scipy',
message='^internal gelsd')
RepLGR.info("Independent component analysis was then used to "
"decompose the dimensionally reduced dataset.")

if fixed_seed == -1:
fixed_seed = np.random.randint(low=1, high=1000)
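For context on the report sentence added to tedica: the Notes section states that the sklearn FastICA implementation is used. A minimal, self-contained sketch of a FastICA call with assumed shapes and parameter values, not the exact call made further down in tedica:

import numpy as np
from sklearn.decomposition import FastICA

# Hypothetical dimensionally reduced data: observations x features.
rng = np.random.default_rng(42)
data_reduced = rng.standard_normal((1000, 10))

ica = FastICA(n_components=10, max_iter=500, random_state=42)
sources = ica.fit_transform(data_reduced)  # estimated independent components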
36 changes: 36 additions & 0 deletions tedana/decomposition/pca.py
@@ -14,6 +14,8 @@
from tedana.due import due, BibTeX

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


@due.dcite(BibTeX(
@@ -195,6 +197,40 @@ def tedpca(data_cat, data_oc, combmode, mask, t2s, t2sG,
'tree.')
algorithm = 'kundu'

if algorithm == 'mle':
alg_str = "using MLE dimensionality estimation (Minka, 2001)"
RefLGR.info("Minka, T. P. (2001). Automatic choice of dimensionality "
"for PCA. In Advances in neural information processing "
"systems (pp. 598-604).")
elif algorithm == 'kundu':
alg_str = ("followed by the Kundu component selection decision "
"tree (Kundu et al., 2013)")
RefLGR.info("Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., "
"Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. "
"(2013). Integrated strategy for improving functional "
"connectivity mapping using multiecho fMRI. Proceedings "
"of the National Academy of Sciences, 110(40), "
"16187-16192.")
elif algorithm == 'kundu-stabilize':
alg_str = ("followed by the 'stabilized' Kundu component "
"selection decision tree (Kundu et al., 2013)")
RefLGR.info("Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., "
"Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. "
"(2013). Integrated strategy for improving functional "
"connectivity mapping using multiecho fMRI. Proceedings "
"of the National Academy of Sciences, 110(40), "
"16187-16192.")

if source_tes == -1:
dat_str = "the optimally combined data"
elif source_tes == 0:
dat_str = "the z-concatenated multi-echo data"
else:
dat_str = "a z-concatenated subset of echoes from the input data"

RepLGR.info("Principal component analysis {0} was applied to "
"{1} for dimensionality reduction.".format(alg_str, dat_str))

n_samp, n_echos, n_vols = data_cat.shape
source_tes = np.array([int(ee) for ee in str(source_tes).split(',')])

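The branches above only assemble the report sentence and references for the chosen PCA variant. As background on the 'mle' option, Minka's (2001) estimator is exposed by scikit-learn's PCA; a hedged sketch with made-up shapes, not the tedpca internals:

import numpy as np
from sklearn.decomposition import PCA

# Hypothetical data matrix (samples x features); 'mle' needs samples >= features.
rng = np.random.default_rng(0)
data_2d = rng.standard_normal((500, 60))

pca = PCA(n_components='mle', svd_solver='full')  # Minka (2001) dimensionality estimate
reduced = pca.fit_transform(data_2d)
n_retained = pca.n_components_  # number of components kept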
9 changes: 9 additions & 0 deletions tedana/gscontrol.py
@@ -11,6 +11,8 @@
from tedana import io, utils

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


def gscontrol_raw(catd, optcom, n_echos, ref_img, dtrank=4):
@@ -45,6 +47,8 @@ def gscontrol_raw(catd, optcom, n_echos, ref_img, dtrank=4):
Input `optcom` with global signal removed from time series
"""
LGR.info('Applying amplitude-based T1 equilibration correction')
RepLGR.info("Global signal regression was applied to the multi-echo "
"and optimally combined datasets.")
if catd.shape[0] != optcom.shape[0]:
raise ValueError('First dimensions of catd ({0}) and optcom ({1}) do not '
'match'.format(catd.shape[0], optcom.shape[0]))
@@ -134,6 +138,11 @@ def gscontrol_mmix(optcom_ts, mmix, mask, comptable, ref_img):
meica_mix_T1c.1D T1 global signal-corrected mixing matrix
====================== =================================================
"""
LGR.info('Performing T1c global signal regression to remove spatially '
'diffuse noise')
RepLGR.info("T1c global signal regression was then applied to the "
"data in order to remove spatially diffuse noise.")

all_comps = comptable.index.values
acc = comptable[comptable.classification == 'accepted'].index.values
ign = comptable[comptable.classification == 'ignored'].index.values
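The report text added to gscontrol_raw and gscontrol_mmix summarizes global signal regression at a high level. As a rough, simplified illustration only (the actual functions involve detrending and component-level corrections not shown in these hunks), removing a mean time series from voxelwise data by least squares might look like:

import numpy as np

def remove_global_signal(data):
    """Regress the global (mean) time series out of each voxel.

    data : (voxels x time) array. Simplified sketch, not tedana's gscontrol_raw.
    """
    glsig = data.mean(axis=0)                           # global signal, shape (time,)
    design = np.vstack([glsig, np.ones_like(glsig)]).T  # time x 2 design matrix
    betas, *_ = np.linalg.lstsq(design, data.T, rcond=None)
    return data - (design @ betas).T                    # residual time series

cleaned = remove_global_signal(np.random.default_rng(1).standard_normal((100, 50)))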
2 changes: 2 additions & 0 deletions tedana/io.py
@@ -14,6 +14,8 @@
from tedana.stats import computefeats2, get_coeffs

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


def split_ts(data, mmix, mask, comptable):
6 changes: 6 additions & 0 deletions tedana/metrics/kundu_fit.py
@@ -13,6 +13,8 @@


LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')

F_MAX = 500
Z_MAX = 8
@@ -87,6 +89,10 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img,
'({0}) does not match number of volumes in '
't2s ({1})'.format(catd.shape[2], t2s.shape[1]))

RepLGR.info("A series of TE-dependence metrics were calculated for "
"each ICA component, including Kappa, Rho, and variance "
"explained.")

# mask everything we can
tsoc = tsoc[mask, :]
catd = catd[mask, ...]
2 changes: 2 additions & 0 deletions tedana/selection/_utils.py
@@ -6,6 +6,8 @@
import numpy as np

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


def clean_dataframe(comptable):
15 changes: 15 additions & 0 deletions tedana/selection/tedica.py
@@ -9,6 +9,8 @@
from tedana.selection._utils import getelbow, clean_dataframe

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


def manual_selection(comptable, acc=None, rej=None):
@@ -30,6 +32,9 @@ def manual_selection(comptable, acc=None, rej=None):
Component metric table with classification.
"""
LGR.info('Performing manual ICA component selection')
RepLGR.info("Next, components were manually classified as "
"BOLD (TE-dependent), non-BOLD (TE-independent), or "
"uncertain (low-variance).")
if ('classification' in comptable.columns and
'original_classification' not in comptable.columns):
comptable['original_classification'] = comptable['classification']
@@ -110,6 +115,16 @@ def kundu_selection_v2(comptable, n_echos, n_vols):
https://gist.github.com/emdupre/ca92d52d345d08ee85e104093b81482e
"""
LGR.info('Performing ICA component selection with Kundu decision tree v2.5')
RepLGR.info("Next, component selection was performed to identify "
"BOLD (TE-dependent), non-BOLD (TE-independent), and "
"uncertain (low-variance) components using the Kundu "
"decision tree (v2.5; Kundu et al., 2013).")
RefLGR.info("Kundu, P., Brenowitz, N. D., Voon, V., Worbe, Y., "
"Vértes, P. E., Inati, S. J., ... & Bullmore, E. T. "
"(2013). Integrated strategy for improving functional "
"connectivity mapping using multiecho fMRI. Proceedings "
"of the National Academy of Sciences, 110(40), "
"16187-16192.")
comptable['classification'] = 'accepted'
comptable['rationale'] = ''

2 changes: 2 additions & 0 deletions tedana/selection/tedpca.py
@@ -9,6 +9,8 @@
from tedana.selection._utils import (getelbow_cons, getelbow, clean_dataframe)

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')

F_MAX = 500

2 changes: 2 additions & 0 deletions tedana/stats.py
@@ -9,6 +9,8 @@
from tedana import utils

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


def getfbounds(n_echos):
5 changes: 5 additions & 0 deletions tedana/utils.py
@@ -12,6 +12,8 @@
from tedana.due import due, BibTeX

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


def load_image(data):
@@ -63,6 +65,9 @@ def make_adaptive_mask(data, mask=None, getsum=False):
Valued array indicating the number of echos with sufficient signal in a
given voxel. Only returned if `getsum = True`
"""
RepLGR.info("An adaptive mask was then generated, in which each voxel's "
"value reflects the number of echoes with 'good' data.")

# take temporal mean of echos and extract non-zero values in first echo
echo_means = data.mean(axis=-1) # temporal mean of echos
first_echo = echo_means[echo_means[:, 0] != 0, 0]
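The report sentence added to make_adaptive_mask says each voxel's value counts the echoes with 'good' data. A toy version of that idea, counting echoes whose temporal mean clears an arbitrary threshold (the real criterion in make_adaptive_mask is more involved):

import numpy as np

def count_good_echoes(data, threshold):
    """Per-voxel count of echoes whose temporal mean exceeds a threshold.

    data : (voxels x echoes x time) array. Illustration only.
    """
    echo_means = data.mean(axis=-1)                # voxels x echoes
    return (echo_means > threshold).sum(axis=1)    # values in 0..n_echoes

mask_sum = count_good_echoes(
    np.abs(np.random.default_rng(2).standard_normal((200, 3, 40))), threshold=0.5)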
2 changes: 2 additions & 0 deletions tedana/viz.py
@@ -15,6 +15,8 @@
LGR = logging.getLogger(__name__)
MPL_LGR = logging.getLogger('matplotlib')
MPL_LGR.setLevel(logging.WARNING)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


def trim_edge_zeros(arr):
14 changes: 14 additions & 0 deletions tedana/workflows/parser_utils.py
@@ -2,6 +2,7 @@
Functions for parsers.
"""
import os.path as op
import logging


def is_valid_file(parser, arg):
@@ -12,3 +13,16 @@ def is_valid_file(parser, arg):
parser.error('The file {0} does not exist!'.format(arg))

return arg


class ContextFilter(logging.Filter):
"""
A filter to allow specific logging handlers to ignore specific loggers.
We use this to prevent our report-generation and reference-compiling
loggers from printing to the general log file or to stdout.
"""
NAMES = ['REPORT', 'REFERENCES']

def filter(self, record):
if not any([n in record.name for n in self.NAMES]):
return True
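
A brief usage sketch for ContextFilter: attach it to the handlers that serve the general log (for example a console handler) so REPORT and REFERENCES records are suppressed there, while dedicated handlers still capture them. Handler choices and file names below are assumptions, not code from this PR:

import logging
from tedana.workflows.parser_utils import ContextFilter

console = logging.StreamHandler()      # general log output
console.addFilter(ContextFilter())     # drop REPORT/REFERENCES records here
logging.getLogger().addHandler(console)

report_handler = logging.FileHandler('report.txt')        # hypothetical path
logging.getLogger('REPORT').addHandler(report_handler)    # report text still captured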
2 changes: 2 additions & 0 deletions tedana/workflows/t2smap.py
@@ -13,6 +13,8 @@
from tedana.workflows.parser_utils import is_valid_file

LGR = logging.getLogger(__name__)
RepLGR = logging.getLogger('REPORT')
RefLGR = logging.getLogger('REFERENCES')


def _get_parser():