diff --git a/tedana/combine.py b/tedana/combine.py index 020c533da..b151173b9 100644 --- a/tedana/combine.py +++ b/tedana/combine.py @@ -97,9 +97,12 @@ def _combine_paid(data, tes): return combined -def make_optcom(data, tes, mask, t2s=None, combmode='t2s', verbose=True): +def make_optcom(data, tes, adaptive_mask, t2s=None, combmode='t2s', verbose=True): """ - Optimally combine BOLD data across TEs. + Optimally combine BOLD data across TEs, using only those echos with reliable signal + across at least three echos. If the number of echos providing reliable signal is at least + three but less than the total number of collected echos, we assume that later + echos do not provide meaningful signal. Parameters ---------- @@ -107,8 +110,8 @@ def make_optcom(data, tes, mask, t2s=None, combmode='t2s', verbose=True): Concatenated BOLD data. tes : (E,) :obj:`numpy.ndarray` Array of TEs, in seconds. - mask : (S,) :obj:`numpy.ndarray` - Brain mask in 3D array. + adaptive_mask : (S,) :obj:`numpy.ndarray` + Adaptive mask of the data indicating the number of echos with signal at each voxel t2s : (S [x T]) :obj:`numpy.ndarray` or None, optional Estimated T2* values. Only required if combmode = 't2s'. Default is None. 
@@ -142,12 +145,12 @@ def make_optcom(data, tes, mask, t2s=None, combmode='t2s', verbose=True): 'dimension of input data: {0} != ' '{1}'.format(len(tes), data.shape[1])) - if mask.ndim != 1: + if adaptive_mask.ndim != 1: raise ValueError('Mask is not 1D') - elif mask.shape[0] != data.shape[0]: + elif adaptive_mask.shape[0] != data.shape[0]: raise ValueError('Mask and data do not have same number of ' - 'voxels/samples: {0} != {1}'.format(mask.shape[0], - data.shape[0])) + 'voxels/samples: {0} != {1}'.format( + adaptive_mask.shape[0], data.shape[0])) if combmode not in ['t2s', 'paid']: raise ValueError("Argument 'combmode' must be either 't2s' or 'paid'") @@ -158,23 +161,32 @@ def make_optcom(data, tes, mask, t2s=None, combmode='t2s', verbose=True): LGR.warning("Argument 't2s' is not required if 'combmode' is 'paid'. " "'t2s' array will not be used.") - data = data[mask, :, :] # mask out empty voxels/samples - tes = np.array(tes)[np.newaxis, ...] # (1 x E) array_like - if combmode == 'paid': - LGR.info('Optimally combining data with parallel-acquired inhomogeneity ' - 'desensitized (PAID) method') - combined = _combine_paid(data, tes) + LGR.info('Optimally combining data with parallel-acquired ' + 'inhomogeneity desensitized (PAID) method') else: if t2s.ndim == 1: msg = 'Optimally combining data with voxel-wise T2* estimates' else: msg = ('Optimally combining data with voxel- and volume-wise T2* ' 'estimates') - t2s = t2s[mask, ..., np.newaxis] # mask out empty voxels/samples - LGR.info(msg) - combined = _combine_t2s(data, tes, t2s) + + mask = adaptive_mask >= 3 + data = data[mask, :, :] # mask out unstable voxels/samples + tes = np.array(tes)[np.newaxis, ...] 
# (1 x E) array_like + combined = np.zeros((data.shape[0], data.shape[2])) + for echo in np.unique(adaptive_mask[mask]): + echo_idx = adaptive_mask[mask] == echo + + if combmode == 'paid': + combined[echo_idx, :] = _combine_paid(data[echo_idx, :echo, :], + tes[:echo]) + else: + t2s_ = t2s[mask, ..., np.newaxis] # mask out empty voxels/samples + + combined[echo_idx, :] = _combine_t2s( + data[echo_idx, :echo, :], tes[:, :echo], t2s_[echo_idx, ...]) combined = unmask(combined, mask) return combined diff --git a/tedana/decomposition/pca.py b/tedana/decomposition/pca.py index 58957908e..07e6a253d 100644 --- a/tedana/decomposition/pca.py +++ b/tedana/decomposition/pca.py @@ -46,7 +46,7 @@ def low_mem_pca(data): return u, s, v -def tedpca(data_cat, data_oc, combmode, mask, t2s, t2sG, +def tedpca(data_cat, data_oc, combmode, mask, adaptive_mask, t2sG, ref_img, tes, algorithm='mdl', kdaw=10., rdaw=1., out_dir='.', verbose=False, low_mem=False): """ @@ -65,8 +65,8 @@ def tedpca(data_cat, data_oc, combmode, mask, t2s, t2sG, Poser 2006 mask : (S,) array_like Boolean mask array - t2s : (S,) array_like - Map of voxel-wise T2* estimates. + adaptive_mask : (S,) array_like + Adaptive mask of the data indicating the number of echos with signal at each voxel t2sG : (S,) array_like Map of voxel-wise T2* estimates. 
ref_img : :obj:`str` or img_like @@ -218,18 +218,10 @@ def tedpca(data_cat, data_oc, combmode, mask, t2s, t2sG, # Normalize each component's time series vTmixN = stats.zscore(comp_ts, axis=0) - comptable, _, _, _ = metrics.dependence_metrics(data_cat, - data_oc, - comp_ts, - t2s, - tes, - ref_img, - reindex=False, - mmixN=vTmixN, - algorithm=None, - label='mepca_', - out_dir=out_dir, - verbose=verbose) + comptable, _, _, _ = metrics.dependence_metrics( + data_cat, data_oc, comp_ts, adaptive_mask, tes, ref_img, + reindex=False, mmixN=vTmixN, algorithm=None, + label='mepca_', out_dir=out_dir, verbose=verbose) # varex_norm from PCA overrides varex_norm from dependence_metrics, # but we retain the original diff --git a/tedana/metrics/kundu_fit.py b/tedana/metrics/kundu_fit.py index d9c27c2ef..c8414e1c8 100644 --- a/tedana/metrics/kundu_fit.py +++ b/tedana/metrics/kundu_fit.py @@ -20,7 +20,7 @@ Z_MAX = 8 -def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, +def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, reindex=False, mmixN=None, algorithm=None, label=None, out_dir='.', verbose=False): """ @@ -35,8 +35,9 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, mmix : (T x C) array_like Mixing matrix for converting input data to component space, where `C` is components and `T` is the same as in `catd` - t2s : (S [x T]) array_like - Limited T2* map or timeseries. + adaptive_mask : (S) array_like + Adaptive mask, where each voxel's value is the number of echoes with + "good signal". tes : list List of echo times associated with `catd`, in milliseconds ref_img : str or img_like @@ -69,13 +70,14 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, mmix_corrected : :obj:`numpy.ndarray` Mixing matrix after sign correction and resorting (if reindex is True). 
""" - # Use t2s as mask - mask = t2s != 0 - if not (catd.shape[0] == t2s.shape[0] == mask.shape[0] == tsoc.shape[0]): + # Use adaptive_mask as mask + mask = adaptive_mask >= 3 + + if not (catd.shape[0] == adaptive_mask.shape[0] == tsoc.shape[0]): raise ValueError('First dimensions (number of samples) of catd ({0}), ' - 'tsoc ({1}), and t2s ({2}) do not ' + 'tsoc ({1}), and adaptive_mask ({2}) do not ' 'match'.format(catd.shape[0], tsoc.shape[0], - t2s.shape[0])) + adaptive_mask.shape[0])) elif catd.shape[1] != len(tes): raise ValueError('Second dimension of catd ({0}) does not match ' 'number of echoes provided (tes; ' @@ -83,12 +85,8 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, elif not (catd.shape[2] == tsoc.shape[1] == mmix.shape[0]): raise ValueError('Number of volumes in catd ({0}), ' 'tsoc ({1}), and mmix ({2}) do not ' - 'match.'.format(catd.shape[2], tsoc.shape[1], mmix.shape[0])) - elif t2s.ndim == 2: - if catd.shape[2] != t2s.shape[1]: - raise ValueError('Number of volumes in catd ' - '({0}) does not match number of volumes in ' - 't2s ({1})'.format(catd.shape[2], t2s.shape[1])) + 'match.'.format(catd.shape[2], tsoc.shape[1], + mmix.shape[0])) RepLGR.info("A series of TE-dependence metrics were calculated for " "each component, including Kappa, Rho, and variance " @@ -97,7 +95,6 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, # mask everything we can tsoc = tsoc[mask, :] catd = catd[mask, ...] - t2s = t2s[mask] # demean optimal combination tsoc_dm = tsoc - tsoc.mean(axis=-1, keepdims=True) @@ -126,16 +123,17 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, # compute Betas and means over TEs for TE-dependence analysis betas = get_coeffs(utils.unmask(catd, mask), mmix_corrected, - np.repeat(mask[:, np.newaxis], len(tes), axis=1)) + np.repeat(mask[:, np.newaxis], len(tes), axis=1), + add_const=True) betas = betas[mask, ...] 
n_voxels, n_echos, n_components = betas.shape mu = catd.mean(axis=-1, dtype=float) tes = np.reshape(tes, (n_echos, 1)) fmin, _, _ = getfbounds(n_echos) - # set up Xmats - X1 = mu.T # Model 1 - X2 = np.tile(tes, (1, n_voxels)) * mu.T / t2s.T # Model 2 + # set up design matrices + X1 = mu.T # Model 1: TE-independence model + X2 = np.tile(tes, (1, n_voxels)) * mu.T # Model 2: TE-dependence model # tables for component selection kappas = np.zeros([n_components]) @@ -145,8 +143,9 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, Z_maps = np.zeros([n_voxels, n_components]) F_R2_maps = np.zeros([n_voxels, n_components]) F_S0_maps = np.zeros([n_voxels, n_components]) - pred_R2_maps = np.zeros([n_voxels, n_echos, n_components]) - pred_S0_maps = np.zeros([n_voxels, n_echos, n_components]) + if verbose: + pred_R2_maps = np.zeros([n_voxels, n_echos, n_components]) + pred_S0_maps = np.zeros([n_voxels, n_echos, n_components]) LGR.info('Fitting TE- and S0-dependent models to components') for i_comp in range(n_components): @@ -156,24 +155,32 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, varex[i_comp] = (tsoc_B[:, i_comp]**2).sum() / totvar * 100. 
varex_norm[i_comp] = (WTS[:, i_comp]**2).sum() / totvar_norm - # S0 Model - # (S,) model coefficient map - coeffs_S0 = (comp_betas * X1).sum(axis=0) / (X1**2).sum(axis=0) - pred_S0 = X1 * np.tile(coeffs_S0, (n_echos, 1)) - pred_S0_maps[:, :, i_comp] = pred_S0.T - SSE_S0 = (comp_betas - pred_S0)**2 - SSE_S0 = SSE_S0.sum(axis=0) # (S,) prediction error map - F_S0 = (alpha - SSE_S0) * (n_echos - 1) / (SSE_S0) - F_S0_maps[:, i_comp] = F_S0 - - # R2 Model - coeffs_R2 = (comp_betas * X2).sum(axis=0) / (X2**2).sum(axis=0) - pred_R2 = X2 * np.tile(coeffs_R2, (n_echos, 1)) - pred_R2_maps[:, :, i_comp] = pred_R2.T - SSE_R2 = (comp_betas - pred_R2)**2 - SSE_R2 = SSE_R2.sum(axis=0) - F_R2 = (alpha - SSE_R2) * (n_echos - 1) / (SSE_R2) - F_R2_maps[:, i_comp] = F_R2 + for j_echo in np.unique(adaptive_mask[adaptive_mask >= 3]): + mask_idx = adaptive_mask == j_echo + alpha = (np.abs(comp_betas[:j_echo])**2).sum(axis=0) + + # S0 Model + # (S,) model coefficient map + coeffs_S0 = (comp_betas[:j_echo] * X1[:j_echo, :]).sum(axis=0) /\ + (X1[:j_echo, :]**2).sum(axis=0) + pred_S0 = X1[:j_echo, :] * np.tile(coeffs_S0, (j_echo, 1)) + SSE_S0 = (comp_betas[:j_echo] - pred_S0)**2 + SSE_S0 = SSE_S0.sum(axis=0) # (S,) prediction error map + F_S0 = (alpha - SSE_S0) * (j_echo - 1) / (SSE_S0) + F_S0_maps[mask_idx[mask], i_comp] = F_S0[mask_idx[mask]] + + # R2 Model + coeffs_R2 = (comp_betas[:j_echo] * X2[:j_echo, :]).sum(axis=0) /\ + (X2[:j_echo, :]**2).sum(axis=0) + pred_R2 = X2[:j_echo] * np.tile(coeffs_R2, (j_echo, 1)) + SSE_R2 = (comp_betas[:j_echo] - pred_R2)**2 + SSE_R2 = SSE_R2.sum(axis=0) + F_R2 = (alpha - SSE_R2) * (j_echo - 1) / (SSE_R2) + F_R2_maps[mask_idx[mask], i_comp] = F_R2[mask_idx[mask]] + + if verbose: + pred_S0_maps[mask_idx[mask], :j_echo, i_comp] = pred_S0.T[mask_idx[mask], :] + pred_R2_maps[mask_idx[mask], :j_echo, i_comp] = pred_R2.T[mask_idx[mask], :] # compute weights as Z-values wtsZ = (WTS[:, i_comp] - WTS[:, i_comp].mean()) / WTS[:, i_comp].std() @@ -199,12 +206,15 @@ 
def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, comptable = comptable[sort_idx, :] mmix_corrected = mmix_corrected[:, sort_idx] betas = betas[..., sort_idx] - pred_R2_maps = pred_R2_maps[:, :, sort_idx] - pred_S0_maps = pred_S0_maps[:, :, sort_idx] F_R2_maps = F_R2_maps[:, sort_idx] F_S0_maps = F_S0_maps[:, sort_idx] Z_maps = Z_maps[:, sort_idx] tsoc_Babs = tsoc_Babs[:, sort_idx] + + if verbose: + pred_R2_maps = pred_R2_maps[:, :, sort_idx] + pred_S0_maps = pred_S0_maps[:, :, sort_idx] + if algorithm == 'kundu_v3': WTS = WTS[:, sort_idx] PSC = PSC[:, sort_idx] @@ -226,7 +236,7 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, io.filewrite(utils.unmask(Z_maps ** 2., mask), op.join(out_dir, '{0}metric_weights.nii'.format(label)), ref_img) - del pred_R2_maps, pred_S0_maps + del pred_R2_maps, pred_S0_maps comptable = pd.DataFrame(comptable, columns=['kappa', 'rho', diff --git a/tedana/tests/data/cornell_three_echo_outputs.txt b/tedana/tests/data/cornell_three_echo_outputs.txt index 533661bba..f4748f293 100644 --- a/tedana/tests/data/cornell_three_echo_outputs.txt +++ b/tedana/tests/data/cornell_three_echo_outputs.txt @@ -72,10 +72,6 @@ figures/comp_064.png figures/comp_065.png figures/comp_066.png figures/comp_067.png -figures/comp_068.png -figures/comp_069.png -figures/comp_070.png -figures/comp_071.png dn_ts_OC.nii.gz feats_OC2.nii.gz figures diff --git a/tedana/tests/test_model_fit_dependence_metrics.py b/tedana/tests/test_model_fit_dependence_metrics.py index e6dd2dacd..f243bacc3 100644 --- a/tedana/tests/test_model_fit_dependence_metrics.py +++ b/tedana/tests/test_model_fit_dependence_metrics.py @@ -17,58 +17,49 @@ def test_break_dependence_metrics(): catd = np.empty((n_samples, n_echos, n_vols)) tsoc = np.empty((n_samples, n_vols)) mmix = np.empty((n_vols, n_comps)) - t2s = np.empty((n_samples, n_vols)) + adaptive_mask = np.empty((n_samples)) tes = np.empty((n_echos)) ref_img = '' # Shape of catd is wrong catd = np.empty((n_samples + 1, 
n_echos, n_vols)) with pytest.raises(ValueError): - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, - algorithm='kundu_v3') + kundu_fit.dependence_metrics( + catd=catd, tsoc=tsoc, mmix=mmix, + adaptive_mask=adaptive_mask, tes=tes, ref_img=ref_img, + reindex=False, mmixN=None, algorithm='kundu_v3') - # Shape of t2s is wrong + # Shape of adaptive_mask is wrong catd = np.empty((n_samples, n_echos, n_vols)) - t2s = np.empty((n_samples + 1, n_vols)) + adaptive_mask = np.empty((n_samples + 1)) with pytest.raises(ValueError): - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, - algorithm='kundu_v3') + kundu_fit.dependence_metrics( + catd=catd, tsoc=tsoc, mmix=mmix, + adaptive_mask=adaptive_mask, tes=tes, ref_img=ref_img, + reindex=False, mmixN=None, algorithm='kundu_v3') # Shape of tsoc is wrong - t2s = np.empty((n_samples, n_vols)) + adaptive_mask = np.empty((n_samples)) tsoc = np.empty((n_samples + 1, n_vols)) with pytest.raises(ValueError): - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, - algorithm='kundu_v3') + kundu_fit.dependence_metrics( + catd=catd, tsoc=tsoc, mmix=mmix, + adaptive_mask=adaptive_mask, tes=tes, ref_img=ref_img, + reindex=False, mmixN=None, algorithm='kundu_v3') # Shape of catd is wrong catd = np.empty((n_samples, n_echos + 1, n_vols)) tsoc = np.empty((n_samples, n_vols)) with pytest.raises(ValueError): - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, - algorithm='kundu_v3') + kundu_fit.dependence_metrics( + catd=catd, tsoc=tsoc, mmix=mmix, + adaptive_mask=adaptive_mask, tes=tes, ref_img=ref_img, + reindex=False, mmixN=None, algorithm='kundu_v3') # Shape of catd is wrong catd = np.empty((n_samples, n_echos, n_vols + 1)) with 
pytest.raises(ValueError): - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, - algorithm='kundu_v3') - - # Shape of t2s is wrong - catd = np.empty((n_samples, n_echos, n_vols)) - t2s = np.empty((n_samples, n_vols + 1)) - with pytest.raises(ValueError): - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, - algorithm='kundu_v3') + kundu_fit.dependence_metrics( + catd=catd, tsoc=tsoc, mmix=mmix, + adaptive_mask=adaptive_mask, tes=tes, ref_img=ref_img, + reindex=False, mmixN=None, algorithm='kundu_v3') diff --git a/tedana/tests/test_utils.py b/tedana/tests/test_utils.py index 3e660982e..9651b2545 100644 --- a/tedana/tests/test_utils.py +++ b/tedana/tests/test_utils.py @@ -86,9 +86,9 @@ def test_make_adaptive_mask(): assert np.allclose(mask, utils.make_adaptive_mask(data)) # shapes are all the same assert mask.shape == masksum.shape == (64350,) - assert np.allclose(mask, masksum.astype(bool)) + assert np.allclose(mask, (masksum >= 3).astype(bool)) # mask has correct # of entries - assert mask.sum() == 50786 + assert mask.sum() == 41749 # masksum has correct values vals, counts = np.unique(masksum, return_counts=True) assert np.allclose(vals, np.array([0, 1, 2, 3])) @@ -99,7 +99,7 @@ def test_make_adaptive_mask(): mask, masksum = utils.make_adaptive_mask(data, mask=pjoin(datadir, 'mask.nii.gz'), getsum=True) - assert np.allclose(mask, masksum.astype(bool)) + assert np.allclose(mask, (masksum >= 3).astype(bool)) # SMOKE TESTS diff --git a/tedana/utils.py b/tedana/utils.py index d3954c4bb..b6f5f4f8c 100644 --- a/tedana/utils.py +++ b/tedana/utils.py @@ -91,15 +91,16 @@ def make_adaptive_mask(data, mask=None, getsum=False): if mask is None: # make it a boolean mask to (where we have at least 1 echo with good signal) - mask = masksum.astype(bool) + mask = (masksum >= 3).astype(bool) else: # if the user has supplied a 
binary mask mask = load_image(mask).astype(bool) masksum = masksum * mask + mask = (masksum >= 3).astype(bool) # reduce mask based on masksum # TODO: Use visual report to make checking the reduced mask easier - if np.any(masksum[mask] == 0): - n_bad_voxels = np.sum(masksum[mask] == 0) + if np.any(masksum[mask] < 3): + n_bad_voxels = np.sum(masksum[mask] < 3) LGR.warning('{0} voxels in user-defined mask do not have good ' 'signal. Removing voxels from mask.'.format(n_bad_voxels)) mask = masksum.astype(bool) diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index 4696f0a98..562f3d3aa 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -481,7 +481,7 @@ def tedana_workflow(data, tes, out_dir='.', mask=None, io.filewrite(s0_full, op.join(out_dir, 's0vG.nii'), ref_img) # optimally combine data - data_oc = combine.make_optcom(catd, tes, mask, t2s=t2s_full, combmode=combmode) + data_oc = combine.make_optcom(catd, tes, masksum, t2s=t2s_full, combmode=combmode) # regress out global signal unless explicitly not desired if 'gsr' in gscontrol: @@ -491,7 +491,7 @@ def tedana_workflow(data, tes, out_dir='.', mask=None, if mixm is None: # Identify and remove thermal noise from data dd, n_components = decomposition.tedpca(catd, data_oc, combmode, mask, - t2s_limited, t2s_full, ref_img, + masksum, t2s_full, ref_img, tes=tes, algorithm=tedpca, kdaw=10., rdaw=1., out_dir=out_dir, @@ -509,7 +509,7 @@ def tedana_workflow(data, tes, out_dir='.', mask=None, # generated from dimensionally reduced data using full data (i.e., data # with thermal noise) comptable, metric_maps, betas, mmix = metrics.dependence_metrics( - catd, data_oc, mmix_orig, t2s_limited, tes, + catd, data_oc, mmix_orig, masksum, tes, ref_img, reindex=True, label='meica_', out_dir=out_dir, algorithm='kundu_v2', verbose=verbose) comp_names = [io.add_decomp_prefix(comp, prefix='ica', max_value=comptable.index.max()) @@ -529,7 +529,7 @@ def tedana_workflow(data, tes, out_dir='.', 
mask=None, if ctab is None: comptable, metric_maps, betas, mmix = metrics.dependence_metrics( - catd, data_oc, mmix_orig, t2s_limited, tes, + catd, data_oc, mmix_orig, masksum, tes, ref_img, label='meica_', out_dir=out_dir, algorithm='kundu_v2', verbose=verbose) comptable = metrics.kundu_metrics(comptable, metric_maps)