From a9068b4f07dc9f13d5cc88b187f2a68f2d4ff9b8 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 10 Jul 2019 18:03:41 -0400 Subject: [PATCH 01/22] Only use good echoes for metric calculation and optimal combination. --- tedana/combine.py | 41 ++++++++------ tedana/decomposition/pca.py | 9 ++- tedana/metrics/kundu_fit.py | 108 ++++++++++++++++++------------------ tedana/workflows/tedana.py | 8 +-- 4 files changed, 87 insertions(+), 79 deletions(-) diff --git a/tedana/combine.py b/tedana/combine.py index 67d87f056..79a36cc6f 100644 --- a/tedana/combine.py +++ b/tedana/combine.py @@ -77,7 +77,7 @@ def _combine_paid(data, tes): return combined -def make_optcom(data, tes, mask, t2s=None, combmode='t2s', verbose=True): +def make_optcom(data, tes, adaptive_mask, t2s=None, combmode='t2s', verbose=True): """ Optimally combine BOLD data across TEs. @@ -87,8 +87,8 @@ def make_optcom(data, tes, mask, t2s=None, combmode='t2s', verbose=True): Concatenated BOLD data. tes : (E,) :obj:`numpy.ndarray` Array of TEs, in seconds. - mask : (S,) :obj:`numpy.ndarray` - Brain mask in 3D array. + adaptive_mask : (S,) :obj:`numpy.ndarray` + Brain adaptive_mask in 3D array. t2s : (S [x T]) :obj:`numpy.ndarray` or None, optional Estimated T2* values. Only required if combmode = 't2s'. Default is None. @@ -122,12 +122,12 @@ def make_optcom(data, tes, mask, t2s=None, combmode='t2s', verbose=True): 'dimension of input data: {0} != ' '{1}'.format(len(tes), data.shape[1])) - if mask.ndim != 1: + if adaptive_mask.ndim != 1: raise ValueError('Mask is not 1D') - elif mask.shape[0] != data.shape[0]: + elif adaptive_mask.shape[0] != data.shape[0]: raise ValueError('Mask and data do not have same number of ' - 'voxels/samples: {0} != {1}'.format(mask.shape[0], - data.shape[0])) + 'voxels/samples: {0} != {1}'.format( + adaptive_mask.shape[0], data.shape[0])) if combmode not in ['t2s', 'paid']: raise ValueError("Argument 'combmode' must be either 't2s' or 'paid'") @@ -138,23 +138,32 @@ def make_optcom(data, tes, mask, t2s=None, combmode='t2s', verbose=True): LGR.warning("Argument 't2s' is not required if 'combmode' is 'paid'. " "'t2s' array will not be used.") - data = data[mask, :, :] # mask out empty voxels/samples - tes = np.array(tes)[np.newaxis, ...] # (1 x E) array_like - if combmode == 'paid': - LGR.info('Optimally combining data with parallel-acquired inhomogeneity ' - 'desensitized (PAID) method') - combined = _combine_paid(data, tes) + LGR.info('Optimally combining data with parallel-acquired ' + 'inhomogeneity desensitized (PAID) method') else: if t2s.ndim == 1: msg = 'Optimally combining data with voxel-wise T2 estimates' else: msg = ('Optimally combining data with voxel- and volume-wise T2 ' 'estimates') - t2s = t2s[mask, ..., np.newaxis] # mask out empty voxels/samples - LGR.info(msg) - combined = _combine_t2s(data, tes, t2s) + + mask = adaptive_mask >= 3 + data = data[mask, :, :] # mask out empty voxels/samples + tes = np.array(tes)[np.newaxis, ...] 
# (1 x E) array_like + combined = np.zeros((data.shape[0], data.shape[2])) + for echo in np.unique(adaptive_mask[mask]): + echo_idx = adaptive_mask[mask] == echo + + if combmode == 'paid': + combined[echo_idx, :] = _combine_paid(data[echo_idx, :echo, :], + tes[:echo]) + else: + t2s_ = t2s[mask, ..., np.newaxis] # mask out empty voxels/samples + + combined[echo_idx, :] = _combine_t2s( + data[echo_idx, :echo, :], tes[:, :echo], t2s_[echo_idx, ...]) combined = unmask(combined, mask) return combined diff --git a/tedana/decomposition/pca.py b/tedana/decomposition/pca.py index 399915845..8b6400811 100644 --- a/tedana/decomposition/pca.py +++ b/tedana/decomposition/pca.py @@ -83,7 +83,7 @@ def low_mem_pca(data): return u, s, v -def tedpca(data_cat, data_oc, combmode, mask, t2s, t2sG, +def tedpca(data_cat, data_oc, combmode, mask, adaptive_mask, t2sG, ref_img, tes, algorithm='mle', source_tes=-1, kdaw=10., rdaw=1., out_dir='.', verbose=False, low_mem=False): """ @@ -102,8 +102,8 @@ def tedpca(data_cat, data_oc, combmode, mask, t2s, t2sG, Poser 2006 mask : (S,) array_like Boolean mask array - t2s : (S,) array_like - Map of voxel-wise T2* estimates. + adaptive_mask : (S,) array_like + Adaptive mask t2sG : (S,) array_like Map of voxel-wise T2* estimates. ref_img : :obj:`str` or img_like @@ -206,7 +206,6 @@ def tedpca(data_cat, data_oc, combmode, mask, t2s, t2sG, else: LGR.info('Computing PCA of echo #{0}'.format(','.join([str(ee) for ee in source_tes]))) data = np.stack([data_cat[mask, ee, :] for ee in source_tes - 1], axis=1) - eim = np.squeeze(eimask(data)) data = np.squeeze(data[eim]) @@ -239,7 +238,7 @@ def tedpca(data_cat, data_oc, combmode, mask, t2s, t2sG, # Normalize each component's time series vTmixN = stats.zscore(comp_ts, axis=0) comptable, _, _, _ = metrics.dependence_metrics( - data_cat, data_oc, comp_ts, t2s, tes, ref_img, + data_cat, data_oc, comp_ts, adaptive_mask, tes, ref_img, reindex=False, mmixN=vTmixN, algorithm=None, label='mepca_', out_dir=out_dir, verbose=verbose) diff --git a/tedana/metrics/kundu_fit.py b/tedana/metrics/kundu_fit.py index 479bb59c6..818ea5303 100644 --- a/tedana/metrics/kundu_fit.py +++ b/tedana/metrics/kundu_fit.py @@ -18,7 +18,7 @@ Z_MAX = 8 -def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, +def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, reindex=False, mmixN=None, algorithm=None, label=None, out_dir='.', verbose=False): """ @@ -33,8 +33,9 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, mmix : (T x C) array_like Mixing matrix for converting input data to component space, where `C` is components and `T` is the same as in `catd` - t2s : (S [x T]) array_like - Limited T2* map or timeseries. + adaptive_mask : (S) array_like + Adaptive mask, where each voxel's value is the number of echoes with + "good signal". tes : list List of echo times associated with `catd`, in milliseconds ref_img : str or img_like @@ -63,16 +64,17 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, Dictionary containing component-specific metric maps to be used for component selection. If `algorithm` is None, then seldict will be None as well. 
- betas : :obj:`numpy.ndarray` + pes : :obj:`numpy.ndarray` mmix_new : :obj:`numpy.ndarray` """ - # Use t2s as mask - mask = t2s != 0 - if not (catd.shape[0] == t2s.shape[0] == mask.shape[0] == tsoc.shape[0]): + # Use adaptive_mask as mask + mask = adaptive_mask >= 3 + + if not (catd.shape[0] == adaptive_mask.shape[0] == tsoc.shape[0]): raise ValueError('First dimensions (number of samples) of catd ({0}), ' - 'tsoc ({1}), and t2s ({2}) do not ' + 'tsoc ({1}), and adaptive_mask ({2}) do not ' 'match'.format(catd.shape[0], tsoc.shape[0], - t2s.shape[0])) + adaptive_mask.shape[0])) elif catd.shape[1] != len(tes): raise ValueError('Second dimension of catd ({0}) does not match ' 'number of echoes provided (tes; ' @@ -80,17 +82,12 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, elif not (catd.shape[2] == tsoc.shape[1] == mmix.shape[0]): raise ValueError('Number of volumes in catd ({0}), ' 'tsoc ({1}), and mmix ({2}) do not ' - 'match.'.format(catd.shape[2], tsoc.shape[1], mmix.shape[0])) - elif t2s.ndim == 2: - if catd.shape[2] != t2s.shape[1]: - raise ValueError('Number of volumes in catd ' - '({0}) does not match number of volumes in ' - 't2s ({1})'.format(catd.shape[2], t2s.shape[1])) + 'match.'.format(catd.shape[2], tsoc.shape[1], + mmix.shape[0])) # mask everything we can tsoc = tsoc[mask, :] catd = catd[mask, ...] - t2s = t2s[mask] # demean optimal combination tsoc_dm = tsoc - tsoc.mean(axis=-1, keepdims=True) @@ -117,19 +114,18 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, totvar = (tsoc_B**2).sum() totvar_norm = (WTS**2).sum() - # compute Betas and means over TEs for TE-dependence analysis - betas = get_coeffs(utils.unmask(catd, mask), - mmix, - np.repeat(mask[:, np.newaxis], len(tes), axis=1)) - betas = betas[mask, ...] - n_voxels, n_echos, n_components = betas.shape + # compute parameter estimates and means over TEs for TE-dependence analysis + pes = get_coeffs(utils.unmask(catd, mask), mmix, + np.repeat(mask[:, np.newaxis], len(tes), axis=1)) + pes = pes[mask, ...] + n_voxels, n_echos, n_components = pes.shape mu = catd.mean(axis=-1, dtype=float) tes = np.reshape(tes, (n_echos, 1)) fmin, _, _ = getfbounds(n_echos) - # set up Xmats - X1 = mu.T # Model 1 - X2 = np.tile(tes, (1, n_voxels)) * mu.T / t2s.T # Model 2 + # set up design matrices + X1 = mu.T # Model 1: TE-independence model + X2 = np.tile(tes, (1, n_voxels)) * mu.T # Model 2: TE-dependence model # tables for component selection kappas = np.zeros([n_components]) @@ -144,30 +140,34 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, LGR.info('Fitting TE- and S0-dependent models to components') for i_comp in range(n_components): - # size of comp_betas is (n_echoes, n_samples) - comp_betas = np.atleast_3d(betas)[:, :, i_comp].T - alpha = (np.abs(comp_betas)**2).sum(axis=0) + # size of comp_pes is (n_echoes, n_samples) + comp_pes = np.atleast_3d(pes)[:, :, i_comp].T + alpha = (np.abs(comp_pes)**2).sum(axis=0) varex[i_comp] = (tsoc_B[:, i_comp]**2).sum() / totvar * 100. varex_norm[i_comp] = (WTS[:, i_comp]**2).sum() / totvar_norm * 100. 
- # S0 Model - # (S,) model coefficient map - coeffs_S0 = (comp_betas * X1).sum(axis=0) / (X1**2).sum(axis=0) - pred_S0 = X1 * np.tile(coeffs_S0, (n_echos, 1)) - pred_S0_maps[:, :, i_comp] = pred_S0.T - SSE_S0 = (comp_betas - pred_S0)**2 - SSE_S0 = SSE_S0.sum(axis=0) # (S,) prediction error map - F_S0 = (alpha - SSE_S0) * (n_echos - 1) / (SSE_S0) - F_S0_maps[:, i_comp] = F_S0 - - # R2 Model - coeffs_R2 = (comp_betas * X2).sum(axis=0) / (X2**2).sum(axis=0) - pred_R2 = X2 * np.tile(coeffs_R2, (n_echos, 1)) - pred_R2_maps[:, :, i_comp] = pred_R2.T - SSE_R2 = (comp_betas - pred_R2)**2 - SSE_R2 = SSE_R2.sum(axis=0) - F_R2 = (alpha - SSE_R2) * (n_echos - 1) / (SSE_R2) - F_R2_maps[:, i_comp] = F_R2 + for j_echo in np.unique(adaptive_mask): + mask_idx = adaptive_mask == j_echo + alpha = (np.abs(comp_pes[:j_echo])**2).sum(axis=0) + + # S0 Model + # (S,) model coefficient map + coeffs_S0 = (comp_pes[:j_echo] * X1[:j_echo, :]).sum(axis=0) / (X1[:j_echo, :]**2).sum(axis=0) + pred_S0 = X1[:j_echo, :] * np.tile(coeffs_S0, (j_echo, 1)) + pred_S0_maps[mask_idx[mask], :j_echo, i_comp] = pred_S0.T[mask_idx[mask], :] + SSE_S0 = (comp_pes[:j_echo] - pred_S0)**2 + SSE_S0 = SSE_S0.sum(axis=0) # (S,) prediction error map + F_S0 = (alpha - SSE_S0) * (j_echo - 1) / (SSE_S0) + F_S0_maps[mask_idx[mask], i_comp] = F_S0[mask_idx[mask]] + + # R2 Model + coeffs_R2 = (comp_pes[:j_echo] * X2[:j_echo, :]).sum(axis=0) / (X2[:j_echo, :]**2).sum(axis=0) + pred_R2 = X2[:j_echo] * np.tile(coeffs_R2, (j_echo, 1)) + pred_R2_maps[mask_idx[mask], :j_echo, i_comp] = pred_R2.T[mask_idx[mask], :] + SSE_R2 = (comp_pes[:j_echo] - pred_R2)**2 + SSE_R2 = SSE_R2.sum(axis=0) + F_R2 = (alpha - SSE_R2) * (j_echo - 1) / (SSE_R2) + F_R2_maps[mask_idx[mask], i_comp] = F_R2[mask_idx[mask]] # compute weights as Z-values wtsZ = (WTS[:, i_comp] - WTS[:, i_comp].mean()) / WTS[:, i_comp].std() @@ -181,7 +181,7 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, norm_weights = np.abs(wtsZ ** 2.) kappas[i_comp] = np.average(F_R2, weights=norm_weights) rhos[i_comp] = np.average(F_S0, weights=norm_weights) - del SSE_S0, SSE_R2, wtsZ, F_S0, F_R2, norm_weights, comp_betas + del SSE_S0, SSE_R2, wtsZ, F_S0, F_R2, norm_weights, comp_pes if algorithm != 'kundu_v3': del WTS, PSC, tsoc_B @@ -192,7 +192,7 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, sort_idx = comptable[:, 0].argsort()[::-1] comptable = comptable[sort_idx, :] mmix_new = mmix[:, sort_idx] - betas = betas[..., sort_idx] + pes = pes[..., sort_idx] pred_R2_maps = pred_R2_maps[:, :, sort_idx] pred_S0_maps = pred_S0_maps[:, :, sort_idx] F_R2_maps = F_R2_maps[:, sort_idx] @@ -209,7 +209,7 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, if verbose: # Echo-specific weight maps for each of the ICA components. - io.filewrite(utils.unmask(betas, mask), + io.filewrite(utils.unmask(pes, mask), op.join(out_dir, '{0}betas_catd.nii'.format(label)), ref_img) @@ -303,7 +303,7 @@ def dependence_metrics(catd, tsoc, mmix, t2s, tes, ref_img, else: seldict = None - return comptable, seldict, betas, mmix_new + return comptable, seldict, pes, mmix_new def kundu_metrics(comptable, metric_maps): @@ -342,10 +342,10 @@ def kundu_metrics(comptable, metric_maps): """ Generate Dice values for R2 and S0 models - - dice_FR2: Dice value of cluster-extent thresholded maps of R2-model betas - and F-statistics. - - dice_FS0: Dice value of cluster-extent thresholded maps of S0-model betas - and F-statistics. 
+ - dice_FR2: Dice value of cluster-extent thresholded maps of R2-model + parameter estimates and F-statistics. + - dice_FS0: Dice value of cluster-extent thresholded maps of S0-model + parameter estimates and F-statistics. """ comptable['dice_FR2'] = np.zeros(comptable.shape[0]) comptable['dice_FS0'] = np.zeros(comptable.shape[0]) diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index be816f732..7fd4e26fc 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -398,7 +398,7 @@ def tedana_workflow(data, tes, mask=None, mixm=None, ctab=None, manacc=None, io.filewrite(s0G, op.join(out_dir, 's0vG.nii'), ref_img) # optimally combine data - data_oc = combine.make_optcom(catd, tes, mask, t2s=t2sG, combmode=combmode) + data_oc = combine.make_optcom(catd, tes, masksum, t2s=t2sG, combmode=combmode) # regress out global signal unless explicitly not desired if 'gsr' in gscontrol: @@ -407,7 +407,7 @@ def tedana_workflow(data, tes, mask=None, mixm=None, ctab=None, manacc=None, if mixm is None: # Identify and remove thermal noise from data dd, n_components = decomposition.tedpca(catd, data_oc, combmode, mask, - t2s, t2sG, ref_img, + masksum, t2sG, ref_img, tes=tes, algorithm=tedpca, source_tes=source_tes, kdaw=10., rdaw=1., @@ -428,7 +428,7 @@ def tedana_workflow(data, tes, mask=None, mixm=None, ctab=None, manacc=None, # generated from dimensionally reduced data using full data (i.e., data # with thermal noise) comptable, metric_maps, betas, mmix = metrics.dependence_metrics( - catd, data_oc, mmix_orig, t2s, tes, + catd, data_oc, mmix_orig, masksum, tes, ref_img, reindex=True, label='meica_', out_dir=out_dir, algorithm='kundu_v2', verbose=verbose) np.savetxt(op.join(out_dir, 'meica_mix.1D'), mmix) @@ -439,7 +439,7 @@ def tedana_workflow(data, tes, mask=None, mixm=None, ctab=None, manacc=None, LGR.info('Using supplied mixing matrix from ICA') mmix_orig = np.loadtxt(op.join(out_dir, 'meica_mix.1D')) comptable, metric_maps, betas, mmix = metrics.dependence_metrics( - catd, data_oc, mmix_orig, t2s, tes, + catd, data_oc, mmix_orig, masksum, tes, ref_img, label='meica_', out_dir=out_dir, algorithm='kundu_v2', verbose=verbose) if ctab is None: From b0a2966ae9a1bf0d750f06a7fbe9031c3b86c5a2 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 10 Jul 2019 18:07:40 -0400 Subject: [PATCH 02/22] Change variable name. 
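tsoc_B held the per-component parameter estimates for the optimally
combined time series, so tsoc_pes ("parameter estimates") is the more
descriptive name. As a rough sketch of what the array contains (shapes
and values below are illustrative only), the get_coeffs call amounts to
an ordinary least-squares fit of the mixing matrix to each voxel's
demeaned optimally combined time series:

    import numpy as np

    # Illustrative shapes: S voxels, T volumes, C components.
    S, T, C = 1000, 200, 20
    rng = np.random.default_rng(0)
    tsoc_dm = rng.standard_normal((S, T))   # demeaned optimal combination
    mmix = rng.standard_normal((T, C))      # ICA mixing matrix

    # Roughly what get_coeffs(tsoc_dm, mmix, mask=None) returns:
    # one parameter estimate per voxel and component.
    tsoc_pes = np.linalg.lstsq(mmix, tsoc_dm.T, rcond=None)[0].T
    assert tsoc_pes.shape == (S, C)

The percent signal change map (PSC) then scales these estimates by each
voxel's mean signal.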
--- tedana/metrics/kundu_fit.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/tedana/metrics/kundu_fit.py b/tedana/metrics/kundu_fit.py index 818ea5303..ae9911ab6 100644 --- a/tedana/metrics/kundu_fit.py +++ b/tedana/metrics/kundu_fit.py @@ -98,10 +98,10 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, WTS = computefeats2(tsoc, mmixN, mask=None, normalize=False) # compute PSC dataset - shouldn't have to refit data - tsoc_B = get_coeffs(tsoc_dm, mmix, mask=None) + tsoc_pes = get_coeffs(tsoc_dm, mmix, mask=None) del tsoc_dm - tsoc_Babs = np.abs(tsoc_B) - PSC = tsoc_B / tsoc.mean(axis=-1, keepdims=True) * 100 + tsoc_Babs = np.abs(tsoc_pes) + PSC = tsoc_pes / tsoc.mean(axis=-1, keepdims=True) * 100 # compute skews to determine signs based on unnormalized weights, # correct mmix & WTS signs based on spatial distribution tails @@ -111,7 +111,7 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, mmix *= signs WTS *= signs PSC *= signs - totvar = (tsoc_B**2).sum() + totvar = (tsoc_pes**2).sum() totvar_norm = (WTS**2).sum() # compute parameter estimates and means over TEs for TE-dependence analysis @@ -143,7 +143,7 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, # size of comp_pes is (n_echoes, n_samples) comp_pes = np.atleast_3d(pes)[:, :, i_comp].T alpha = (np.abs(comp_pes)**2).sum(axis=0) - varex[i_comp] = (tsoc_B[:, i_comp]**2).sum() / totvar * 100. + varex[i_comp] = (tsoc_pes[:, i_comp]**2).sum() / totvar * 100. varex_norm[i_comp] = (WTS[:, i_comp]**2).sum() / totvar_norm * 100. for j_echo in np.unique(adaptive_mask): @@ -183,7 +183,7 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, rhos[i_comp] = np.average(F_S0, weights=norm_weights) del SSE_S0, SSE_R2, wtsZ, F_S0, F_R2, norm_weights, comp_pes if algorithm != 'kundu_v3': - del WTS, PSC, tsoc_B + del WTS, PSC, tsoc_pes # tabulate component values comptable = np.vstack([kappas, rhos, varex, varex_norm]).T @@ -202,7 +202,7 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, if algorithm == 'kundu_v3': WTS = WTS[:, sort_idx] PSC = PSC[:, sort_idx] - tsoc_B = tsoc_B[:, sort_idx] + tsoc_pes = tsoc_pes[:, sort_idx] else: mmix_new = mmix del mmix @@ -283,12 +283,12 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, del ccimg, tsoc_Babs if algorithm == 'kundu_v2': - # WTS, tsoc_B, PSC, and F_S0_maps are not used by Kundu v2.5 + # WTS, tsoc_pes, PSC, and F_S0_maps are not used by Kundu v2.5 selvars = ['Z_maps', 'F_R2_maps', 'Z_clmaps', 'F_R2_clmaps', 'F_S0_clmaps', 'Br_R2_clmaps', 'Br_S0_clmaps'] elif algorithm == 'kundu_v3': - selvars = ['WTS', 'tsoc_B', 'PSC', + selvars = ['WTS', 'tsoc_pes', 'PSC', 'Z_maps', 'F_R2_maps', 'F_S0_maps', 'Z_clmaps', 'F_R2_clmaps', 'F_S0_clmaps', 'Br_R2_clmaps', 'Br_S0_clmaps'] From 4eebf5f10de437c3597c7f9beb3ffb56eafe5134 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 10 Jul 2019 18:15:34 -0400 Subject: [PATCH 03/22] A bit more memory management. 
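pred_R2_maps and pred_S0_maps are only needed for the verbose outputs,
so they are now allocated and filled only when verbose=True. For a
rough sense of the saving (the dimensions below are made up; the real
ones depend on the data set):

    # Each prediction map is float64 with shape
    # (n_voxels, n_echos, n_components).
    n_voxels, n_echos, n_components = 50000, 4, 100
    bytes_per_map = n_voxels * n_echos * n_components * 8
    print(2 * bytes_per_map / 1e9)  # ~0.32 GB for both maps together

The get_coeffs call also passes add_const=True, so the regression
includes (and then discards) an intercept term.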
--- tedana/metrics/kundu_fit.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/tedana/metrics/kundu_fit.py b/tedana/metrics/kundu_fit.py index ae9911ab6..69afe522a 100644 --- a/tedana/metrics/kundu_fit.py +++ b/tedana/metrics/kundu_fit.py @@ -116,7 +116,8 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, # compute parameter estimates and means over TEs for TE-dependence analysis pes = get_coeffs(utils.unmask(catd, mask), mmix, - np.repeat(mask[:, np.newaxis], len(tes), axis=1)) + np.repeat(mask[:, np.newaxis], len(tes), axis=1), + add_const=True) pes = pes[mask, ...] n_voxels, n_echos, n_components = pes.shape mu = catd.mean(axis=-1, dtype=float) @@ -135,8 +136,9 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, Z_maps = np.zeros([n_voxels, n_components]) F_R2_maps = np.zeros([n_voxels, n_components]) F_S0_maps = np.zeros([n_voxels, n_components]) - pred_R2_maps = np.zeros([n_voxels, n_echos, n_components]) - pred_S0_maps = np.zeros([n_voxels, n_echos, n_components]) + if verbose: + pred_R2_maps = np.zeros([n_voxels, n_echos, n_components]) + pred_S0_maps = np.zeros([n_voxels, n_echos, n_components]) LGR.info('Fitting TE- and S0-dependent models to components') for i_comp in range(n_components): @@ -154,7 +156,6 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, # (S,) model coefficient map coeffs_S0 = (comp_pes[:j_echo] * X1[:j_echo, :]).sum(axis=0) / (X1[:j_echo, :]**2).sum(axis=0) pred_S0 = X1[:j_echo, :] * np.tile(coeffs_S0, (j_echo, 1)) - pred_S0_maps[mask_idx[mask], :j_echo, i_comp] = pred_S0.T[mask_idx[mask], :] SSE_S0 = (comp_pes[:j_echo] - pred_S0)**2 SSE_S0 = SSE_S0.sum(axis=0) # (S,) prediction error map F_S0 = (alpha - SSE_S0) * (j_echo - 1) / (SSE_S0) @@ -163,12 +164,15 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, # R2 Model coeffs_R2 = (comp_pes[:j_echo] * X2[:j_echo, :]).sum(axis=0) / (X2[:j_echo, :]**2).sum(axis=0) pred_R2 = X2[:j_echo] * np.tile(coeffs_R2, (j_echo, 1)) - pred_R2_maps[mask_idx[mask], :j_echo, i_comp] = pred_R2.T[mask_idx[mask], :] SSE_R2 = (comp_pes[:j_echo] - pred_R2)**2 SSE_R2 = SSE_R2.sum(axis=0) F_R2 = (alpha - SSE_R2) * (j_echo - 1) / (SSE_R2) F_R2_maps[mask_idx[mask], i_comp] = F_R2[mask_idx[mask]] + if verbose: + pred_S0_maps[mask_idx[mask], :j_echo, i_comp] = pred_S0.T[mask_idx[mask], :] + pred_R2_maps[mask_idx[mask], :j_echo, i_comp] = pred_R2.T[mask_idx[mask], :] + # compute weights as Z-values wtsZ = (WTS[:, i_comp] - WTS[:, i_comp].mean()) / WTS[:, i_comp].std() wtsZ[np.abs(wtsZ) > Z_MAX] = (Z_MAX * (np.abs(wtsZ) / wtsZ))[ From faaaedf342ceaec957af69293c7212933d2d78d2 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 10 Jul 2019 18:49:40 -0400 Subject: [PATCH 04/22] Fix. 
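Two fixes. The per-echo model-fitting loop now only visits adaptive-mask
values of at least 3, matching the voxels the mask actually retains. The
PCA is also capped at n_vols - 1 components: assuming each voxel's time
series has been demeaned (which the variance-normalization step does),
the voxels-by-time matrix has rank at most n_vols - 1, so any further
components would be numerically meaningless. A toy check with made-up
shapes:

    import numpy as np
    from sklearn.decomposition import PCA

    n_voxels, n_vols = 500, 50
    rng = np.random.default_rng(0)
    data = rng.standard_normal((n_voxels, n_vols))
    # Z-score each voxel's time series (removes the mean over time).
    data_z = (data - data.mean(axis=1, keepdims=True)) / data.std(axis=1, keepdims=True)

    print(np.linalg.matrix_rank(data_z))  # 49, i.e. n_vols - 1
    comp_ts = PCA(n_components=n_vols - 1).fit(data_z).components_.T
    print(comp_ts.shape)  # (50, 49): one time series per component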
--- tedana/decomposition/pca.py | 2 +- tedana/metrics/kundu_fit.py | 11 +++++++---- tedana/stats.py | 8 ++++---- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/tedana/decomposition/pca.py b/tedana/decomposition/pca.py index 8b6400811..987ab5ab2 100644 --- a/tedana/decomposition/pca.py +++ b/tedana/decomposition/pca.py @@ -217,7 +217,7 @@ def tedpca(data_cat, data_oc, combmode, mask, adaptive_mask, t2sG, elif low_mem: voxel_comp_weights, varex, comp_ts = low_mem_pca(data_z) else: - ppca = PCA(copy=False) + ppca = PCA(copy=False, n_components=(n_vols - 1)) ppca.fit(data_z) comp_ts = ppca.components_.T varex = ppca.explained_variance_ diff --git a/tedana/metrics/kundu_fit.py b/tedana/metrics/kundu_fit.py index 69afe522a..c7aaca723 100644 --- a/tedana/metrics/kundu_fit.py +++ b/tedana/metrics/kundu_fit.py @@ -148,7 +148,7 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, varex[i_comp] = (tsoc_pes[:, i_comp]**2).sum() / totvar * 100. varex_norm[i_comp] = (WTS[:, i_comp]**2).sum() / totvar_norm * 100. - for j_echo in np.unique(adaptive_mask): + for j_echo in np.unique(adaptive_mask[adaptive_mask >= 3]): mask_idx = adaptive_mask == j_echo alpha = (np.abs(comp_pes[:j_echo])**2).sum(axis=0) @@ -197,12 +197,15 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, comptable = comptable[sort_idx, :] mmix_new = mmix[:, sort_idx] pes = pes[..., sort_idx] - pred_R2_maps = pred_R2_maps[:, :, sort_idx] - pred_S0_maps = pred_S0_maps[:, :, sort_idx] F_R2_maps = F_R2_maps[:, sort_idx] F_S0_maps = F_S0_maps[:, sort_idx] Z_maps = Z_maps[:, sort_idx] tsoc_Babs = tsoc_Babs[:, sort_idx] + + if verbose: + pred_R2_maps = pred_R2_maps[:, :, sort_idx] + pred_S0_maps = pred_S0_maps[:, :, sort_idx] + if algorithm == 'kundu_v3': WTS = WTS[:, sort_idx] PSC = PSC[:, sort_idx] @@ -227,7 +230,7 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, io.filewrite(utils.unmask(Z_maps ** 2., mask), op.join(out_dir, '{0}metric_weights.nii'.format(label)), ref_img) - del pred_R2_maps, pred_S0_maps + del pred_R2_maps, pred_S0_maps comptable = pd.DataFrame(comptable, columns=['kappa', 'rho', diff --git a/tedana/stats.py b/tedana/stats.py index d4be7078d..46e391aad 100644 --- a/tedana/stats.py +++ b/tedana/stats.py @@ -142,11 +142,11 @@ def get_coeffs(data, X, mask=None, add_const=False): if add_const: # add intercept, if specified X = np.column_stack([X, np.ones((len(X), 1))]) - betas = np.linalg.lstsq(X, mdata, rcond=None)[0].T + param_estimates = np.linalg.lstsq(X, mdata, rcond=None)[0].T if add_const: # drop beta for intercept, if specified - betas = betas[:, :-1] + param_estimates = param_estimates[:, :-1] if mask is not None: - betas = utils.unmask(betas, mask) + param_estimates = utils.unmask(param_estimates, mask) - return betas + return param_estimates From 4caa6eca4ededd96b9bcdf7caf9606dfff9be705 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 10 Jul 2019 18:57:37 -0400 Subject: [PATCH 05/22] Fix tests. 
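The tests now pass an adaptive mask instead of a T2* map. The adaptive
mask is an integer array giving, for each voxel, the number of echoes
with usable signal; a hypothetical way to fake one in a unit test:

    import numpy as np

    rng = np.random.default_rng(0)
    n_samples, n_echos = 100, 5
    # Hypothetical fixture: each voxel gets 0..n_echos "good" echoes.
    adaptive_mask = rng.integers(0, n_echos + 1, size=n_samples)

    # dependence_metrics and make_optcom keep voxels with >= 3 good echoes.
    mask = adaptive_mask >= 3
    print(mask.sum(), 'of', n_samples, 'voxels retained')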
--- .../test_model_fit_dependence_metrics.py | 68 +++++++++---------- 1 file changed, 31 insertions(+), 37 deletions(-) diff --git a/tedana/tests/test_model_fit_dependence_metrics.py b/tedana/tests/test_model_fit_dependence_metrics.py index ee716d349..8c66db6c4 100644 --- a/tedana/tests/test_model_fit_dependence_metrics.py +++ b/tedana/tests/test_model_fit_dependence_metrics.py @@ -17,57 +17,61 @@ def test_break_dependence_metrics(): catd = np.empty((n_samples, n_echos, n_vols)) tsoc = np.empty((n_samples, n_vols)) mmix = np.empty((n_vols, n_comps)) - t2s = np.empty((n_samples, n_vols)) + adaptive_mask = np.empty((n_samples)) t2s_full = np.empty((n_samples, n_vols)) tes = np.empty((n_echos)) - combmode = 't2s' + combmode = 'adaptive_mask' ref_img = '' # Shape of catd is wrong catd = np.empty((n_samples+1, n_echos, n_vols)) with pytest.raises(ValueError) as e_info: - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, algorithm='kundu_v3') + kundu_fit.dependence_metrics( + catd=catd, tsoc=tsoc, mmix=mmix, + adaptive_mask=adaptive_mask, tes=tes, ref_img=ref_img, + reindex=False, mmixN=None, algorithm='kundu_v3') assert str(e_info.value) == ('First dimensions (number of samples) of ' 'catd ({0}), tsoc ({1}), ' - 'and t2s ({2}) do not match'.format( + 'and adaptive_mask ({2}) do not match'.format( catd.shape[0], tsoc.shape[0], - t2s.shape[0])) + adaptive_mask.shape[0])) - # Shape of t2s is wrong + # Shape of adaptive_mask is wrong catd = np.empty((n_samples, n_echos, n_vols)) - t2s = np.empty((n_samples+1, n_vols)) + adaptive_mask = np.empty((n_samples+1)) with pytest.raises(ValueError) as e_info: - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, algorithm='kundu_v3') + kundu_fit.dependence_metrics( + catd=catd, tsoc=tsoc, mmix=mmix, + adaptive_mask=adaptive_mask, tes=tes, ref_img=ref_img, + reindex=False, mmixN=None, algorithm='kundu_v3') assert str(e_info.value) == ('First dimensions (number of samples) of ' 'catd ({0}), tsoc ({1}), ' - 'and t2s ({2}) do not match'.format( + 'and adaptive_mask ({2}) do not match'.format( catd.shape[0], tsoc.shape[0], - t2s.shape[0])) + adaptive_mask.shape[0])) # Shape of tsoc is wrong - t2s = np.empty((n_samples, n_vols)) + adaptive_mask = np.empty((n_samples)) tsoc = np.empty((n_samples+1, n_vols)) with pytest.raises(ValueError) as e_info: - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, algorithm='kundu_v3') + kundu_fit.dependence_metrics( + catd=catd, tsoc=tsoc, mmix=mmix, + adaptive_mask=adaptive_mask, tes=tes, ref_img=ref_img, + reindex=False, mmixN=None, algorithm='kundu_v3') assert str(e_info.value) == ('First dimensions (number of samples) of ' 'catd ({0}), tsoc ({1}), ' - 'and t2s ({2}) do not match'.format( + 'and adaptive_mask ({2}) do not match'.format( catd.shape[0], tsoc.shape[0], - t2s.shape[0])) + adaptive_mask.shape[0])) # Shape of catd is wrong catd = np.empty((n_samples, n_echos+1, n_vols)) tsoc = np.empty((n_samples, n_vols)) with pytest.raises(ValueError) as e_info: - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, algorithm='kundu_v3') + kundu_fit.dependence_metrics( + catd=catd, tsoc=tsoc, mmix=mmix, + adaptive_mask=adaptive_mask, tes=tes, ref_img=ref_img, + reindex=False, mmixN=None, algorithm='kundu_v3') assert 
str(e_info.value) == ('Second dimension of catd ({0}) does not ' 'match number ' 'of echoes provided (tes; ' @@ -76,22 +80,12 @@ def test_break_dependence_metrics(): # Shape of catd is wrong catd = np.empty((n_samples, n_echos, n_vols+1)) with pytest.raises(ValueError) as e_info: - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, algorithm='kundu_v3') + kundu_fit.dependence_metrics( + catd=catd, tsoc=tsoc, mmix=mmix, + adaptive_mask=adaptive_mask, tes=tes, ref_img=ref_img, + reindex=False, mmixN=None, algorithm='kundu_v3') assert str(e_info.value) == ('Number of volumes in catd ' '({0}), tsoc ({1}), and ' 'mmix ({2}) do not ' 'match.'.format(catd.shape[2], tsoc.shape[1], mmix.shape[0])) - - # Shape of t2s is wrong - catd = np.empty((n_samples, n_echos, n_vols)) - t2s = np.empty((n_samples, n_vols+1)) - with pytest.raises(ValueError) as e_info: - kundu_fit.dependence_metrics(catd=catd, tsoc=tsoc, mmix=mmix, - t2s=t2s, tes=tes, ref_img=ref_img, - reindex=False, mmixN=None, algorithm='kundu_v3') - assert str(e_info.value) == ('Number of volumes in catd ({0}) ' - 'does not match number of volumes in ' - 't2s ({1})'.format(catd.shape[2], t2s.shape[1])) From fac57496c9c2e6d3d0926d0f45318da7893798d3 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 10 Jul 2019 19:02:34 -0400 Subject: [PATCH 06/22] Fix style. --- tedana/combine.py | 2 +- tedana/metrics/kundu_fit.py | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/tedana/combine.py b/tedana/combine.py index 79a36cc6f..605ddf781 100644 --- a/tedana/combine.py +++ b/tedana/combine.py @@ -127,7 +127,7 @@ def make_optcom(data, tes, adaptive_mask, t2s=None, combmode='t2s', verbose=True elif adaptive_mask.shape[0] != data.shape[0]: raise ValueError('Mask and data do not have same number of ' 'voxels/samples: {0} != {1}'.format( - adaptive_mask.shape[0], data.shape[0])) + adaptive_mask.shape[0], data.shape[0])) if combmode not in ['t2s', 'paid']: raise ValueError("Argument 'combmode' must be either 't2s' or 'paid'") diff --git a/tedana/metrics/kundu_fit.py b/tedana/metrics/kundu_fit.py index c7aaca723..d4958cec5 100644 --- a/tedana/metrics/kundu_fit.py +++ b/tedana/metrics/kundu_fit.py @@ -154,7 +154,8 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, # S0 Model # (S,) model coefficient map - coeffs_S0 = (comp_pes[:j_echo] * X1[:j_echo, :]).sum(axis=0) / (X1[:j_echo, :]**2).sum(axis=0) + coeffs_S0 = (comp_pes[:j_echo] * X1[:j_echo, :]).sum(axis=0) /\ + (X1[:j_echo, :]**2).sum(axis=0) pred_S0 = X1[:j_echo, :] * np.tile(coeffs_S0, (j_echo, 1)) SSE_S0 = (comp_pes[:j_echo] - pred_S0)**2 SSE_S0 = SSE_S0.sum(axis=0) # (S,) prediction error map @@ -162,7 +163,8 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, F_S0_maps[mask_idx[mask], i_comp] = F_S0[mask_idx[mask]] # R2 Model - coeffs_R2 = (comp_pes[:j_echo] * X2[:j_echo, :]).sum(axis=0) / (X2[:j_echo, :]**2).sum(axis=0) + coeffs_R2 = (comp_pes[:j_echo] * X2[:j_echo, :]).sum(axis=0) /\ + (X2[:j_echo, :]**2).sum(axis=0) pred_R2 = X2[:j_echo] * np.tile(coeffs_R2, (j_echo, 1)) SSE_R2 = (comp_pes[:j_echo] - pred_R2)**2 SSE_R2 = SSE_R2.sum(axis=0) From 10cd6753f7111bc82157c2156f07212f8a30a9be Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 10 Jul 2019 20:01:19 -0400 Subject: [PATCH 07/22] Limit mask to adaptive_mask >= 3 --- tedana/utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tedana/utils.py b/tedana/utils.py index 
e1ab7ed9e..398f2c639 100644 --- a/tedana/utils.py +++ b/tedana/utils.py @@ -86,15 +86,15 @@ def make_adaptive_mask(data, mask=None, getsum=False): if mask is None: # make it a boolean mask to (where we have at least 1 echo with good signal) - mask = masksum.astype(bool) + mask = (masksum >= 3).astype(bool) else: # if the user has supplied a binary mask mask = load_image(mask).astype(bool) masksum = masksum * mask # reduce mask based on masksum # TODO: Use visual report to make checking the reduced mask easier - if np.any(masksum[mask] == 0): - n_bad_voxels = np.sum(masksum[mask] == 0) + if np.any(masksum[mask] < 3): + n_bad_voxels = np.sum(masksum[mask] < 3) LGR.warning('{0} voxels in user-defined mask do not have good ' 'signal. Removing voxels from mask.'.format(n_bad_voxels)) mask = masksum.astype(bool) From a320d38262bec2045ccddabbdbf0d65dec473ca3 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 10 Jul 2019 20:23:33 -0400 Subject: [PATCH 08/22] Fix make_adaptive_mask --- tedana/utils.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tedana/utils.py b/tedana/utils.py index 398f2c639..07e73719a 100644 --- a/tedana/utils.py +++ b/tedana/utils.py @@ -91,6 +91,7 @@ def make_adaptive_mask(data, mask=None, getsum=False): # if the user has supplied a binary mask mask = load_image(mask).astype(bool) masksum = masksum * mask + mask = (masksum >= 3).astype(bool) # reduce mask based on masksum # TODO: Use visual report to make checking the reduced mask easier if np.any(masksum[mask] < 3): From e2da4d1b3654b4dddc5cd5f16aa4266df5ddd5d6 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 10 Jul 2019 20:39:10 -0400 Subject: [PATCH 09/22] Fix test. --- tedana/tests/test_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tedana/tests/test_utils.py b/tedana/tests/test_utils.py index 0785843f1..ecbf2c86a 100644 --- a/tedana/tests/test_utils.py +++ b/tedana/tests/test_utils.py @@ -84,9 +84,9 @@ def test_make_adaptive_mask(): assert np.allclose(mask, utils.make_adaptive_mask(data)) # shapes are all the same assert mask.shape == masksum.shape == (64350,) - assert np.allclose(mask, masksum.astype(bool)) + assert np.allclose(mask, (masksum >= 3).astype(bool)) # mask has correct # of entries - assert mask.sum() == 50786 + assert mask.sum() == 41749 # masksum has correct values vals, counts = np.unique(masksum, return_counts=True) assert np.allclose(vals, np.array([0, 1, 2, 3])) @@ -97,4 +97,4 @@ def test_make_adaptive_mask(): mask, masksum = utils.make_adaptive_mask(data, mask=pjoin(datadir, 'mask.nii.gz'), getsum=True) - assert np.allclose(mask, masksum.astype(bool)) + assert np.allclose(mask, (masksum >= 3).astype(bool)) From a37f86101dde28a036b30b6a8b3d36e9f688fe5d Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Wed, 10 Jul 2019 20:56:32 -0400 Subject: [PATCH 10/22] Update outputs. 
--- .circleci/tedana_outputs.txt | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.circleci/tedana_outputs.txt b/.circleci/tedana_outputs.txt index f717ea079..7cb8ed75d 100644 --- a/.circleci/tedana_outputs.txt +++ b/.circleci/tedana_outputs.txt @@ -122,17 +122,6 @@ comp_116.png comp_117.png comp_118.png comp_119.png -comp_120.png -comp_121.png -comp_122.png -comp_123.png -comp_124.png -comp_125.png -comp_126.png -comp_127.png -comp_128.png -comp_129.png -comp_130.png comp_table_ica.txt comp_table_pca.txt dn_ts_OC.nii From d852a6f71c790e831cfc8b193b1a3c2d0844e7d9 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Sun, 10 Nov 2019 23:25:23 -0500 Subject: [PATCH 11/22] Remove old file. --- .circleci/tedana_outputs.txt | 138 ----------------------------------- 1 file changed, 138 deletions(-) delete mode 100644 .circleci/tedana_outputs.txt diff --git a/.circleci/tedana_outputs.txt b/.circleci/tedana_outputs.txt deleted file mode 100644 index 43bdb1572..000000000 --- a/.circleci/tedana_outputs.txt +++ /dev/null @@ -1,138 +0,0 @@ -Component_Overview.png -Kappa_vs_Rho_Scatter.png -betas_OC.nii -betas_hik_OC.nii -comp_000.png -comp_001.png -comp_002.png -comp_003.png -comp_004.png -comp_005.png -comp_006.png -comp_007.png -comp_008.png -comp_009.png -comp_010.png -comp_011.png -comp_012.png -comp_013.png -comp_014.png -comp_015.png -comp_016.png -comp_017.png -comp_018.png -comp_019.png -comp_020.png -comp_021.png -comp_022.png -comp_023.png -comp_024.png -comp_025.png -comp_026.png -comp_027.png -comp_028.png -comp_029.png -comp_030.png -comp_031.png -comp_032.png -comp_033.png -comp_034.png -comp_035.png -comp_036.png -comp_037.png -comp_038.png -comp_039.png -comp_040.png -comp_041.png -comp_042.png -comp_043.png -comp_044.png -comp_045.png -comp_046.png -comp_047.png -comp_048.png -comp_049.png -comp_050.png -comp_051.png -comp_052.png -comp_053.png -comp_054.png -comp_055.png -comp_056.png -comp_057.png -comp_058.png -comp_059.png -comp_060.png -comp_061.png -comp_062.png -comp_063.png -comp_064.png -comp_065.png -comp_066.png -comp_067.png -comp_068.png -comp_069.png -comp_070.png -comp_071.png -comp_072.png -comp_073.png -comp_074.png -comp_075.png -comp_076.png -comp_077.png -comp_078.png -comp_079.png -comp_080.png -comp_081.png -comp_082.png -comp_083.png -comp_084.png -comp_085.png -comp_086.png -comp_087.png -comp_088.png -comp_089.png -comp_090.png -comp_091.png -comp_092.png -comp_093.png -comp_094.png -comp_095.png -comp_096.png -comp_097.png -comp_098.png -comp_099.png -comp_100.png -comp_101.png -comp_102.png -comp_103.png -comp_104.png -comp_105.png -comp_106.png -comp_107.png -comp_108.png -comp_109.png -comp_110.png -comp_111.png -comp_112.png -comp_113.png -comp_114.png -comp_115.png -comp_116.png -comp_117.png -comp_118.png -comp_119.png -comp_table_ica.txt -comp_table_pca.txt -dn_ts_OC.nii -feats_OC2.nii -figures -hik_ts_OC.nii -lowk_ts_OC.nii -meica_mix.1D -mepca_OC_components.nii -mepca_mix.1D -report.txt -s0v.nii -t2sv.nii -ts_OC.nii From 2379fc14d7286e1c5479e74d190c2abbf7e3b707 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Sun, 10 Nov 2019 23:51:09 -0500 Subject: [PATCH 12/22] Revert some variable name changes. 
--- tedana/decomposition/pca.py | 1 + tedana/metrics/kundu_fit.py | 44 ++++++++++++++++++------------------- tedana/stats.py | 8 +++---- 3 files changed, 27 insertions(+), 26 deletions(-) diff --git a/tedana/decomposition/pca.py b/tedana/decomposition/pca.py index 3638d9c8a..126d87c19 100644 --- a/tedana/decomposition/pca.py +++ b/tedana/decomposition/pca.py @@ -243,6 +243,7 @@ def tedpca(data_cat, data_oc, combmode, mask, adaptive_mask, t2sG, else: LGR.info('Computing PCA of echo #{0}'.format(','.join([str(ee) for ee in source_tes]))) data = np.stack([data_cat[mask, ee, :] for ee in source_tes - 1], axis=1) + eim = np.squeeze(eimask(data)) data = np.squeeze(data[eim]) diff --git a/tedana/metrics/kundu_fit.py b/tedana/metrics/kundu_fit.py index c19208b7e..7b4b3a05b 100644 --- a/tedana/metrics/kundu_fit.py +++ b/tedana/metrics/kundu_fit.py @@ -66,7 +66,7 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, Dictionary containing component-specific metric maps to be used for component selection. If `algorithm` is None, then seldict will be None as well. - pes : :obj:`numpy.ndarray` + betas : :obj:`numpy.ndarray` mmix_new : :obj:`numpy.ndarray` """ # Use adaptive_mask as mask @@ -104,10 +104,10 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, WTS = computefeats2(tsoc, mmixN, mask=None, normalize=False) # compute PSC dataset - shouldn't have to refit data - tsoc_pes = get_coeffs(tsoc_dm, mmix, mask=None) + tsoc_B = get_coeffs(tsoc_dm, mmix, mask=None) del tsoc_dm - tsoc_Babs = np.abs(tsoc_pes) - PSC = tsoc_pes / tsoc.mean(axis=-1, keepdims=True) * 100 + tsoc_Babs = np.abs(tsoc_B) + PSC = tsoc_B / tsoc.mean(axis=-1, keepdims=True) * 100 # compute skews to determine signs based on unnormalized weights, # correct mmix & WTS signs based on spatial distribution tails @@ -117,15 +117,15 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, mmix *= signs WTS *= signs PSC *= signs - totvar = (tsoc_pes**2).sum() + totvar = (tsoc_B**2).sum() totvar_norm = (WTS**2).sum() - # compute parameter estimates and means over TEs for TE-dependence analysis - pes = get_coeffs(utils.unmask(catd, mask), mmix, + # compute betas and means over TEs for TE-dependence analysis + betas = get_coeffs(utils.unmask(catd, mask), mmix, np.repeat(mask[:, np.newaxis], len(tes), axis=1), add_const=True) - pes = pes[mask, ...] - n_voxels, n_echos, n_components = pes.shape + betas = betas[mask, ...] + n_voxels, n_echos, n_components = betas.shape mu = catd.mean(axis=-1, dtype=float) tes = np.reshape(tes, (n_echos, 1)) fmin, _, _ = getfbounds(n_echos) @@ -149,9 +149,9 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, LGR.info('Fitting TE- and S0-dependent models to components') for i_comp in range(n_components): # size of comp_pes is (n_echoes, n_samples) - comp_pes = np.atleast_3d(pes)[:, :, i_comp].T + comp_pes = np.atleast_3d(betas)[:, :, i_comp].T alpha = (np.abs(comp_pes)**2).sum(axis=0) - varex[i_comp] = (tsoc_pes[:, i_comp]**2).sum() / totvar * 100. + varex[i_comp] = (tsoc_B[:, i_comp]**2).sum() / totvar * 100. 
varex_norm[i_comp] = (WTS[:, i_comp]**2).sum() / totvar_norm for j_echo in np.unique(adaptive_mask[adaptive_mask >= 3]): @@ -195,7 +195,7 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, rhos[i_comp] = np.average(F_S0, weights=norm_weights) del SSE_S0, SSE_R2, wtsZ, F_S0, F_R2, norm_weights, comp_pes if algorithm != 'kundu_v3': - del WTS, PSC, tsoc_pes + del WTS, PSC, tsoc_B # tabulate component values comptable = np.vstack([kappas, rhos, varex, varex_norm]).T @@ -204,7 +204,7 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, sort_idx = comptable[:, 0].argsort()[::-1] comptable = comptable[sort_idx, :] mmix_new = mmix[:, sort_idx] - pes = pes[..., sort_idx] + betas = betas[..., sort_idx] F_R2_maps = F_R2_maps[:, sort_idx] F_S0_maps = F_S0_maps[:, sort_idx] Z_maps = Z_maps[:, sort_idx] @@ -217,14 +217,14 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, if algorithm == 'kundu_v3': WTS = WTS[:, sort_idx] PSC = PSC[:, sort_idx] - tsoc_pes = tsoc_pes[:, sort_idx] + tsoc_B = tsoc_B[:, sort_idx] else: mmix_new = mmix del mmix if verbose: # Echo-specific weight maps for each of the ICA components. - io.filewrite(utils.unmask(pes, mask), + io.filewrite(utils.unmask(betas, mask), op.join(out_dir, '{0}betas_catd.nii'.format(label)), ref_img) @@ -298,12 +298,12 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, del ccimg, tsoc_Babs if algorithm == 'kundu_v2': - # WTS, tsoc_pes, PSC, and F_S0_maps are not used by Kundu v2.5 + # WTS, tsoc_B, PSC, and F_S0_maps are not used by Kundu v2.5 selvars = ['Z_maps', 'F_R2_maps', 'Z_clmaps', 'F_R2_clmaps', 'F_S0_clmaps', 'Br_R2_clmaps', 'Br_S0_clmaps'] elif algorithm == 'kundu_v3': - selvars = ['WTS', 'tsoc_pes', 'PSC', + selvars = ['WTS', 'tsoc_B', 'PSC', 'Z_maps', 'F_R2_maps', 'F_S0_maps', 'Z_clmaps', 'F_R2_clmaps', 'F_S0_clmaps', 'Br_R2_clmaps', 'Br_S0_clmaps'] @@ -318,7 +318,7 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, else: seldict = None - return comptable, seldict, pes, mmix_new + return comptable, seldict, betas, mmix_new def kundu_metrics(comptable, metric_maps): @@ -357,10 +357,10 @@ def kundu_metrics(comptable, metric_maps): """ Generate Dice values for R2 and S0 models - - dice_FR2: Dice value of cluster-extent thresholded maps of R2-model - parameter estimates and F-statistics. - - dice_FS0: Dice value of cluster-extent thresholded maps of S0-model - parameter estimates and F-statistics. + - dice_FR2: Dice value of cluster-extent thresholded maps of R2-model betas + and F-statistics. + - dice_FS0: Dice value of cluster-extent thresholded maps of S0-model betas + and F-statistics. 
""" comptable['dice_FR2'] = np.zeros(comptable.shape[0]) comptable['dice_FS0'] = np.zeros(comptable.shape[0]) diff --git a/tedana/stats.py b/tedana/stats.py index 8bda671dd..601d800a0 100644 --- a/tedana/stats.py +++ b/tedana/stats.py @@ -144,11 +144,11 @@ def get_coeffs(data, X, mask=None, add_const=False): if add_const: # add intercept, if specified X = np.column_stack([X, np.ones((len(X), 1))]) - param_estimates = np.linalg.lstsq(X, mdata, rcond=None)[0].T + betas = np.linalg.lstsq(X, mdata, rcond=None)[0].T if add_const: # drop beta for intercept, if specified - param_estimates = param_estimates[:, :-1] + betas = betas[:, :-1] if mask is not None: - param_estimates = utils.unmask(param_estimates, mask) + betas = utils.unmask(betas, mask) - return param_estimates + return betas From 0062679fbb7d949def2dd549a5e1b7025281ea5d Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Sun, 10 Nov 2019 23:53:34 -0500 Subject: [PATCH 13/22] Revert some more. --- tedana/metrics/kundu_fit.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/tedana/metrics/kundu_fit.py b/tedana/metrics/kundu_fit.py index 7b4b3a05b..b31bd06d8 100644 --- a/tedana/metrics/kundu_fit.py +++ b/tedana/metrics/kundu_fit.py @@ -120,10 +120,11 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, totvar = (tsoc_B**2).sum() totvar_norm = (WTS**2).sum() - # compute betas and means over TEs for TE-dependence analysis - betas = get_coeffs(utils.unmask(catd, mask), mmix, - np.repeat(mask[:, np.newaxis], len(tes), axis=1), - add_const=True) + # compute Betas and means over TEs for TE-dependence analysis + betas = get_coeffs(utils.unmask(catd, mask), + mmix, + np.repeat(mask[:, np.newaxis], len(tes), axis=1), + add_const=True) betas = betas[mask, ...] n_voxels, n_echos, n_components = betas.shape mu = catd.mean(axis=-1, dtype=float) @@ -148,31 +149,31 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, LGR.info('Fitting TE- and S0-dependent models to components') for i_comp in range(n_components): - # size of comp_pes is (n_echoes, n_samples) - comp_pes = np.atleast_3d(betas)[:, :, i_comp].T - alpha = (np.abs(comp_pes)**2).sum(axis=0) + # size of comp_betas is (n_echoes, n_samples) + comp_betas = np.atleast_3d(betas)[:, :, i_comp].T + alpha = (np.abs(comp_betas)**2).sum(axis=0) varex[i_comp] = (tsoc_B[:, i_comp]**2).sum() / totvar * 100. 
varex_norm[i_comp] = (WTS[:, i_comp]**2).sum() / totvar_norm for j_echo in np.unique(adaptive_mask[adaptive_mask >= 3]): mask_idx = adaptive_mask == j_echo - alpha = (np.abs(comp_pes[:j_echo])**2).sum(axis=0) + alpha = (np.abs(comp_betas[:j_echo])**2).sum(axis=0) # S0 Model # (S,) model coefficient map - coeffs_S0 = (comp_pes[:j_echo] * X1[:j_echo, :]).sum(axis=0) /\ + coeffs_S0 = (comp_betas[:j_echo] * X1[:j_echo, :]).sum(axis=0) /\ (X1[:j_echo, :]**2).sum(axis=0) pred_S0 = X1[:j_echo, :] * np.tile(coeffs_S0, (j_echo, 1)) - SSE_S0 = (comp_pes[:j_echo] - pred_S0)**2 + SSE_S0 = (comp_betas[:j_echo] - pred_S0)**2 SSE_S0 = SSE_S0.sum(axis=0) # (S,) prediction error map F_S0 = (alpha - SSE_S0) * (j_echo - 1) / (SSE_S0) F_S0_maps[mask_idx[mask], i_comp] = F_S0[mask_idx[mask]] # R2 Model - coeffs_R2 = (comp_pes[:j_echo] * X2[:j_echo, :]).sum(axis=0) /\ + coeffs_R2 = (comp_betas[:j_echo] * X2[:j_echo, :]).sum(axis=0) /\ (X2[:j_echo, :]**2).sum(axis=0) pred_R2 = X2[:j_echo] * np.tile(coeffs_R2, (j_echo, 1)) - SSE_R2 = (comp_pes[:j_echo] - pred_R2)**2 + SSE_R2 = (comp_betas[:j_echo] - pred_R2)**2 SSE_R2 = SSE_R2.sum(axis=0) F_R2 = (alpha - SSE_R2) * (j_echo - 1) / (SSE_R2) F_R2_maps[mask_idx[mask], i_comp] = F_R2[mask_idx[mask]] @@ -193,7 +194,7 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, norm_weights = np.abs(wtsZ ** 2.) kappas[i_comp] = np.average(F_R2, weights=norm_weights) rhos[i_comp] = np.average(F_S0, weights=norm_weights) - del SSE_S0, SSE_R2, wtsZ, F_S0, F_R2, norm_weights, comp_pes + del SSE_S0, SSE_R2, wtsZ, F_S0, F_R2, norm_weights, comp_betas if algorithm != 'kundu_v3': del WTS, PSC, tsoc_B From a565e68b0020a5a980ad608dcabe6d7376a79626 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Sun, 1 Dec 2019 18:46:29 -0500 Subject: [PATCH 14/22] Fix style issues with test. --- tedana/tests/test_model_fit_dependence_metrics.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tedana/tests/test_model_fit_dependence_metrics.py b/tedana/tests/test_model_fit_dependence_metrics.py index 997b6f1c0..f243bacc3 100644 --- a/tedana/tests/test_model_fit_dependence_metrics.py +++ b/tedana/tests/test_model_fit_dependence_metrics.py @@ -18,9 +18,7 @@ def test_break_dependence_metrics(): tsoc = np.empty((n_samples, n_vols)) mmix = np.empty((n_vols, n_comps)) adaptive_mask = np.empty((n_samples)) - t2s_full = np.empty((n_samples, n_vols)) tes = np.empty((n_echos)) - combmode = 'adaptive_mask' ref_img = '' # Shape of catd is wrong From 261b11009ea6dbf9bbbe3ba930d8e81ba0999d60 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Sat, 28 Dec 2019 16:51:27 -0500 Subject: [PATCH 15/22] Add constant when calculating betas. Removing this seemed to cause the breaking test in the last merge commit. --- tedana/metrics/kundu_fit.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tedana/metrics/kundu_fit.py b/tedana/metrics/kundu_fit.py index 77855defd..31958ad7e 100644 --- a/tedana/metrics/kundu_fit.py +++ b/tedana/metrics/kundu_fit.py @@ -123,7 +123,8 @@ def dependence_metrics(catd, tsoc, mmix, adaptive_mask, tes, ref_img, # compute Betas and means over TEs for TE-dependence analysis betas = get_coeffs(utils.unmask(catd, mask), mmix_corrected, - np.repeat(mask[:, np.newaxis], len(tes), axis=1)) + np.repeat(mask[:, np.newaxis], len(tes), axis=1), + add_const=True) betas = betas[mask, ...] 
n_voxels, n_echos, n_components = betas.shape mu = catd.mean(axis=-1, dtype=float) From c5417adc53a3ef2c978f5b79aacf27c150b18a4c Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Sun, 2 Feb 2020 11:38:22 -0500 Subject: [PATCH 16/22] Drop extra components from outputs file. --- tedana/tests/data/cornell_three_echo_outputs.txt | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tedana/tests/data/cornell_three_echo_outputs.txt b/tedana/tests/data/cornell_three_echo_outputs.txt index 533661bba..f4748f293 100644 --- a/tedana/tests/data/cornell_three_echo_outputs.txt +++ b/tedana/tests/data/cornell_three_echo_outputs.txt @@ -72,10 +72,6 @@ figures/comp_064.png figures/comp_065.png figures/comp_066.png figures/comp_067.png -figures/comp_068.png -figures/comp_069.png -figures/comp_070.png -figures/comp_071.png dn_ts_OC.nii.gz feats_OC2.nii.gz figures From 99129da69dabbd8cd0fc1a403d25fbfae5b1144d Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Fri, 17 Apr 2020 09:52:50 -0400 Subject: [PATCH 17/22] Improve tedpca docstring. Co-Authored-By: Elizabeth DuPre --- tedana/decomposition/pca.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/decomposition/pca.py b/tedana/decomposition/pca.py index 79be9ffee..07e6a253d 100644 --- a/tedana/decomposition/pca.py +++ b/tedana/decomposition/pca.py @@ -66,7 +66,7 @@ def tedpca(data_cat, data_oc, combmode, mask, adaptive_mask, t2sG, mask : (S,) array_like Boolean mask array adaptive_mask : (S,) array_like - Adaptive mask + Adaptive mask of the data indicating the number of echos with signal at each voxel t2sG : (S,) array_like Map of voxel-wise T2* estimates. ref_img : :obj:`str` or img_like From b2496160b6f92ea5b4554d74cd0473407e3987c5 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Fri, 17 Apr 2020 12:14:07 -0400 Subject: [PATCH 18/22] Improve description of adaptive mask. --- tedana/combine.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tedana/combine.py b/tedana/combine.py index 61d21dc56..6b5cc9c78 100644 --- a/tedana/combine.py +++ b/tedana/combine.py @@ -108,7 +108,10 @@ def make_optcom(data, tes, adaptive_mask, t2s=None, combmode='t2s', verbose=True tes : (E,) :obj:`numpy.ndarray` Array of TEs, in seconds. adaptive_mask : (S,) :obj:`numpy.ndarray` - Brain adaptive_mask in 3D array. + Adaptive mask of the data indicating those voxels for which at least + three echos are available. If this is less than the total number of + collected echos, we assume that later echos do not provided + meaningful signal. t2s : (S [x T]) :obj:`numpy.ndarray` or None, optional Estimated T2* values. Only required if combmode = 't2s'. Default is None. From 47719d9afd3f5c2524235c2cb2eac242e0d42bb1 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Fri, 17 Apr 2020 13:11:04 -0400 Subject: [PATCH 19/22] Change empty to unstable. --- tedana/combine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/combine.py b/tedana/combine.py index 6b5cc9c78..e36d10405 100644 --- a/tedana/combine.py +++ b/tedana/combine.py @@ -173,7 +173,7 @@ def make_optcom(data, tes, adaptive_mask, t2s=None, combmode='t2s', verbose=True LGR.info(msg) mask = adaptive_mask >= 3 - data = data[mask, :, :] # mask out empty voxels/samples + data = data[mask, :, :] # mask out unstable voxels/samples tes = np.array(tes)[np.newaxis, ...] 
# (1 x E) array_like combined = np.zeros((data.shape[0], data.shape[2])) for echo in np.unique(adaptive_mask[mask]): From e4353436390458f44fe9651e9acf2edada249c83 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Fri, 17 Apr 2020 22:51:25 -0400 Subject: [PATCH 20/22] Update tedana/combine.py Co-Authored-By: Elizabeth DuPre --- tedana/combine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tedana/combine.py b/tedana/combine.py index e36d10405..01b799e43 100644 --- a/tedana/combine.py +++ b/tedana/combine.py @@ -108,7 +108,7 @@ def make_optcom(data, tes, adaptive_mask, t2s=None, combmode='t2s', verbose=True tes : (E,) :obj:`numpy.ndarray` Array of TEs, in seconds. adaptive_mask : (S,) :obj:`numpy.ndarray` - Adaptive mask of the data indicating those voxels for which at least + Adaptive mask of the data indicating the number of echos with signal at each voxel three echos are available. If this is less than the total number of collected echos, we assume that later echos do not provided meaningful signal. From e544bb3ddb914ef269aefe3a8a64d8fedf0b0749 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Fri, 17 Apr 2020 22:54:59 -0400 Subject: [PATCH 21/22] Update tedana/combine.py Co-Authored-By: Elizabeth DuPre --- tedana/combine.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tedana/combine.py b/tedana/combine.py index 01b799e43..3c700383c 100644 --- a/tedana/combine.py +++ b/tedana/combine.py @@ -99,7 +99,10 @@ def _combine_paid(data, tes): def make_optcom(data, tes, adaptive_mask, t2s=None, combmode='t2s', verbose=True): """ - Optimally combine BOLD data across TEs. + Optimally combine BOLD data across TEs, using only those echos with reliable signal + across at least three echos. If the number of echos providing reliable signal is greater + than three but less than the total number of collected echos, we assume that later + echos do not provided meaningful signal. Parameters ---------- From 1b19c768da0efbf6eb5cae11d22dbaac5e07ba84 Mon Sep 17 00:00:00 2001 From: Taylor Salo Date: Fri, 17 Apr 2020 23:00:58 -0400 Subject: [PATCH 22/22] Update tedana/combine.py Co-Authored-By: Elizabeth DuPre --- tedana/combine.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tedana/combine.py b/tedana/combine.py index 3c700383c..b151173b9 100644 --- a/tedana/combine.py +++ b/tedana/combine.py @@ -111,10 +111,7 @@ def make_optcom(data, tes, adaptive_mask, t2s=None, combmode='t2s', verbose=True tes : (E,) :obj:`numpy.ndarray` Array of TEs, in seconds. adaptive_mask : (S,) :obj:`numpy.ndarray` - Adaptive mask of the data indicating the number of echos with signal at each voxel - three echos are available. If this is less than the total number of - collected echos, we assume that later echos do not provided - meaningful signal. + Adaptive mask of the data indicating the number of echos with signal at each voxel t2s : (S [x T]) :obj:`numpy.ndarray` or None, optional Estimated T2* values. Only required if combmode = 't2s'. Default is None.
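For reference, a minimal sketch of the T2*-weighted combination that the
adaptive-mask changes feed into (the scheme usually attributed to Posse
et al., 1999). Shapes and echo times below are illustrative, and the
units only need to be consistent between tes and t2s; this is a sketch
of the idea, not the exact _combine_t2s implementation:

    import numpy as np

    n_voxels, n_echos, n_vols = 1000, 4, 100
    rng = np.random.default_rng(0)
    data = rng.standard_normal((n_voxels, n_echos, n_vols))
    tes = np.array([[15., 39., 63., 87.]])           # (1, E) echo times
    t2s = rng.uniform(20., 80., size=(n_voxels, 1))  # voxel-wise T2*

    # Echo weights alpha_e = TE_e * exp(-TE_e / T2*), then a weighted
    # average over echoes.
    alpha = tes * np.exp(-tes / t2s)                 # (S, E)
    combined = (data * alpha[:, :, None]).sum(axis=1) / alpha.sum(axis=1)[:, None]
    print(combined.shape)                            # (1000, 100)

With the adaptive mask, this combination is applied separately to each
echo-count group, using only that group's first `echo` echoes.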