[ENH] Add option to return nulls in compare_images() #97

Merged · 4 commits · Apr 4, 2023

neuromaps/nulls/spins.py (2 changes: 1 addition & 1 deletion)

@@ -125,7 +125,7 @@ def get_parcel_centroids(surfaces, parcellation=None, method='surface',
         vertices, faces = load_gifti(surf).agg_data()
         if parc is not None:
             labels = load_gifti(parc).agg_data()
-            labeltable = parc.labeltable.get_labels_as_dict()
+            labeltable = load_gifti(parc).labeltable.get_labels_as_dict()
 
             for lab in np.unique(labels):
                 if labeltable.get(lab) in drop:
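
For context on the one-line fix above, a minimal sketch (the filename is a hypothetical placeholder): `parc` may arrive as a path rather than an already-loaded image, so the label table is only safely reachable through load_gifti, mirroring what the neighbouring `labels` line already does.

from neuromaps.images import load_gifti

parc = 'lh.atlas.label.gii'                        # hypothetical GIFTI label file
gii = load_gifti(parc)                             # -> nibabel GiftiImage
labels = gii.agg_data()                            # per-vertex parcel labels
labeltable = gii.labeltable.get_labels_as_dict()   # {label value: region name}
# Accessing parc.labeltable directly raises AttributeError whenever `parc`
# is a plain filename rather than a loaded GiftiImage.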

neuromaps/stats.py (31 changes: 25 additions & 6 deletions)

@@ -17,7 +17,7 @@
 
 
 def compare_images(src, trg, metric='pearsonr', ignore_zero=True, nulls=None,
-                   nan_policy='omit'):
+                   nan_policy='omit', return_nulls=False):
     """
     Compares images `src` and `trg`
 
@@ -44,13 +44,19 @@ def compare_images(src, trg, metric='pearsonr', ignore_zero=True, nulls=None,
         the nan values to the callable metric (will return nan if the metric
         is `spearmanr` `or pearsonr`), 'raise' throws an error, 'omit' performs
         the calculations ignoring nan values. Default: 'omit'
+    return_nulls : bool, optional
+        Whether to return the null distribution of comparisons. Can only be set
+        to `True` if `nulls` is not None. Default: False
 
     Returns
     -------
     similarity : float
         Comparison metric between `src` and `trg`
     pvalue : float
         The p-value of `similarity`, if `nulls` is not None
+    nulls : (n_perm, ) array_like
+        Null distribution of similarity metrics. Only returned if
+        `return_nulls` is True.
     """
 
     methods = ('pearsonr', 'spearmanr')
@@ -62,6 +68,9 @@ def compare_images(src, trg, metric='pearsonr', ignore_zero=True, nulls=None,
         raise ValueError('Provided callable `metric` must accept two '
                          'inputs and return single value.')
 
+    if return_nulls and nulls is None:
+        raise ValueError('`return_nulls` cannot be True when `nulls` is None.')
+
     srcdata, trgdata = load_data(src), load_data(trg)
 
     # drop NaNs (if nan_policy==`omit`) and zeros (if ignore_zero=True)
@@ -90,13 +99,14 @@ def compare_images(src, trg, metric='pearsonr', ignore_zero=True, nulls=None,
         n_perm = nulls.shape[-1]
         nulls = nulls[mask]
         return permtest_metric(srcdata, trgdata, metric, n_perm=n_perm,
-                               nulls=nulls, nan_policy=nan_policy)
+                               nulls=nulls, nan_policy=nan_policy,
+                               return_nulls=return_nulls)
 
     return metric(srcdata, trgdata)
 
 
 def permtest_metric(a, b, metric='pearsonr', n_perm=1000, seed=0, nulls=None,
-                    nan_policy='propagate'):
+                    nan_policy='propagate', return_nulls=False):
     """
     Generates non-parameteric p-value of `a` and `b` for `metric`
 
@@ -128,13 +138,18 @@ def permtest_metric(a, b, metric='pearsonr', n_perm=1000, seed=0, nulls=None,
         Defines how to handle when inputs contain nan. 'propagate' returns nan,
         'raise' throws an error, 'omit' performs the calculations ignoring nan
         values. Default: 'propagate'
+    return_nulls : bool, optional
+        Whether to return the null distribution of comparisons. Default: False
 
     Returns
     -------
     similarity : float
         Similarity metric
     pvalue : float
         Non-parametric p-value
+    nulls : (n_perm, ) array_like
+        Null distribution of similarity metrics. Only returned if
+        `return_nulls` is True.
 
     Notes
     -----
@@ -176,15 +191,19 @@ def nan_wrap(a, b, nan_policy='propagate'):
     abs_true = np.abs(true_sim)
 
     permutations = np.ones(true_sim.shape)
+    nulldist = np.zeros(((n_perm, ) + true_sim.shape))
     for perm in range(n_perm):
         # permute `a` and determine whether correlations exceed original
         ap = a[rs.permutation(len(a))] if nulls is None else nulls[:, perm]
-        permutations += np.abs(
-            compfunc(ap, b, nan_policy=nan_policy)
-        ) >= abs_true
+        nullcomp = compfunc(ap, b, nan_policy=nan_policy)
+        permutations += np.abs(nullcomp) >= abs_true
+        nulldist[perm] = nullcomp
 
     pvals = permutations / (n_perm + 1)  # + 1 in denom accounts for true_sim
 
+    if return_nulls:
+        return true_sim, pvals, nulldist
+
     return true_sim, pvals
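
A hedged usage sketch of the new keyword follows. The annotation choices and the null-map generation mirror the package's standard workflow and are assumptions made for illustration; only the `return_nulls=True` keyword is what this PR adds.

from neuromaps import datasets, nulls, stats

# Two surface annotations in fsLR 32k space (illustrative choices)
src = datasets.fetch_annotation(source='hcps1200', desc='myelinmap')
trg = datasets.fetch_annotation(source='margulies2016', desc='fcgradient01')

# Spatial-autocorrelation-preserving null maps for the source annotation
rotated = nulls.alexander_bloch(src, atlas='fsLR', density='32k',
                                n_perm=100, seed=1234)

# Previous behaviour: similarity and non-parametric p-value only
corr, pval = stats.compare_images(src, trg, nulls=rotated)

# New behaviour: the null similarities are returned as well, so the null
# distribution itself can be inspected or plotted
corr, pval, dist = stats.compare_images(src, trg, nulls=rotated,
                                        return_nulls=True)
print(corr, pval, dist.shape)   # one similarity value per permutation

# Requesting the nulls without supplying them raises the new ValueError:
# stats.compare_images(src, trg, return_nulls=True)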