diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc
index e7c5910310f..1787f9b2b5e 100644
--- a/doc/changes/latest.inc
+++ b/doc/changes/latest.inc
@@ -27,12 +27,18 @@ Changelog
 - Add :meth:`mne.MixedSourceEstimate.surface` and :meth:`mne.MixedSourceEstimate.volume` methods to allow surface and volume extraction by `Eric Larson`_
 
+- Add :meth:`mne.VectorSourceEstimate.project` to project vector source estimates onto the direction of maximum source power by `Eric Larson`_
+
 - Add support to :func:`mne.extract_label_time_course` for vector-valued and volumetric source estimates by `Eric Larson`_
 
 - Add method :meth:`mne.VolSourceEstimate.in_label` by `Eric Larson`_
 
 - Add support for mixed source spaces to :func:`mne.compute_source_morph` by `Eric Larson`_
 
+- Add support for omitting the SDR step in volumetric morphing by passing ``niter_sdr=()`` to `mne.compute_source_morph` by `Eric Larson`_
+
+- Add support for passing a string argument to ``bg_img`` in `mne.viz.plot_volume_source_estimates` by `Eric Larson`_
+
 - Add support for providing the destination surface source space in the ``src_to`` argument of :func:`mne.compute_source_morph` by `Eric Larson`_
 
 - Add ``tol_kind`` option to :func:`mne.compute_rank` by `Eric Larson`_
 
@@ -89,6 +95,8 @@ Changelog
 
 - Add support for whitening and multiple channel types in :func:`mne.beamformer.make_dics` by `Marijn van Vliet`_
 
+- Add better error message when trying to save incompatible `~mne.Evoked` objects to the same file by `Eric Larson`_
+
 - Add support for loading complex numbers from mat files by `Thomas Hartmann`_
 
 - Add generic reader function :func:`mne.io.read_raw` that loads files based on their extensions (it wraps the underlying specific ``read_raw_xxx`` functions) by `Clemens Brunner`_
 
@@ -116,6 +124,10 @@ Bug
 
 - Fix bug with :func:`mne.compute_source_morph` when more than one volume source space was present (e.g., when using labels) where only the first label would be interpolated when ``mri_resolution=True`` by `Eric Larson`_
 
+- Fix bug with :func:`mne.compute_source_morph` when morphing to a volume source space where ``src_to`` is used and the destination subject is not ``fsaverage`` by `Eric Larson`_
+
+- Fix bug with :func:`mne.compute_source_morph` where outermost voxels in the destination source space could be errantly omitted by `Eric Larson`_
+
 - Fix bug with :func:`mne.minimum_norm.compute_source_psd_epochs` and :func:`mne.minimum_norm.source_band_induced_power` raised errors when ``method='eLORETA'`` by `Eric Larson`_
 
 - Fix bug with :func:`mne.minimum_norm.apply_inverse` where the explained variance did not work for complex data by `Eric Larson`_
 
@@ -234,6 +246,8 @@ API
 
 - The method ``stc_mixed.plot_surface`` for a :class:`mne.MixedSourceEstimate` has been deprecated in favor of :meth:`stc.surface().plot(...) <mne.SourceEstimate.plot>` by `Eric Larson`_
 
+- The method ``stc.normal`` for :class:`mne.VectorSourceEstimate` has been deprecated in favor of :meth:`stc.project('normal', src) <mne.VectorSourceEstimate.project>` by `Eric Larson`_
+
 - Add ``use_dev_head_trans`` parameter to :func:`mne.preprocessing.annotate_movement` to allow choosing the device to head transform is used to define the fixed cHPI coordinates by `Luke Bloy`_
 
 - The function ``mne.channels.read_dig_captrack`` will be deprecated in version 0.22 in favor of :func:`mne.channels.read_dig_captrak` to correct the spelling error: "captraCK" -> "captraK", by `Stefan Appelhoff`_
 
diff --git a/examples/inverse/plot_morph_volume_stc.py b/examples/inverse/plot_morph_volume_stc.py
index a6935de3fc9..4b7581ab007 100644
--- a/examples/inverse/plot_morph_volume_stc.py
+++ b/examples/inverse/plot_morph_volume_stc.py
@@ -9,7 +9,8 @@
 :class:`mne.VolSourceEstimate` to a common reference space. We achieve this
 using :class:`mne.SourceMorph`. Pre-computed data will be morphed based on
 an affine transformation and a nonlinear registration method
-known as Symmetric Diffeomorphic Registration (SDR) by Avants et al. [1]_.
+known as Symmetric Diffeomorphic Registration (SDR) by
+:footcite:`AvantsEtAl2008`.
 
 Transformation is estimated from the subject's anatomical T1 weighted MRI
 (brain) to `FreeSurfer's 'fsaverage' T1 weighted MRI
 (brain) <https://surfer.nmr.mgh.harvard.edu/fswiki/FsAverage>`_.
@@ -18,13 +19,6 @@
 Afterwards the transformation will be applied to the volumetric source
 estimate. The result will be plotted, showing the fsaverage T1 weighted
 anatomical MRI, overlaid with the morphed volumetric source estimate.
-
-References
-----------
-.. [1] Avants, B. B., Epstein, C. L., Grossman, M., & Gee, J. C. (2009).
-       Symmetric Diffeomorphic Image Registration with Cross- Correlation:
-       Evaluating Automated Labeling of Elderly and Neurodegenerative
-       Brain, 12(1), 26-41.
 """
 # Author: Tommy Clausner
 #
@@ -153,3 +147,6 @@
 #
 #     >>> morph.apply(stc)
 #
+# References
+# ----------
+# .. footbibliography::
diff --git a/examples/inverse/plot_vector_mne_solution.py b/examples/inverse/plot_vector_mne_solution.py
index 8ed50bf87ae..90d5d851151 100644
--- a/examples/inverse/plot_vector_mne_solution.py
+++ b/examples/inverse/plot_vector_mne_solution.py
@@ -21,6 +21,7 @@
 #
 # License: BSD (3-clause)
 
+import numpy as np
 import mne
 from mne.datasets import sample
 from mne.minimum_norm import read_inverse_operator, apply_inverse
@@ -52,6 +53,21 @@
 brain = stc.plot(
     initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir)
 
+###############################################################################
+# Plot the activation in the direction of maximal power for this data:
+
+stc_max, directions = stc.project('pca', src=inv['src'])
+# These directions must by design be close to the normals because this
+# inverse was computed with loose=0.2:
+print('Absolute cosine similarity between source normals and directions: '
+      f'{np.abs(np.sum(directions * inv["source_nn"][2::3], axis=-1)).mean()}')
+brain_max = stc_max.plot(
+    initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir,
+    time_label='Max power')
+brain_normal = stc.project('normal', inv['src'])[0].plot(
+    initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir,
+    time_label='Normal')
+
 ###############################################################################
 # You can also do this with a fixed-orientation inverse.
It looks a lot like # the result above because the ``loose=0.2`` orientation constraint keeps diff --git a/examples/simulation/plot_simulate_evoked_data.py b/examples/simulation/plot_simulate_evoked_data.py index 76065d21e36..a205c360968 100644 --- a/examples/simulation/plot_simulate_evoked_data.py +++ b/examples/simulation/plot_simulate_evoked_data.py @@ -46,7 +46,7 @@ ############################################################################### # Generate source time courses from 2 dipoles and the correspond evoked data -times = np.arange(300, dtype=np.float) / raw.info['sfreq'] - 0.1 +times = np.arange(300, dtype=np.float64) / raw.info['sfreq'] - 0.1 rng = np.random.RandomState(42) diff --git a/examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py b/examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py index 08a61b45785..dbc8e5954e3 100644 --- a/examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py +++ b/examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py @@ -143,7 +143,7 @@ def data_fun(times, latency, duration): # event, the second is not used. The third one is the event id, which is # different for each of the 4 areas. -times = np.arange(150, dtype=np.float) / info['sfreq'] +times = np.arange(150, dtype=np.float64) / info['sfreq'] duration = 0.03 rng = np.random.RandomState(7) source_simulator = mne.simulation.SourceSimulator(src, tstep=tstep) diff --git a/examples/time_frequency/plot_time_frequency_simulated.py b/examples/time_frequency/plot_time_frequency_simulated.py index da22307274e..b93a2f64481 100644 --- a/examples/time_frequency/plot_time_frequency_simulated.py +++ b/examples/time_frequency/plot_time_frequency_simulated.py @@ -43,7 +43,7 @@ noise = rng.randn(n_epochs, len(ch_names), n_times) # Add a 50 Hz sinusoidal burst to the noise and ramp it. -t = np.arange(n_times, dtype=np.float) / sfreq +t = np.arange(n_times, dtype=np.float64) / sfreq signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal signal[np.logical_or(t < 0.45, t > 0.55)] = 0. 
# Hard windowing on_time = np.logical_and(t >= 0.45, t <= 0.55) diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py index f92d8a9d180..a0cf09800d4 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -284,7 +284,7 @@ def test_make_dics(tmpdir, _load_forward, idx, whiten): assert isinstance(filters, Beamformer) assert isinstance(filters_read, Beamformer) for key in ['tmin', 'tmax']: # deal with strictness of object_diff - setattr(filters['csd'], key, np.float(getattr(filters['csd'], key))) + setattr(filters['csd'], key, np.float64(getattr(filters['csd'], key))) assert object_diff(filters, filters_read) == '' diff --git a/mne/bem.py b/mne/bem.py index 6d6219b00ae..5af9e18e15a 100644 --- a/mne/bem.py +++ b/mne/bem.py @@ -1341,7 +1341,7 @@ def _read_bem_surface(fid, this, def_coord_frame, s_id=None): if tag is None: raise ValueError('Vertex data not found') - res['rr'] = tag.data.astype(np.float) # XXX : double because of mayavi bug + res['rr'] = tag.data.astype(np.float64) # XXX : double because of mayavi if res['rr'].shape[0] != res['np']: raise ValueError('Vertex information is incorrect') diff --git a/mne/channels/channels.py b/mne/channels/channels.py index 8d52102c21c..2b8ae0ab81f 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -373,7 +373,7 @@ def _set_channel_positions(self, pos, names): if len(pos) != len(names): raise ValueError('Number of channel positions not equal to ' 'the number of names given.') - pos = np.asarray(pos, dtype=np.float) + pos = np.asarray(pos, dtype=np.float64) if pos.shape[-1] != 3 or pos.ndim != 2: msg = ('Channel positions must have the shape (n_points, 3) ' 'not %s.' % (pos.shape,)) diff --git a/mne/channels/interpolation.py b/mne/channels/interpolation.py index 4ce20efc315..87b4f36a200 100644 --- a/mne/channels/interpolation.py +++ b/mne/channels/interpolation.py @@ -136,8 +136,8 @@ def _interpolate_bads_eeg(inst, origin, verbose=None): inst : mne.io.Raw, mne.Epochs or mne.Evoked The data to interpolate. Must be preloaded. 
""" - bads_idx = np.zeros(len(inst.ch_names), dtype=np.bool) - goods_idx = np.zeros(len(inst.ch_names), dtype=np.bool) + bads_idx = np.zeros(len(inst.ch_names), dtype=bool) + goods_idx = np.zeros(len(inst.ch_names), dtype=bool) picks = pick_types(inst.info, meg=False, eeg=True, exclude=[]) inst.info._check_consistency() diff --git a/mne/channels/layout.py b/mne/channels/layout.py index ed4edb92053..2cfa301d31f 100644 --- a/mne/channels/layout.py +++ b/mne/channels/layout.py @@ -126,7 +126,7 @@ def _read_lout(fname): name = chkind + ' ' + nb else: cid, x, y, dx, dy, name = splits - pos.append(np.array([x, y, dx, dy], dtype=np.float)) + pos.append(np.array([x, y, dx, dy], dtype=np.float64)) names.append(name) ids.append(int(cid)) @@ -147,7 +147,7 @@ def _read_lay(fname): name = chkind + ' ' + nb else: cid, x, y, dx, dy, name = splits - pos.append(np.array([x, y, dx, dy], dtype=np.float)) + pos.append(np.array([x, y, dx, dy], dtype=np.float64)) names.append(name) ids.append(int(cid)) diff --git a/mne/chpi.py b/mne/chpi.py index 0e6a4f83806..4a1488702d3 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -557,7 +557,7 @@ def _fit_chpi_amplitudes(raw, time_sl, hpi): # loads hpi_stim channel chpi_data = raw[hpi['hpi_pick'], time_sl][0] - ons = (np.round(chpi_data).astype(np.int) & + ons = (np.round(chpi_data).astype(np.int64) & hpi['on'][:, np.newaxis]).astype(bool) n_on = ons.all(axis=-1).sum(axis=0) if not (n_on >= 3).all(): diff --git a/mne/conftest.py b/mne/conftest.py index 09d970764e2..f8f000034f9 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -88,6 +88,11 @@ def pytest_configure(config): ignore:.*trait.*handler.*deprecated.*:DeprecationWarning ignore:.*rich_compare.*metadata.*deprecated.*:DeprecationWarning ignore:.*In future, it will be an error for 'np.bool_'.*:DeprecationWarning + ignore:.*`np.bool` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.int` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.float` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.object` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.long` is a deprecated alias:DeprecationWarning ignore:.*Converting `np\.character` to a dtype is deprecated.*:DeprecationWarning ignore:.*sphinx\.util\.smartypants is deprecated.*: ignore:.*pandas\.util\.testing is deprecated.*: diff --git a/mne/connectivity/effective.py b/mne/connectivity/effective.py index 9d42ee710e2..3de4864d14a 100644 --- a/mne/connectivity/effective.py +++ b/mne/connectivity/effective.py @@ -133,7 +133,7 @@ def phase_slope_index(data, indices=None, sfreq=2 * np.pi, # allocate space for output out_shape = list(cohy.shape) out_shape[freq_dim] = n_bands - psi = np.zeros(out_shape, dtype=np.float) + psi = np.zeros(out_shape, dtype=np.float64) # allocate accumulator acc_shape = copy.copy(out_shape) diff --git a/mne/connectivity/spectral.py b/mne/connectivity/spectral.py index 94c796de06f..2aa99f423ee 100644 --- a/mne/connectivity/spectral.py +++ b/mne/connectivity/spectral.py @@ -980,7 +980,7 @@ def _prepare_connectivity(epoch_block, tmin, tmax, fmin, fmax, sfreq, indices, raise ValueError('define frequencies of interest using ' 'cwt_freqs') else: - cwt_freqs = cwt_freqs.astype(np.float) + cwt_freqs = cwt_freqs.astype(np.float64) if any(cwt_freqs > (sfreq / 2.)): raise ValueError('entries in cwt_freqs cannot be ' 'larger than Nyquist (sfreq / 2)') @@ -1003,7 +1003,7 @@ def _prepare_connectivity(epoch_block, tmin, tmax, fmin, fmax, sfreq, indices, 5. 
/ np.min(fmin), five_cycle_freq)) # create a frequency mask for all bands - freq_mask = np.zeros(len(freqs_all), dtype=np.bool) + freq_mask = np.zeros(len(freqs_all), dtype=bool) for f_lower, f_upper in zip(fmin, fmax): freq_mask |= ((freqs_all >= f_lower) & (freqs_all <= f_upper)) diff --git a/mne/cov.py b/mne/cov.py index 78fb5d250ec..23ce5a22069 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -846,8 +846,8 @@ def _unpack_epochs(epochs): # prepare mean covs n_epoch_types = len(epochs) data_mean = [0] * n_epoch_types - n_samples = np.zeros(n_epoch_types, dtype=np.int) - n_epochs = np.zeros(n_epoch_types, dtype=np.int) + n_samples = np.zeros(n_epoch_types, dtype=np.int64) + n_epochs = np.zeros(n_epoch_types, dtype=np.int64) for ii, epochs_t in enumerate(epochs): @@ -1759,7 +1759,10 @@ def compute_whitener(noise_cov, info=None, picks=None, rank=None, nzero = (eig > 0) eig[~nzero] = 0. # get rid of numerical noise (negative) ones - dtype = np.complex if noise_cov['eigvec'].dtype == np.complex else np.float + if noise_cov['eigvec'].dtype.kind == 'c': + dtype = np.complex128 + else: + dtype = np.float64 W = np.zeros((n_chan, 1), dtype) W[nzero, 0] = 1.0 / np.sqrt(eig[nzero]) # Rows of eigvec are the eigenvectors @@ -1966,7 +1969,7 @@ def _write_cov(fid, cov): else: # Store only lower part of covariance matrix dim = cov['dim'] - mask = np.tril(np.ones((dim, dim), dtype=np.bool)) > 0 + mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0 vals = cov['data'][mask].ravel() write_double(fid, FIFF.FIFF_MNE_COV, vals) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index cfe5f58dd28..ebdb25300cb 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -239,7 +239,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, path = _get_path(path, key, name) # To update the testing or misc dataset, push commits, then make a new # release on GitHub. Then update the "releases" variable: - releases = dict(testing='0.92', misc='0.6') + releases = dict(testing='0.93', misc='0.6') # And also update the "md5_hashes['testing']" variable below. 
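# A minimal sketch of the verification step the release/hash pairing above
# feeds into: after bumping releases['testing'], the freshly uploaded
# archive is hashed and compared against md5_hashes['testing']. The helper
# below is hypothetical (illustrative name only); the real download/check
# code lives elsewhere in this module:
import hashlib

def _md5_sketch(path, block_size=2 ** 20):
    """Hash an archive in chunks so large files do not exhaust memory."""
    md5 = hashlib.md5()
    with open(path, 'rb') as fid:
        for chunk in iter(lambda: fid.read(block_size), b''):
            md5.update(chunk)
    return md5.hexdigest()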
# To update any other dataset, update the data archive itself (upload @@ -326,7 +326,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, sample='12b75d1cb7df9dfb4ad73ed82f61094f', somato='ea825966c0a1e9b2f84e3826c5500161', spm='9f43f67150e3b694b523a21eb929ea75', - testing='42daafd1b882da2ef041de860ca6e771', + testing='2eb998a0893a28faedd583973c70b517', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', opm='370ad1dcfd5c47e029e692c85358a374', diff --git a/mne/decoding/base.py b/mne/decoding/base.py index 934f45d5af5..a7a9c64b9c6 100644 --- a/mne/decoding/base.py +++ b/mne/decoding/base.py @@ -224,7 +224,7 @@ def _set_cv(cv, estimator=None, X=None, y=None): # Setup CV from sklearn import model_selection as models from sklearn.model_selection import (check_cv, StratifiedKFold, KFold) - if isinstance(cv, (int, np.int)): + if isinstance(cv, (int, np.int64)): XFold = StratifiedKFold if est_is_classifier else KFold cv = XFold(n_splits=cv) elif isinstance(cv, str): diff --git a/mne/decoding/tests/test_receptive_field.py b/mne/decoding/tests/test_receptive_field.py index 859a5e6f907..418eeed69ec 100644 --- a/mne/decoding/tests/test_receptive_field.py +++ b/mne/decoding/tests/test_receptive_field.py @@ -108,7 +108,7 @@ def test_time_delay(): _delay_time_series(X, tmin, tmax, sfreq=[1]) # Delays must be int/float with pytest.raises(TypeError, match='.*complex.*'): - _delay_time_series(X, np.complex(tmin), tmax, 1) + _delay_time_series(X, np.complex128(tmin), tmax, 1) # Make sure swapaxes works start, stop = int(round(tmin * isfreq)), int(round(tmax * isfreq)) + 1 n_delays = stop - start diff --git a/mne/dipole.py b/mne/dipole.py index 3bf69519b29..6361ed79472 100644 --- a/mne/dipole.py +++ b/mne/dipole.py @@ -1306,7 +1306,7 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1, # cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False) # nzero = (cov['eig'] > 0) # n_chan = len(info_nb['ch_names']) - # whitener = np.zeros((n_chan, n_chan), dtype=np.float) + # whitener = np.zeros((n_chan, n_chan), dtype=np.float64) # whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero]) # whitener = np.dot(whitener, cov['eigvec']) diff --git a/mne/epochs.py b/mne/epochs.py index 96a9fc5b2b3..535a1a43ba1 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -904,7 +904,7 @@ def subtract_evoked(self, evoked=None): else: if self._offset is None: self._offset = np.zeros((len(self.ch_names), len(self.times)), - dtype=np.float) + dtype=np.float64) self._offset[ep_picks] -= evoked.data[picks] logger.info('[done]') @@ -2900,35 +2900,37 @@ def add_channels_epochs(epochs_list, verbose=None): return epochs -def _compare_epochs_infos(info1, info2, ind): +def _compare_epochs_infos(info1, info2, name): """Compare infos.""" + if not isinstance(name, str): # passed epochs index + name = f'epochs[{name:d}]' info1._check_consistency() info2._check_consistency() if info1['nchan'] != info2['nchan']: - raise ValueError('epochs[%d][\'info\'][\'nchan\'] must match' % ind) + raise ValueError(f'{name}.info[\'nchan\'] must match') if info1['bads'] != info2['bads']: - raise ValueError('epochs[%d][\'info\'][\'bads\'] must match' % ind) + raise ValueError(f'{name}.info[\'bads\'] must match') if info1['sfreq'] != info2['sfreq']: - raise ValueError('epochs[%d][\'info\'][\'sfreq\'] must match' % ind) + raise ValueError(f'{name}.info[\'sfreq\'] must match') if set(info1['ch_names']) != set(info2['ch_names']): - raise 
ValueError('epochs[%d][\'info\'][\'ch_names\'] must match' % ind) + raise ValueError(f'{name}.info[\'ch_names\'] must match') if len(info2['projs']) != len(info1['projs']): - raise ValueError('SSP projectors in epochs files must be the same') + raise ValueError(f'SSP projectors in {name} must be the same') if any(not _proj_equal(p1, p2) for p1, p2 in zip(info2['projs'], info1['projs'])): - raise ValueError('SSP projectors in epochs files must be the same') + raise ValueError(f'SSP projectors in {name} must be the same') if (info1['dev_head_t'] is None) != (info2['dev_head_t'] is None) or \ (info1['dev_head_t'] is not None and not np.allclose(info1['dev_head_t']['trans'], info2['dev_head_t']['trans'], rtol=1e-6)): - raise ValueError('epochs[%d][\'info\'][\'dev_head_t\'] must match. ' - 'The epochs probably come from different runs, and ' + raise ValueError(f'{name}.info[\'dev_head_t\'] must match. The ' + 'instances probably come from different runs, and ' 'are therefore associated with different head ' 'positions. Manually change info[\'dev_head_t\'] to ' 'avoid this message but beware that this means the ' 'MEG sensors will not be properly spatially aligned. ' 'See mne.preprocessing.maxwell_filter to realign the ' - 'runs to a common head position.' % ind) + 'runs to a common head position.') def _concatenate_epochs(epochs_list, with_data=True, add_offset=True): diff --git a/mne/event.py b/mne/event.py index 35c9007e61b..8a76d92e704 100644 --- a/mne/event.py +++ b/mne/event.py @@ -49,7 +49,7 @@ def pick_events(events, include=None, exclude=None, step=False): if include is not None: if not isinstance(include, list): include = [include] - mask = np.zeros(len(events), dtype=np.bool) + mask = np.zeros(len(events), dtype=bool) for e in include: mask = np.logical_or(mask, events[:, 2] == e) if step: @@ -58,7 +58,7 @@ def pick_events(events, include=None, exclude=None, step=False): elif exclude is not None: if not isinstance(exclude, list): exclude = [exclude] - mask = np.ones(len(events), dtype=np.bool) + mask = np.ones(len(events), dtype=bool) for e in exclude: mask = np.logical_and(mask, events[:, 2] != e) if step: @@ -432,7 +432,7 @@ def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0, if np.any(data < 0): warn('Trigger channel contains negative values, using absolute value.') data = np.abs(data) # make sure trig channel is positive - data = data.astype(np.int) + data = data.astype(np.int64) return _find_stim_steps(data, raw.first_samp, pad_start=pad_start, pad_stop=pad_stop, merge=merge) @@ -452,9 +452,9 @@ def _find_events(data, first_samp, verbose=None, output='onset', else: merge = 0 - data = data.astype(np.int) + data = data.astype(np.int64) if uint_cast: - data = data.astype(np.uint16).astype(np.int) + data = data.astype(np.uint16).astype(np.int64) if data.min() < 0: warn('Trigger channel contains negative values, using absolute ' 'value. 
If data were acquired on a Neuromag system with ' diff --git a/mne/evoked.py b/mne/evoked.py index 24ac8ea82e6..e5e0618d536 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -746,7 +746,7 @@ def __init__(self, data, info, tmin=0., comment='', nave=1, kind='average', self.first = int(round(tmin * info['sfreq'])) self.last = self.first + np.shape(data)[-1] - 1 self.times = np.arange(self.first, self.last + 1, - dtype=np.float) / info['sfreq'] + dtype=np.float64) / info['sfreq'] self.info = info.copy() # do not modify original info self.nave = nave self.kind = kind @@ -1111,7 +1111,7 @@ def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): else: # Put the old style epochs together data = np.concatenate([e.data[None, :] for e in epoch], axis=0) - data = data.astype(np.float) + data = data.astype(np.float64) if first_time is not None and nsamp is not None: times = first_time + np.arange(nsamp) / info['sfreq'] @@ -1163,6 +1163,7 @@ def write_evokeds(fname, evoked): def _write_evokeds(fname, evoked, check=True): """Write evoked data.""" + from .epochs import _compare_epochs_infos if check: check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz', '_ave.fif', '_ave.fif.gz')) @@ -1184,7 +1185,9 @@ def _write_evokeds(fname, evoked, check=True): # One or more evoked data sets start_block(fid, FIFF.FIFFB_PROCESSED_DATA) - for e in evoked: + for ei, e in enumerate(evoked): + if ei: + _compare_epochs_infos(evoked[0].info, e.info, f'evoked[{ei}]') start_block(fid, FIFF.FIFFB_EVOKED) # Comment is optional @@ -1277,7 +1280,7 @@ def _get_peak(data, times, tmin=None, tmax=None, mode='abs'): raise ValueError('The tmin must be smaller or equal to tmax') time_win = (times >= tmin) & (times <= tmax) - mask = np.ones_like(data).astype(np.bool) + mask = np.ones_like(data).astype(bool) mask[:, time_win] = False maxfun = np.argmax diff --git a/mne/filter.py b/mne/filter.py index 34eae5468d4..d1ef0cdef50 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -179,7 +179,7 @@ def _overlap_add_filter(x, h, n_fft=None, phase='zero', picks=None, # cost function based on number of multiplications N = 2 ** np.arange(np.ceil(np.log2(min_fft)), np.ceil(np.log2(max_fft)) + 1, dtype=int) - cost = (np.ceil(n_x / (N - len(h) + 1).astype(np.float)) * + cost = (np.ceil(n_x / (N - len(h) + 1).astype(np.float64)) * N * (np.log2(N) + 1)) # add a heuristic term to prevent too-long FFT's which are slow @@ -2005,7 +2005,7 @@ def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1, lowpass = self.info.get('lowpass') lowpass = np.inf if lowpass is None else lowpass self.info['lowpass'] = min(lowpass, sfreq / 2.) - new_times = (np.arange(self._data.shape[-1], dtype=np.float) / + new_times = (np.arange(self._data.shape[-1], dtype=np.float64) / sfreq + self.times[0]) # adjust indirectly affected variables if isinstance(self, BaseEpochs): diff --git a/mne/fixes.py b/mne/fixes.py index 64f43c43ce1..f90167344d3 100644 --- a/mne/fixes.py +++ b/mne/fixes.py @@ -93,14 +93,14 @@ def _read_geometry(filepath, read_metadata=False, read_stamp=False): nvert = _fread3(fobj) nquad = _fread3(fobj) (fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.) 
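# For context on the reads above: FreeSurfer QUAD files store counts as
# 3-byte big-endian integers. A minimal sketch of what ``_fread3`` does
# (assumed bit layout; not necessarily this module's exact implementation):
import numpy as np

def _fread3_sketch(fobj):
    """Read one 3-byte big-endian unsigned integer from an open file."""
    b1, b2, b3 = np.fromfile(fobj, '>u1', 3)
    return (int(b1) << 16) + (int(b2) << 8) + int(b3)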
- coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float) / div + coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float64) / div coords = coords.reshape(-1, 3) quads = _fread3_many(fobj, nquad * 4) quads = quads.reshape(nquad, 4) # # Face splitting follows # - faces = np.zeros((2 * nquad, 3), dtype=np.int) + faces = np.zeros((2 * nquad, 3), dtype=np.int64) nface = 0 for quad in quads: if (quad[0] % 2) == 0: @@ -127,7 +127,7 @@ def _read_geometry(filepath, read_metadata=False, read_stamp=False): else: raise ValueError("File does not appear to be a Freesurfer surface") - coords = coords.astype(np.float) # XXX: due to mayavi bug on mac 32bits + coords = coords.astype(np.float64) # XXX: due to mayavi bug on mac 32bits ret = (coords, faces) if read_metadata: @@ -1138,3 +1138,16 @@ def _np_apply_along_axis(func1d, axis, arr): @jit() def mean(array, axis): return _np_apply_along_axis(np.mean, axis, array) + + +############################################################################### +# Added in Python 3.7 (remove when we drop support for 3.6) + +try: + from contextlib import nullcontext +except ImportError: + from contextlib import contextmanager + + @contextmanager + def nullcontext(enter_result=None): + yield enter_result diff --git a/mne/forward/_make_forward.py b/mne/forward/_make_forward.py index 7b2ca9b6bc5..10734a019a3 100644 --- a/mne/forward/_make_forward.py +++ b/mne/forward/_make_forward.py @@ -690,7 +690,7 @@ def make_forward_dipole(dipole, bem, info, trans=None, n_jobs=1, verbose=None): # Check for omissions due to proximity to inner skull in # make_forward_solution, which will result in an exception if fwd['src'][0]['nuse'] != len(pos): - inuse = fwd['src'][0]['inuse'].astype(np.bool) + inuse = fwd['src'][0]['inuse'].astype(bool) head = ('The following dipoles are outside the inner skull boundary') msg = len(head) * '#' + '\n' + head + '\n' for (t, pos) in zip(times[np.logical_not(inuse)], diff --git a/mne/forward/forward.py b/mne/forward/forward.py index 8bf1c04a946..ac80fd7f774 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -175,12 +175,12 @@ def _block_diag(A, n): if na % n > 0: raise ValueError('Width of matrix must be a multiple of n') - tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma) + tmp = np.arange(ma * bdn, dtype=np.int64).reshape(bdn, ma) tmp = np.tile(tmp, (1, n)) ii = tmp.ravel() - jj = np.arange(na, dtype=np.int)[None, :] - jj = jj * np.ones(ma, dtype=np.int)[:, None] + jj = np.arange(na, dtype=np.int64)[None, :] + jj = jj * np.ones(ma, dtype=np.int64)[:, None] jj = jj.T.ravel() # column indices foreach sparse bd bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc() @@ -994,7 +994,7 @@ def compute_orient_prior(forward, loose=0.2, verbose=None): if not (0 <= loose <= 1): raise ValueError('loose value should be between 0 and 1, ' 'got %s.' % (loose,)) - orient_prior = np.ones(n_sources, dtype=np.float) + orient_prior = np.ones(n_sources, dtype=np.float64) if loose > 0.: if is_fixed_ori: raise ValueError('loose must be 0. 
with forward operator ' diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py index 2457166b8c6..12457a485e0 100644 --- a/mne/inverse_sparse/_gamma_map.py +++ b/mne/inverse_sparse/_gamma_map.py @@ -51,7 +51,7 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1, M = M.copy() if gammas is None: - gammas = np.ones(G.shape[1], dtype=np.float) + gammas = np.ones(G.shape[1], dtype=np.float64) eps = np.finfo(float).eps @@ -132,7 +132,7 @@ def denom_fun(x): gammas = np.repeat(gammas_comb / group_size, group_size) # compute convergence criterion - gammas_full = np.zeros(n_sources, dtype=np.float) + gammas_full = np.zeros(n_sources, dtype=np.float64) gammas_full[active_set] = gammas err = (np.sum(np.abs(gammas_full - gammas_full_old)) / diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index f6e01d876c1..b3cebaa8073 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -403,7 +403,7 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8, M_estimated = np.dot(gain[:, active_set], X) if mask is not None: - active_set_tmp = np.zeros(len(mask), dtype=np.bool) + active_set_tmp = np.zeros(len(mask), dtype=bool) active_set_tmp[mask] = active_set active_set = active_set_tmp del active_set_tmp @@ -662,7 +662,7 @@ def tf_mixed_norm(evoked, forward, noise_cov, M_estimated = np.dot(gain[:, active_set], X) if mask is not None: - active_set_tmp = np.zeros(len(mask), dtype=np.bool) + active_set_tmp = np.zeros(len(mask), dtype=bool) active_set_tmp[mask] = active_set active_set = active_set_tmp del active_set_tmp diff --git a/mne/inverse_sparse/mxne_optim.py b/mne/inverse_sparse/mxne_optim.py index b06e598db49..64fffb1bdb5 100644 --- a/mne/inverse_sparse/mxne_optim.py +++ b/mne/inverse_sparse/mxne_optim.py @@ -67,7 +67,7 @@ def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False): Examples -------- - >>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float), (2, 1)) + >>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float64), (2, 1)) >>> Y = np.r_[Y, np.zeros_like(Y)] >>> print(Y) # doctest:+SKIP [[ 0. 4. 3. 0. 0.] @@ -82,7 +82,7 @@ def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False): [ True True False False] """ if len(Y) == 0: - return np.zeros_like(Y), np.zeros((0,), dtype=np.bool) + return np.zeros_like(Y), np.zeros((0,), dtype=bool) if shape is not None: shape_init = Y.shape Y = Y.reshape(*shape) @@ -141,7 +141,7 @@ def prox_l1(Y, alpha, n_orient): Examples -------- - >>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float), (2, 1)) + >>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float64), (2, 1)) >>> Y = np.r_[Y, np.zeros_like(Y)] >>> print(Y) # doctest:+SKIP [[ 1. 2. 3. 2. 0.] 
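# A NumPy-only sketch of the group soft-thresholding that the prox_l21
# doctest above exercises; names are illustrative, not the solver's
# internals. Orientation groups are consecutive rows, and the l2 norm is
# taken per group over all orientations and time points:
import numpy as np

def prox_l21_sketch(Y, alpha, n_orient):
    n_pos = Y.shape[0] // n_orient
    norms = np.sqrt((Y * Y).reshape(n_pos, -1).sum(axis=1))
    # shrink each group toward zero; groups with norm <= alpha vanish
    shrink = np.maximum(1. - alpha / np.maximum(norms, alpha), 0.)
    active = shrink > 0.
    out = Y.reshape(n_pos, n_orient, -1)[active] * shrink[active, None, None]
    return out.reshape(-1, Y.shape[1]), np.repeat(active, n_orient)

# For the doctest input, shrink = 1 - 2 / sqrt(50), so 4 -> 2.86 and
# 3 -> 2.15, and only the first of the two orientation groups survives.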
@@ -252,7 +252,7 @@ def _mixed_norm_solver_prox(M, G, alpha, lipschitz_constant, maxit=200, Y = np.zeros((n_sources, n_times)) # FISTA aux variable E = [] # track primal objective function highest_d_obj = - np.inf - active_set = np.ones(n_sources, dtype=np.bool) # start with full AS + active_set = np.ones(n_sources, dtype=bool) # start with full AS for i in range(maxit): X0, active_set_0 = X, active_set # store previous values @@ -332,7 +332,7 @@ def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200, E = [] # track primal objective function highest_d_obj = - np.inf - active_set = np.zeros(n_sources, dtype=np.bool) # start with full AS + active_set = np.zeros(n_sources, dtype=bool) # start with full AS alpha_lc = alpha / lipschitz_constant @@ -544,7 +544,7 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, E = list() highest_d_obj = - np.inf X_init = None - active_set = np.zeros(n_dipoles, dtype=np.bool) + active_set = np.zeros(n_dipoles, dtype=bool) idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient)) new_active_idx = idx_large_corr[-active_set_size:] if n_orient > 1: @@ -673,7 +673,7 @@ def gprime(w): E = list() - active_set = np.ones(G.shape[1], dtype=np.bool) + active_set = np.ones(G.shape[1], dtype=bool) weights = np.ones(G.shape[1]) X = np.zeros((G.shape[1], M.shape[1])) @@ -740,7 +740,7 @@ def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None): """ n_times = M.shape[1] n_points = G.shape[1] - iv = np.ones((n_points, n_times), dtype=np.float) + iv = np.ones((n_points, n_times), dtype=np.float64) v = phi(iv) L = 1e100 for it in range(100): @@ -1251,7 +1251,7 @@ def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, n_positions = n_sources // n_orient Z = dict.fromkeys(np.arange(n_positions), 0.0) - active_set = np.zeros(n_sources, dtype=np.bool) + active_set = np.zeros(n_sources, dtype=bool) active = [] if Z_init is not None: if Z_init.shape != (n_sources, phi.n_coefs.sum()): @@ -1295,7 +1295,7 @@ def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_( M, G[:, active_set], Z_init, - np.ones(len(active) * n_orient, dtype=np.bool), + np.ones(len(active) * n_orient, dtype=bool), candidates_, alpha_space, alpha_time, lipschitz_constant[active_set[::n_orient]], phi, phiT, w_space=w_space_as, w_time=w_time_as, @@ -1322,7 +1322,7 @@ def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, Z = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])]) X = phiT(Z) else: - Z = np.zeros((0, phi.n_coefs.sum()), dtype=np.complex) + Z = np.zeros((0, phi.n_coefs.sum()), dtype=np.complex128) X = np.zeros((0, n_times)) return X, Z, active_set, E, gap @@ -1547,8 +1547,8 @@ def g_time_prime_inv(Z): E = list() - active_set = np.ones(n_sources, dtype=np.bool) - Z = np.zeros((n_sources, phi.n_coefs.sum()), dtype=np.complex) + active_set = np.ones(n_sources, dtype=bool) + Z = np.zeros((n_sources, phi.n_coefs.sum()), dtype=np.complex128) for k in range(n_tfmxne_iter): active_set_0 = active_set.copy() diff --git a/mne/inverse_sparse/tests/test_mxne_debiasing.py b/mne/inverse_sparse/tests/test_mxne_debiasing.py index fe0d684fd93..e1bf466aaef 100755 --- a/mne/inverse_sparse/tests/test_mxne_debiasing.py +++ b/mne/inverse_sparse/tests/test_mxne_debiasing.py @@ -14,7 +14,7 @@ def test_compute_debiasing(): rng = np.random.RandomState(42) G = rng.randn(10, 4) X = rng.randn(4, 20) - debias_true = np.arange(1, 5, dtype=np.float) + debias_true = 
np.arange(1, 5, dtype=np.float64) M = np.dot(G, X * debias_true[:, np.newaxis]) debias = compute_bias(M, G, X, max_iter=10000, n_orient=1, tol=1e-7) assert_almost_equal(debias, debias_true, decimal=5) diff --git a/mne/io/artemis123/utils.py b/mne/io/artemis123/utils.py index 5171c8f82e2..de7e98c9113 100644 --- a/mne/io/artemis123/utils.py +++ b/mne/io/artemis123/utils.py @@ -20,7 +20,7 @@ def _load_mne_locs(fname=None): with open(fname, 'r') as fid: for line in fid: vals = line.strip().split(',') - locs[vals[0]] = np.array(vals[1::], np.float) + locs[vals[0]] = np.array(vals[1::], np.float64) return locs @@ -56,9 +56,9 @@ def _load_tristan_coil_locs(coil_loc_path): channel_info[vals[0]] = dict() if vals[6]: channel_info[vals[0]]['inner_coil'] = \ - np.array(vals[2:5], np.float) + np.array(vals[2:5], np.float64) channel_info[vals[0]]['outer_coil'] = \ - np.array(vals[5:8], np.float) + np.array(vals[5:8], np.float64) else: # nothing supplied channel_info[vals[0]]['inner_coil'] = np.zeros(3) channel_info[vals[0]]['outer_coil'] = np.zeros(3) diff --git a/mne/io/brainvision/brainvision.py b/mne/io/brainvision/brainvision.py index 99e6b051dd9..3d79b4a2060 100644 --- a/mne/io/brainvision/brainvision.py +++ b/mne/io/brainvision/brainvision.py @@ -647,7 +647,7 @@ def _get_vhdr_info(vhdr_fname, eog, misc, scale): # highpass relaxed / no filters highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC') else np.Inf for filt in highpass] - info['highpass'] = np.max(np.array(highpass, dtype=np.float)) + info['highpass'] = np.max(np.array(highpass, dtype=np.float64)) # Coveniently enough 1 / np.Inf = 0.0, so this works for # DC / no highpass filter # filter time constant t [secs] to Hz conversion: 1/2*pi*t @@ -662,7 +662,7 @@ def _get_vhdr_info(vhdr_fname, eog, misc, scale): else: highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC') else 0.0 for filt in highpass] - info['highpass'] = np.min(np.array(highpass, dtype=np.float)) + info['highpass'] = np.min(np.array(highpass, dtype=np.float64)) if info['highpass'] == 0.0 and len(set(highpass)) == 1: # not actually heterogeneous in effect # ... just heterogeneously disabled @@ -691,7 +691,7 @@ def _get_vhdr_info(vhdr_fname, eog, misc, scale): # infinitely relaxed / no filters lowpass = [float(filt) if filt not in ('NaN', 'Off') else 0.0 for filt in lowpass] - info['lowpass'] = np.min(np.array(lowpass, dtype=np.float)) + info['lowpass'] = np.min(np.array(lowpass, dtype=np.float64)) try: # filter time constant t [secs] to Hz conversion: 1/2*pi*t info['lowpass'] = 1. 
/ (2 * np.pi * info['lowpass']) @@ -713,7 +713,7 @@ def _get_vhdr_info(vhdr_fname, eog, misc, scale): # infinitely relaxed / no filters lowpass = [float(filt) if filt not in ('NaN', 'Off') else np.Inf for filt in lowpass] - info['lowpass'] = np.max(np.array(lowpass, dtype=np.float)) + info['lowpass'] = np.max(np.array(lowpass, dtype=np.float64)) if np.isinf(info['lowpass']): # No lowpass actually set for the weakest setting diff --git a/mne/io/bti/read.py b/mne/io/bti/read.py index 710ece56917..210ff827992 100644 --- a/mne/io/bti/read.py +++ b/mne/io/bti/read.py @@ -34,7 +34,7 @@ def read_char(fid, count=1): def read_bool(fid): """Read bool value from bti file.""" - return _unpack_simple(fid, '>?', np.bool) + return _unpack_simple(fid, '>?', bool) def read_uint8(fid): diff --git a/mne/io/ctf/info.py b/mne/io/ctf/info.py index b98ad0c2923..9700267eb0e 100644 --- a/mne/io/ctf/info.py +++ b/mne/io/ctf/info.py @@ -479,8 +479,8 @@ def _annotate_bad_segments(directory, start_time, meas_date): for f in fid.readlines(): tmp = f.strip().split() desc.append('bad_%s' % tmp[0]) - onsets.append(np.float(tmp[1]) - start_time) - durations.append(np.float(tmp[2]) - np.float(tmp[1])) + onsets.append(np.float64(tmp[1]) - start_time) + durations.append(np.float64(tmp[2]) - np.float64(tmp[1])) # return None if there are no bad segments if len(onsets) == 0: return None diff --git a/mne/io/ctf_comp.py b/mne/io/ctf_comp.py index 5c420c81b7f..3be634e31f6 100644 --- a/mne/io/ctf_comp.py +++ b/mne/io/ctf_comp.py @@ -109,8 +109,8 @@ def read_ctf_comp(fid, node, chs, verbose=None): # Calibrate... _calibrate_comp(one, chs, mat['row_names'], mat['col_names']) else: - one['rowcals'] = np.ones(mat['data'].shape[0], dtype=np.float) - one['colcals'] = np.ones(mat['data'].shape[1], dtype=np.float) + one['rowcals'] = np.ones(mat['data'].shape[0], dtype=np.float64) + one['colcals'] = np.ones(mat['data'].shape[1], dtype=np.float64) compdata.append(one) diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index 61c36797fa5..5c59c14e1b5 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -164,7 +164,7 @@ def read_stim_ch(self, buffer_size=1e5): pick = pick_types(self.info, meg=False, ref_meg=False, stim=True, exclude=[]) - stim_ch = np.empty((1, stop), dtype=np.int) + stim_ch = np.empty((1, stop), dtype=np.int64) for b_start in range(start, stop, buffer_size): b_stop = b_start + buffer_size x = self[pick, b_start:b_stop][0] @@ -686,7 +686,7 @@ def get_kit_info(rawfile, allow_unknown_format): z = cos(theta) vec_z = np.array([x, y, z]) vec_z /= linalg.norm(vec_z) - vec_x = np.zeros(vec_z.size, dtype=np.float) + vec_x = np.zeros(vec_z.size, dtype=np.float64) if vec_z[1] < vec_z[2]: if vec_z[0] < vec_z[1]: vec_x[0] = 1.0 diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index 133c3c2c321..0991d45c89f 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -1994,7 +1994,7 @@ def create_info(ch_names, sfreq, ch_types='misc', verbose=None): nchan = len(ch_names) if isinstance(ch_types, str): ch_types = [ch_types] * nchan - ch_types = np.atleast_1d(np.array(ch_types, np.str)) + ch_types = np.atleast_1d(np.array(ch_types, np.str_)) if ch_types.ndim != 1 or len(ch_types) != nchan: raise ValueError('ch_types and ch_names must be the same length ' '(%s != %s) for ch_types=%s' diff --git a/mne/io/nirx/nirx.py b/mne/io/nirx/nirx.py index 4cd6c02cb3d..3c43fab3884 100644 --- a/mne/io/nirx/nirx.py +++ b/mne/io/nirx/nirx.py @@ -22,6 +22,8 @@ def read_raw_nirx(fname, preload=False, verbose=None): """Reader for a NIRX fNIRS 
recording.
 
+    This function has only been tested with NIRScout devices.
+
     Parameters
     ----------
     fname : str
@@ -70,6 +72,9 @@ def __init__(self, fname, preload=False, verbose=None):
         if fname.endswith('.hdr'):
             fname = op.dirname(op.abspath(fname))
 
+        if not op.isdir(fname):
+            raise RuntimeError('The path you specified does not exist.')
+
         # Check if required files exist and store names for later use
         files = dict()
         keys = ('evt', 'hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2',
@@ -92,35 +97,6 @@
             for line in fid:
                 last_sample += 1
 
-        # Read participant information file
-        inf = ConfigParser(allow_no_value=True)
-        inf.read(files['inf'])
-        inf = inf._sections['Subject Demographics']
-
-        # Store subject information from inf file in mne format
-        # Note: NIRX also records "Study Type", "Experiment History",
-        # "Additional Notes", "Contact Information" and this information
-        # is currently discarded
-        subject_info = {}
-        names = inf['name'].split()
-        if len(names) > 0:
-            subject_info['first_name'] = \
-                inf['name'].split()[0].replace("\"", "")
-        if len(names) > 1:
-            subject_info['last_name'] = \
-                inf['name'].split()[-1].replace("\"", "")
-        if len(names) > 2:
-            subject_info['middle_name'] = \
-                inf['name'].split()[-2].replace("\"", "")
-        # subject_info['birthday'] = inf['age']  # TODO: not formatted properly
-        subject_info['sex'] = inf['gender'].replace("\"", "")
-        # Recode values
-        if subject_info['sex'] in {'M', 'Male', '1'}:
-            subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE
-        elif subject_info['sex'] in {'F', 'Female', '2'}:
-            subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE
-        # NIRStar does not record an id, or handedness by default
-
         # Read header file
         # The header file isn't compliant with the configparser. So all the
         # text between comments must be removed before passing to parser
@@ -135,6 +111,10 @@
                               ["\"15.0\"", "\"15.2\""]):
             raise RuntimeError('MNE does not support this NIRStar version'
                                ' (%s)' % (hdr['GeneralInfo']['NIRStar'],))
+        if "NIRScout" not in hdr['GeneralInfo']['Device']:
+            warn("Only import of data from NIRScout devices has been "
+                 "thoroughly tested. You are using a %s device. 
" % + hdr['GeneralInfo']['Device']) # Parse required header fields @@ -161,6 +141,35 @@ def __init__(self, fname, preload=False, verbose=None): # Extract sampling rate samplingrate = float(hdr['ImagingParameters']['SamplingRate']) + # Read participant information file + inf = ConfigParser(allow_no_value=True) + inf.read(files['inf']) + inf = inf._sections['Subject Demographics'] + + # Store subject information from inf file in mne format + # Note: NIRX also records "Study Type", "Experiment History", + # "Additional Notes", "Contact Information" and this information + # is currently discarded + subject_info = {} + names = inf['name'].split() + if len(names) > 0: + subject_info['first_name'] = \ + inf['name'].split()[0].replace("\"", "") + if len(names) > 1: + subject_info['last_name'] = \ + inf['name'].split()[-1].replace("\"", "") + if len(names) > 2: + subject_info['middle_name'] = \ + inf['name'].split()[-2].replace("\"", "") + # subject_info['birthday'] = inf['age'] # TODO: not formatted properly + subject_info['sex'] = inf['gender'].replace("\"", "") + # Recode values + if subject_info['sex'] in {'M', 'Male', '1'}: + subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE + elif subject_info['sex'] in {'F', 'Female', '2'}: + subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE + # NIRStar does not record an id, or handedness by default + # Read information about probe/montage/optodes # A word on terminology used here: # Sources produce light diff --git a/mne/io/nirx/tests/test_nirx.py b/mne/io/nirx/tests/test_nirx.py index 67cc33b9029..aaa2de4874f 100644 --- a/mne/io/nirx/tests/test_nirx.py +++ b/mne/io/nirx/tests/test_nirx.py @@ -37,6 +37,13 @@ def test_nirx_hdr_load(): assert raw.info['sfreq'] == 12.5 +@requires_testing_data +def test_nirx_missing_warn(): + """Test reading NIRX files when missing data.""" + with pytest.raises(RuntimeError, match='The path you'): + read_raw_nirx(fname_nirx_15_2_short + "1", preload=True) + + @requires_testing_data def test_nirx_dat_warn(tmpdir): """Test reading NIRX files when missing data.""" diff --git a/mne/io/pick.py b/mne/io/pick.py index cae6b044ef2..05b95f63b9e 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -383,7 +383,7 @@ def pick_types(info, meg=None, eeg=False, stim=False, eog=False, ecg=False, exclude = _check_info_exclude(info, exclude) nchan = info['nchan'] - pick = np.zeros(nchan, dtype=np.bool) + pick = np.zeros(nchan, dtype=bool) _check_meg_type(ref_meg, allow_auto=True) _check_meg_type(meg) diff --git a/mne/io/tests/test_compensator.py b/mne/io/tests/test_compensator.py index 3401630b1ac..a06debfec49 100644 --- a/mne/io/tests/test_compensator.py +++ b/mne/io/tests/test_compensator.py @@ -67,7 +67,7 @@ def make_evoked(fname, comp): if comp is not None: raw.apply_gradient_compensation(comp) picks = pick_types(raw.info, meg=True, ref_meg=True) - events = np.array([[0, 0, 1]], dtype=np.int) + events = np.array([[0, 0, 1]], dtype=np.int64) evoked = Epochs(raw, events, 1, 0, 20e-3, picks=picks, baseline=None).average() return evoked diff --git a/mne/label.py b/mne/label.py index 6a5f3a375bf..c1adaa827e8 100644 --- a/mne/label.py +++ b/mne/label.py @@ -999,7 +999,7 @@ def write_label(filename, label, verbose=None): with open(filename, 'wb') as fid: n_vertices = len(label.vertices) - data = np.zeros((n_vertices, 5), dtype=np.float) + data = np.zeros((n_vertices, 5), dtype=np.float64) data[:, 0] = label.vertices data[:, 1:4] = 1e3 * label.pos data[:, 4] = label.values @@ -1895,7 +1895,7 @@ def _read_annot(fname): np.fromfile(fid, '>c', length) # 
discard orig_tab names = list() - ctab = np.zeros((n_entries, 5), np.int) + ctab = np.zeros((n_entries, 5), np.int64) for i in range(n_entries): name_length = np.fromfile(fid, '>i4', 1)[0] name = np.fromfile(fid, "|S%d" % name_length, 1)[0] @@ -1909,7 +1909,7 @@ def _read_annot(fname): if ctab_version != 2: raise Exception('Color table version not supported') n_entries = np.fromfile(fid, '>i4', 1)[0] - ctab = np.zeros((n_entries, 5), np.int) + ctab = np.zeros((n_entries, 5), np.int64) length = np.fromfile(fid, '>i4', 1)[0] np.fromfile(fid, "|S%d" % length, 1) # Orig table path entries_to_read = np.fromfile(fid, '>i4', 1)[0] @@ -2387,7 +2387,7 @@ def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False, 'specify subject and subjects_dir parameters.') # Create annot and color table array to write - annot = np.empty(n_vertices, dtype=np.int) + annot = np.empty(n_vertices, dtype=np.int64) annot[:] = -1 # create the annotation ids from the colors annot_id_coding = np.array((1, 2 ** 8, 2 ** 16)) diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index f19d1f9892c..2c7f44f6d17 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ b/mne/minimum_norm/tests/test_inverse.py @@ -335,15 +335,28 @@ def test_localization_bias_fixed(bias_params_fixed, method, lower, upper, ('eLORETA', 99, 100, 0.8, 0.2), ('eLORETA', 99, 100, 0.8, 0.001), ]) +@pytest.mark.parametrize('pick_ori', (None, 'vector')) def test_localization_bias_loose(bias_params_fixed, method, lower, upper, - depth, loose): + depth, loose, pick_ori): """Test inverse localization bias for loose minimum-norm solvers.""" + if pick_ori == 'vector' and method == 'eLORETA': # works, but save cycles + return evoked, fwd, noise_cov, _, want = bias_params_fixed fwd = convert_forward_solution(fwd, surf_ori=False, force_fixed=False) assert not is_fixed_orient(fwd) inv_loose = make_inverse_operator(evoked.info, fwd, noise_cov, loose=loose, depth=depth) - loc = apply_inverse(evoked, inv_loose, lambda2, method).data + loc = apply_inverse( + evoked, inv_loose, lambda2, method, pick_ori=pick_ori) + if pick_ori is not None: + assert loc.data.ndim == 3 + loc, directions = loc.project('pca', src=fwd['src']) + abs_cos_sim = np.abs(np.sum( + directions * inv_loose['source_nn'][2::3], axis=1)) + assert np.percentile(abs_cos_sim, 10) > 0.9 # most very aligned + loc = abs(loc).data + else: + loc = loc.data assert (loc >= 0).all() # Compute the percentage of sources for which there is no loc bias: perc = (want == np.argmax(loc, axis=0)).mean() * 100 @@ -535,7 +548,7 @@ def test_orientation_prior(bias_params_free, method, looses, vmin, vmax, [_get_src_nn(s) for s in inv['src']])) vec_stc_surf = np.matmul(rot, vec_stc.data) if 0. 
in looses: - vec_stc_normal = vec_stc.normal(inv['src']) + vec_stc_normal, _ = vec_stc.project('normal', inv['src']) assert_allclose(stcs[1].data, vec_stc_normal.data) del vec_stc assert_allclose(vec_stc_normal.data, vec_stc_surf[:, 2]) @@ -1065,8 +1078,9 @@ def test_inverse_mixed(all_src_types_inv_evoked): stcs[kind].magnitude().data) assert_allclose(getattr(stcs['mixed'], kind)().magnitude().data, stcs[kind].magnitude().data) - assert_allclose(stcs['mixed'].surface().normal(surf_src).data, - stcs['surface'].normal(surf_src).data) + with pytest.deprecated_call(): + assert_allclose(stcs['mixed'].surface().normal(surf_src).data, + stcs['surface'].normal(surf_src).data) run_tests_if_main() diff --git a/mne/minimum_norm/time_frequency.py b/mne/minimum_norm/time_frequency.py index ee41ffe11fb..65bc9294674 100644 --- a/mne/minimum_norm/time_frequency.py +++ b/mne/minimum_norm/time_frequency.py @@ -191,9 +191,9 @@ def _compute_pow_plv(data, K, sel, Ws, source_ori, use_fft, Vh, with_power, with_plv, pick_ori, decim, verbose=None): """Aux function for induced power and PLV.""" shape, is_free_ori = _prepare_tfr(data, decim, pick_ori, Ws, K, source_ori) - power = np.zeros(shape, dtype=np.float) # power or raw TFR + power = np.zeros(shape, dtype=np.float64) # power or raw TFR # phase lock - plv = np.zeros(shape, dtype=np.complex) if with_plv else None + plv = np.zeros(shape, dtype=np.complex128) if with_plv else None for epoch in data: epoch = epoch[sel] # keep only selected channels @@ -215,9 +215,9 @@ def _compute_pow_plv(data, K, sel, Ws, source_ori, use_fft, Vh, def _single_epoch_tfr(data, is_free_ori, K, Ws, use_fft, decim, shape, with_plv, with_power): """Compute single trial TFRs, either ITC, power or raw TFR.""" - tfr_e = np.zeros(shape, dtype=np.float) # power or raw TFR + tfr_e = np.zeros(shape, dtype=np.float64) # power or raw TFR # phase lock - plv_e = np.zeros(shape, dtype=np.complex) if with_plv else None + plv_e = np.zeros(shape, dtype=np.complex128) if with_plv else None n_sources, _, n_times = shape for f, w in enumerate(Ws): tfr_ = cwt(data, [w], use_fft=use_fft, decim=decim) @@ -225,9 +225,9 @@ def _single_epoch_tfr(data, is_free_ori, K, Ws, use_fft, decim, shape, # phase lock and power at freq f if with_plv: - plv_f = np.zeros((n_sources, n_times), dtype=np.complex) + plv_f = np.zeros((n_sources, n_times), dtype=np.complex128) - tfr_f = np.zeros((n_sources, n_times), dtype=np.float) + tfr_f = np.zeros((n_sources, n_times), dtype=np.float64) for k, t in enumerate([np.real(tfr_), np.imag(tfr_)]): sol = np.dot(K, t) diff --git a/mne/morph.py b/mne/morph.py index 5ad9792852d..9dea5f94fc7 100644 --- a/mne/morph.py +++ b/mne/morph.py @@ -17,6 +17,7 @@ _BaseVolSourceEstimate, _BaseSourceEstimate, _get_ico_tris) from .source_space import SourceSpaces, _ensure_src from .surface import read_morph_map, mesh_edges, read_surface, _compute_nearest +from .transforms import _angle_between_quats, rot_to_quat from .utils import (logger, verbose, check_version, get_subjects_dir, warn as warn_, fill_doc, _check_option, _validate_type, BunchConst, wrapped_stdout, _check_fname, warn, @@ -33,8 +34,9 @@ def compute_source_morph(src, subject_from=None, subject_to='fsaverage', """Create a SourceMorph from one subject to another. Method is based on spherical morphing by FreeSurfer for surface - cortical estimates [1]_ and Symmetric Diffeomorphic Registration - for volumic data [2]_. 
+    cortical estimates :footcite:`GreveEtAl2013` and
+    Symmetric Diffeomorphic Registration for volumetric data
+    :footcite:`AvantsEtAl2008`.
 
     Parameters
     ----------
@@ -134,7 +136,7 @@ def compute_source_morph(src, subject_from=None, subject_to='fsaverage',
         obtained as described
         `here <https://surfer.nmr.mgh.harvard.edu/fswiki/Xhemi>`_. For
         statistical comparisons between hemispheres, use of the symmetric
         ``fsaverage_sym``
-        model is recommended to minimize bias [1]_.
+        model is recommended to minimize bias :footcite:`GreveEtAl2013`.
 
     .. versionadded:: 0.17.0
 
@@ -143,14 +145,7 @@
 
     References
     ----------
-    .. [1] Greve D. N., Van der Haegen L., Cai Q., Stufflebeam S., Sabuncu M.
-           R., Fischl B., Brysbaert M.
-           A Surface-based Analysis of Language Lateralization and Cortical
-           Asymmetry. Journal of Cognitive Neuroscience 25(9), 1477-1492, 2013.
-    .. [2] Avants, B. B., Epstein, C. L., Grossman, M., & Gee, J. C. (2009).
-           Symmetric Diffeomorphic Image Registration with Cross- Correlation:
-           Evaluating Automated Labeling of Elderly and Neurodegenerative
-           Brain, 12(1), 26-41.
+    .. footbibliography::
     """
     src_data, kind, src_subject = _get_src_data(src)
     subject_from = _check_subject_src(subject_from, src_subject)
@@ -179,13 +174,13 @@
         _check_dep(nibabel='2.1.0', dipy='0.10.1')
         import nibabel as nib
 
-        logger.info('volume source space(s) present...')
+        logger.info('Volume source space(s) present...')
 
         # load moving MRI
         mri_subpath = op.join('mri', 'brain.mgz')
         mri_path_from = op.join(subjects_dir, subject_from, mri_subpath)
-        logger.info('loading %s as "from" volume' % mri_path_from)
+        logger.info('    Loading %s as "from" volume' % mri_path_from)
         with warnings.catch_warnings():
             mri_from = nib.load(mri_path_from)
 
@@ -194,7 +189,7 @@
         mri_path_to = op.join(subjects_dir, subject_to, mri_subpath)
         if not op.isfile(mri_path_to):
             raise IOError('cannot read file: %s' % mri_path_to)
-        logger.info('loading %s as "to" volume' % mri_path_to)
+        logger.info('    Loading %s as "to" volume' % mri_path_to)
         with warnings.catch_warnings():
             mri_to = nib.load(mri_path_to)
 
@@ -206,11 +201,15 @@
                              'mixed source space')
         else:
             surf_offset = 2 if src_to.kind == 'mixed' else 0
-            src_data['to_vox_map'] = (
-                src_to[-1]['shape'], src_to[-1]['src_mri_t']['trans'] *
-                np.array([[1e3, 1e3, 1e3, 1]]).T)
+            # All of our computations are in RAS (like img.affine), so we need
+            # to get the transformation from RAS to the source space
+            # subsampling of vox (src), not MRI (FreeSurfer surface RAS) to src
+            src_ras_t = np.dot(src_to[-1]['mri_ras_t']['trans'],
+                               src_to[-1]['src_mri_t']['trans'])
+            src_ras_t[:3] *= 1e3
+            src_data['to_vox_map'] = (src_to[-1]['shape'], src_ras_t)
             vertices_to_vol = [s['vertno'] for s in src_to[surf_offset:]]
-            zooms_src_to = np.diag(src_data['to_vox_map'][1])[:3]
+            zooms_src_to = np.diag(src_to[-1]['src_mri_t']['trans'])[:3] * 1000
             assert (zooms_src_to[0] == zooms_src_to).all()
             zooms_src_to = tuple(zooms_src_to)
 
@@ -312,7 +311,7 @@ class SourceMorph(object):
         Number of levels (``len(niter_sdr)``) and number of
         iterations per level - for each successive stage of iterative
         refinement - to perform the Symmetric Diffeomorphic Registration (sdr)
-        transform [2]_.
+        transform :footcite:`AvantsEtAl2008`.
     spacing : int | list | None
         See :func:`mne.compute_source_morph`.
smooth : int | str | None @@ -321,7 +320,7 @@ class SourceMorph(object): Morph across hemisphere. morph_mat : scipy.sparse.csr_matrix The sparse surface morphing matrix for spherical surface - based morphing [1]_. + based morphing :footcite:`GreveEtAl2013`. vertices_to : list of ndarray The destination surface vertices. shape : tuple @@ -344,14 +343,7 @@ class SourceMorph(object): References ---------- - .. [1] Greve D. N., Van der Haegen L., Cai Q., Stufflebeam S., Sabuncu M. - R., Fischl B., Brysbaert M. - A Surface-based Analysis of Language Lateralization and Cortical - Asymmetry. Journal of Cognitive Neuroscience 25(9), 1477-1492, 2013. - .. [2] Avants, B. B., Epstein, C. L., Grossman, M., & Gee, J. C. (2009). - Symmetric Diffeomorphic Image Registration with Cross- Correlation: - Evaluating Automated Labeling of Elderly and Neurodegenerative - Brain, 12(1), 26-41. + .. footbibliography:: """ def __init__(self, subject_from, subject_to, kind, zooms, @@ -476,14 +468,16 @@ def _morph_one_vol(self, one): img_to, self.affine, _get_zooms_orig(self), self.zooms) # morph data - img_to = self.sdr_morph.transform(self.pre_affine.transform(img_to)) + img_to = self.pre_affine.transform(img_to) + if self.sdr_morph is not None: + img_to = self.sdr_morph.transform(img_to) # subselect the correct cube if src_to is provided if self.src_data['to_vox_map'] is not None: # order=0 (nearest) should be fine since it's just subselecting - img_to = _get_img_fdata(resample_from_to( - SpatialImage(img_to, self.affine), - self.src_data['to_vox_map'], order=0)) + img_to = SpatialImage(img_to, self.affine) + img_to = resample_from_to(img_to, self.src_data['to_vox_map'], 1) + img_to = _get_img_fdata(img_to) # reshape to nvoxel x nvol: # in the MNE definition of volume source spaces, @@ -809,7 +803,7 @@ def _interpolate_data(stc, morph, mri_resolution, mri_space, output): vols = np.zeros((np.prod(shape[:3]), shape[3]), order='F') # flatten n_vertices_seen = 0 for this_inuse in inuse: - this_inuse = this_inuse.astype(np.bool) + this_inuse = this_inuse.astype(bool) n_vertices = np.sum(this_inuse) stc_slice = slice(n_vertices_seen, n_vertices_seen + n_vertices) vols[this_inuse] = stc.data[stc_slice] @@ -889,7 +883,7 @@ def _compute_morph_sdr(mri_from, mri_to, niter_affine, niter_sdr, zooms): mri_to = nib.Nifti1Image(mri_to_res, mri_to_res_affine) affine = mri_to.affine - mri_to = _get_img_fdata(mri_to) # to ndarray + mri_to = _get_img_fdata(mri_to).copy() # to ndarray mri_to /= mri_to.max() mri_from_affine = mri_from.affine # get mri_from to world transform mri_from = _get_img_fdata(mri_from) # to ndarray @@ -919,6 +913,13 @@ def _compute_morph_sdr(mri_from, mri_to, niter_affine, niter_sdr, zooms): rigid = affreg.optimize( mri_to, mri_from, transforms.RigidTransform3D(), None, affine, mri_from_affine, starting_affine=translation.affine) + dist = np.linalg.norm(rigid.affine[:3, 3]) + angle = np.rad2deg(_angle_between_quats( + np.zeros(3), rot_to_quat(rigid.affine[:3, :3]))) + + logger.info(f'Translation: {dist:5.1f} mm') + logger.info(f'Rotation: {angle:5.1f}°') + logger.info('') # affine transform (translation + rotation + scaling) logger.info('Optimizing full affine:') @@ -928,12 +929,33 @@ def _compute_morph_sdr(mri_from, mri_to, niter_affine, niter_sdr, zooms): affine, mri_from_affine, starting_affine=rigid.affine) # compute mapping - sdr = imwarp.SymmetricDiffeomorphicRegistration( - metrics.CCMetric(3), list(niter_sdr)) - logger.info('Optimizing SDR:') - with wrapped_stdout(indent=' '): - sdr_morph = 
sdr.optimize(mri_to, pre_affine.transform(mri_from)) - shape = tuple(sdr_morph.domain_shape) # should be tuple of int + mri_from_to = pre_affine.transform(mri_from) + shape = tuple(pre_affine.domain_shape) + if len(niter_sdr): + sdr = imwarp.SymmetricDiffeomorphicRegistration( + metrics.CCMetric(3), list(niter_sdr)) + logger.info('Optimizing SDR:') + with wrapped_stdout(indent=' '): + sdr_morph = sdr.optimize(mri_to, pre_affine.transform(mri_from)) + assert shape == tuple(sdr_morph.domain_shape) # should be tuple of int + mri_from_to = sdr_morph.transform(mri_from_to) + else: + sdr_morph = None + + mri_to, mri_from_to = mri_to.ravel(), mri_from_to.ravel() + mri_from_to /= np.linalg.norm(mri_from_to) + mri_to /= np.linalg.norm(mri_to) + r2 = 100 * (mri_to @ mri_from_to) + logger.info(f'Variance explained by morph: {r2:0.1f}%') + + # To debug to_vox_map, this can be used: + # from nibabel.processing import resample_from_to + # mri_from_to = sdr_morph.transform(pre_affine.transform(mri_from)) + # mri_from_to = nib.Nifti1Image(mri_from_to, affine) + # fig1 = mri_from_to.orthoview() + # mri_from_to_cut = resample_from_to(mri_from_to, to_vox_map, 1) + # fig2 = mri_from_to_cut.orthoview() + return shape, zooms, affine, pre_affine, sdr_morph @@ -1232,7 +1254,7 @@ def _morph_mult(data, e, use_sparse, idx_use_data, idx_use_out=None): def _sparse_argmax_nnz_row(csr_mat): """Return index of the maximum non-zero index in each row.""" n_rows = csr_mat.shape[0] - idx = np.empty(n_rows, dtype=np.int) + idx = np.empty(n_rows, dtype=np.int64) for k in range(n_rows): row = csr_mat[k].tocoo() idx[k] = row.col[np.argmax(row.data)] diff --git a/mne/preprocessing/_peak_finder.py b/mne/preprocessing/_peak_finder.py index 88a8d2abdd1..0fa02d959a1 100644 --- a/mne/preprocessing/_peak_finder.py +++ b/mne/preprocessing/_peak_finder.py @@ -102,7 +102,7 @@ def peak_finder(x0, thresh=None, extrema=1, verbose=None): # Preallocate max number of maxima maxPeaks = int(np.ceil(length / 2.0)) - peak_loc = np.zeros(maxPeaks, dtype=np.int) + peak_loc = np.zeros(maxPeaks, dtype=np.int64) peak_mag = np.zeros(maxPeaks) c_ind = 0 # Loop through extrema which should be peaks and then valleys diff --git a/mne/preprocessing/bads.py b/mne/preprocessing/bads.py index bc07ea3fa9b..901df7f9aa9 100644 --- a/mne/preprocessing/bads.py +++ b/mne/preprocessing/bads.py @@ -30,7 +30,7 @@ def _find_outliers(X, threshold=3.0, max_iter=2, tail=0): The outlier indices. 
""" from scipy.stats import zscore - my_mask = np.zeros(len(X), dtype=np.bool) + my_mask = np.zeros(len(X), dtype=bool) for _ in range(max_iter): X = np.ma.masked_array(X, my_mask) if tail == 0: diff --git a/mne/preprocessing/ecg.py b/mne/preprocessing/ecg.py index 5de4bc79ef1..8742c42fc5f 100644 --- a/mne/preprocessing/ecg.py +++ b/mne/preprocessing/ecg.py @@ -94,7 +94,7 @@ def qrs_detector(sfreq, ecg, thresh_value=0.6, levels=2.5, n_thresh=3, if window[0] > thresh1: max_time = np.argmax(window) time.append(ii + max_time) - nx = np.sum(np.diff(((window > thresh1).astype(np.int) == + nx = np.sum(np.diff(((window > thresh1).astype(np.int64) == 1).astype(int))) numcross.append(nx) rms.append(np.sqrt(sum_squared(window) / window.size)) diff --git a/mne/preprocessing/xdawn.py b/mne/preprocessing/xdawn.py index 89850bddeaf..2e5578304fd 100644 --- a/mne/preprocessing/xdawn.py +++ b/mne/preprocessing/xdawn.py @@ -630,7 +630,7 @@ def _pick_sources(self, data, include, exclude, eid): sources = np.dot(self.filters_[eid], data) if include not in (None, list()): - mask = np.ones(len(sources), dtype=np.bool) + mask = np.ones(len(sources), dtype=bool) mask[np.unique(include)] = False sources[mask] = 0. logger.info('Zeroing out %i Xdawn components' % mask.sum()) diff --git a/mne/simulation/evoked.py b/mne/simulation/evoked.py index 598ea3a5047..8da4bff4db8 100644 --- a/mne/simulation/evoked.py +++ b/mne/simulation/evoked.py @@ -73,7 +73,7 @@ def simulate_evoked(fwd, stc, info, cov, nave=30, iir_filter=None, if nave < np.inf: noise = _simulate_noise_evoked(evoked, cov, iir_filter, random_state) evoked.data += noise.data / math.sqrt(nave) - evoked.nave = np.int(nave) + evoked.nave = np.int64(nave) if cov is not None and cov.get('projs', None): evoked.add_proj(cov['projs']).apply_proj() return evoked diff --git a/mne/simulation/tests/test_source.py b/mne/simulation/tests/test_source.py index d37c02802d1..30806982fb5 100644 --- a/mne/simulation/tests/test_source.py +++ b/mne/simulation/tests/test_source.py @@ -113,7 +113,7 @@ def test_simulate_sparse_stc(_get_fwd_labels): n_times = 10 tmin = 0 tstep = 1e-3 - times = np.arange(n_times, dtype=np.float) * tstep + tmin + times = np.arange(n_times, dtype=np.float64) * tstep + tmin pytest.raises(ValueError, simulate_sparse_stc, fwd['src'], len(labels), times, labels=labels, location='center', subject='sample', @@ -221,7 +221,7 @@ def test_simulate_sparse_stc_single_hemi(_get_fwd_labels): n_times = 10 tmin = 0 tstep = 1e-3 - times = np.arange(n_times, dtype=np.float) * tstep + tmin + times = np.arange(n_times, dtype=np.float64) * tstep + tmin stc_1 = simulate_sparse_stc(fwd['src'], len(labels_single_hemi), times, labels=labels_single_hemi, random_state=0) diff --git a/mne/source_estimate.py b/mne/source_estimate.py index b852444891d..c7b0f55e0b8 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -17,7 +17,6 @@ from .cov import Covariance from .evoked import _get_peak from .filter import resample -from .fixes import einsum from .io.constants import FIFF from .surface import read_surface, _get_ico_surface, mesh_edges from .source_space import (_ensure_src, _get_morph_src_reordering, @@ -1786,6 +1785,8 @@ def magnitude(self): data_mag, self.vertices, self.tmin, self.tstep, self.subject, self.verbose) + @deprecated('stc.normal(src) is deprecated and will be removed in 0.22, ' + 'use stc.project("normal", src)[0] instead') @fill_doc def normal(self, src, use_cps=True): """Compute activity orthogonal to the cortex. 
@@ -1806,13 +1807,97 @@ def normal(self, src, use_cps=True): The source estimate only retaining the activity orthogonal to the cortex. """ - _check_src_normal('normal', src) + return self.project('normal', src, use_cps)[0] + + def _get_src_normals(self, src, use_cps): normals = np.vstack([_get_src_nn(s, use_cps, v) for s, v in - zip(src, self.vertices)]) - data_norm = einsum('ijk,ij->ik', self.data, normals) - return self._scalar_class( + zip(src, self.vertices)]) + return normals + + @fill_doc + def project(self, directions, src=None, use_cps=True): + """Project the data for each vertex in a given direction. + + Parameters + ---------- + directions : ndarray, shape (n_vertices, 3) | str + Can be: + + - ``'normal'`` + Project onto the source space normals. + - ``'pca'`` + SVD will be used to project onto the direction of maximal + power for each source. + - :class:`~numpy.ndarray`, shape (n_vertices, 3) + Projection directions for each source. + src : instance of SourceSpaces | None + The source spaces corresponding to the source estimate. + Not used when ``directions`` is an array, optional when + ``directions='pca'``. + %(use_cps)s + Should be the same value that was used when the forward model + was computed (typically True). + + Returns + ------- + stc : instance of SourceEstimate + The projected source estimate. + directions : ndarray, shape (n_vertices, 3) + The directions that were computed (or just used). + + Notes + ----- + When using SVD, there is a sign ambiguity for the direction of maximal + power. When ``src is None``, the direction is chosen that makes the + resulting time waveform sum positive (i.e., have positive amplitudes). + When ``src`` is provided, the directions are flipped in the direction + of the source normals, i.e., outward from cortex for surface source + spaces and in the +Z / superior direction for volume source spaces. + + .. versionadded:: 0.21 + """ + _validate_type(directions, (str, np.ndarray), 'directions') + _validate_type(src, (None, SourceSpaces), 'src') + if isinstance(directions, str): + _check_option('directions', directions, ('normal', 'pca'), + extra='when str') + + if directions == 'normal': + if src is None: + raise ValueError( + 'If directions="normal", src cannot be None') + _check_src_normal('normal', src) + directions = self._get_src_normals(src, use_cps) + else: + assert directions == 'pca' + x = self.data + if not np.isrealobj(self.data): + _check_option('stc.data.dtype', self.data.dtype, + (np.complex64, np.complex128)) + dtype = \ + np.float32 if x.dtype == np.complex64 else np.float64 + x = x.view(dtype) + assert x.shape[-1] == 2 * self.data.shape[-1] + u, _, v = np.linalg.svd(x, full_matrices=False) + directions = u[:, :, 0] + # The sign is arbitrary, so let's flip it in the direction that + # makes the resulting time series the most positive: + if src is None: + signs = np.sum(v[:, 0].real, axis=1, keepdims=True) + else: + normals = self._get_src_normals(src, use_cps) + signs = np.sum(directions * normals, axis=1, keepdims=True) + assert signs.shape == (self.data.shape[0], 1) + signs = np.sign(signs) + signs[signs == 0] = 1. 
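+            # ``signs`` is now +/-1 for every source (exact zeros mapped to
+            # +1), so the flip below resolves the SVD sign ambiguity
+            # described in the Notes above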
+ directions *= signs + _check_option( + 'directions.shape', directions.shape, [(self.data.shape[0], 3)]) + data_norm = np.matmul(directions[:, np.newaxis], self.data)[:, 0] + stc = self._scalar_class( data_norm, self.vertices, self.tmin, self.tstep, self.subject, self.verbose) + return stc, directions class _BaseVolSourceEstimate(_BaseSourceEstimate): @@ -1822,7 +1907,7 @@ class _BaseVolSourceEstimate(_BaseSourceEstimate): @copy_function_doc_to_method_doc(plot_volume_source_estimates) def plot(self, src, subject=None, subjects_dir=None, mode='stat_map', - bg_img=None, colorbar=True, colormap='auto', clim='auto', + bg_img='T1.mgz', colorbar=True, colormap='auto', clim='auto', transparent='auto', show=True, initial_time=None, initial_pos=None, verbose=None): data = self.magnitude() if self._data_ndim == 3 else self @@ -2725,7 +2810,7 @@ def _get_connectivity_from_edges(edges, n_times, verbose=None): n_vertices = edges.shape[0] logger.info("-- number of connected vertices : %d" % n_vertices) nnz = edges.col.size - aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), np.int) + aux = n_vertices * np.tile(np.arange(n_times)[:, None], (1, nnz)) col = (edges.col[None, :] + aux).ravel() row = (edges.row[None, :] + aux).ravel() if n_times > 1: # add temporal edges @@ -2736,7 +2821,7 @@ def _get_connectivity_from_edges(edges, n_times, verbose=None): row = np.concatenate((row, o, d)) col = np.concatenate((col, d, o)) data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1), - dtype=np.int) + dtype=np.int64) connectivity = coo_matrix((data, (row, col)), shape=(n_times * n_vertices,) * 2) return connectivity diff --git a/mne/source_space.py b/mne/source_space.py index 8bc3e611d4e..b6922eb61da 100644 --- a/mne/source_space.py +++ b/mne/source_space.py @@ -803,7 +803,7 @@ def _read_one_source_space(fid, this): if tag is None: raise ValueError('Vertex data not found') - res['rr'] = tag.data.astype(np.float) # double precision for mayavi + res['rr'] = tag.data.astype(np.float64) # double precision for mayavi if res['rr'].shape[0] != res['np']: raise ValueError('Vertex information is incorrect') @@ -835,7 +835,7 @@ def _read_one_source_space(fid, this): tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE) if tag is None: res['nuse'] = 0 - res['inuse'] = np.zeros(res['nuse'], dtype=np.int) + res['inuse'] = np.zeros(res['nuse'], dtype=np.int64) res['vertno'] = None else: res['nuse'] = int(tag.data) @@ -843,7 +843,7 @@ def _read_one_source_space(fid, this): if tag is None: raise ValueError('Source selection information missing') - res['inuse'] = tag.data.astype(np.int).T + res['inuse'] = tag.data.astype(np.int64).T if len(res['inuse']) != res['np']: raise ValueError('Incorrect number of entries in source space ' 'selection') diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py index f6286cfa7f2..727e9460229 100644 --- a/mne/stats/cluster_level.py +++ b/mne/stats/cluster_level.py @@ -119,7 +119,7 @@ def _get_clusters_spatial(s, neighbors): # s is a vector of spatial indices that are significant, like: # s = np.where(x_in)[0] # for x_in representing a single time-instant - r = np.ones(s.shape, np.bool) + r = np.ones(s.shape, bool) clusters = list() next_ind = 0 if s.size > 0 else -1 while next_ind >= 0: diff --git a/mne/surface.py b/mne/surface.py index 4328332b5d7..8a459736934 100644 --- a/mne/surface.py +++ b/mne/surface.py @@ -617,7 +617,7 @@ def _fread3(fobj): def _fread3_many(fobj, n): """Read 3-byte ints from an open binary file object.""" b1, b2, b3 = 
np.fromfile(fobj, ">u1", - 3 * n).reshape(-1, 3).astype(np.int).T + 3 * n).reshape(-1, 3).astype(np.int64).T return (b1 << 16) + (b2 << 8) + b3 @@ -647,7 +647,7 @@ def read_curvature(filepath, binary=True): _fread3(fobj) curv = np.fromfile(fobj, ">i2", vnum) / 100 if binary: - return 1 - np.array(curv != 0, np.int) + return 1 - np.array(curv != 0, np.int64) else: return curv diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index 19c30b93e90..f1e5163907e 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -1174,8 +1174,9 @@ def test_evoked_io_from_epochs(tmpdir): picks=picks, decim=5) evoked = epochs.average() evoked.info['proj_name'] = '' # Test that empty string shortcuts to None. - evoked.save(op.join(tempdir, 'evoked-ave.fif')) - evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0] + fname_temp = op.join(tempdir, 'evoked-ave.fif') + evoked.save(fname_temp) + evoked2 = read_evokeds(fname_temp)[0] assert_equal(evoked2.info['proj_name'], None) assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20) assert_allclose(evoked.times, evoked2.times, rtol=1e-4, @@ -1185,8 +1186,8 @@ def test_evoked_io_from_epochs(tmpdir): epochs = Epochs(raw, events[:4], event_id, 0.1, tmax, picks=picks, baseline=(0.1, 0.2), decim=5) evoked = epochs.average() - evoked.save(op.join(tempdir, 'evoked-ave.fif')) - evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0] + evoked.save(fname_temp) + evoked2 = read_evokeds(fname_temp)[0] assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20) assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20) @@ -1198,6 +1199,25 @@ def test_evoked_io_from_epochs(tmpdir): assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20) assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20) + # should work when one channel type is changed to a non-data ch + picks = pick_types(raw.info, meg=True, eeg=True) + epochs = Epochs(raw, events[:4], event_id, -0.2, tmax, + picks=picks, baseline=(0.1, 0.2), decim=5) + with pytest.warns(RuntimeWarning, match='unit for.*changed from'): + epochs.set_channel_types({epochs.ch_names[0]: 'syst'}) + evokeds = list() + for picks in (None, 'all'): + evoked = epochs.average(picks) + evokeds.append(evoked) + evoked.save(fname_temp) + evoked2 = read_evokeds(fname_temp)[0] + start = 1 if picks is None else 0 + for ev in (evoked, evoked2): + assert ev.ch_names == epochs.ch_names[start:] + assert_allclose(ev.data, epochs.get_data().mean(0)[start:]) + with pytest.raises(ValueError, match='.*nchan.* must match'): + write_evokeds(fname_temp, evokeds) + def test_evoked_standard_error(tmpdir): """Test calculation and read/write of standard error.""" diff --git a/mne/tests/test_morph.py b/mne/tests/test_morph.py index 0497d2f781b..6ba0785746f 100644 --- a/mne/tests/test_morph.py +++ b/mne/tests/test_morph.py @@ -23,7 +23,7 @@ from mne.minimum_norm import (apply_inverse, read_inverse_operator, make_inverse_operator) from mne.source_space import (get_volume_labels_from_aseg, _get_mri_info_data, - _get_atlas_values) + _get_atlas_values, _add_interpolator) from mne.utils import (run_tests_if_main, requires_nibabel, check_version, requires_dipy, requires_h5py) from mne.fixes import _get_args @@ -39,16 +39,19 @@ 'sample_audvis_trunc-meg-vol-7-meg-inv.fif') fname_fwd_vol = op.join(sample_dir, 'sample_audvis_trunc-meg-vol-7-fwd.fif') -fname_vol = op.join(sample_dir, - 'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w') +fname_vol_w = op.join(sample_dir, + 
'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w') fname_inv_surf = op.join(sample_dir, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif') fname_fmorph = op.join(data_path, 'MEG', 'sample', 'fsaverage_audvis_trunc-meg') fname_smorph = op.join(sample_dir, 'sample_audvis_trunc-meg') fname_t1 = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz') +fname_vol = op.join(subjects_dir, 'sample', 'bem', 'sample-volume-7mm-src.fif') fname_brain = op.join(subjects_dir, 'sample', 'mri', 'brain.mgz') fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz') +fname_fs_vol = op.join(subjects_dir, 'fsaverage', 'bem', + 'fsaverage-vol7-nointerp-src.fif.gz') fname_aseg_fs = op.join(subjects_dir, 'fsaverage', 'mri', 'aseg.mgz') fname_stc = op.join(sample_dir, 'fsaverage_audvis_trunc-meg') @@ -196,6 +199,16 @@ def test_surface_source_morph_round_trip(smooth, lower, upper, n_warn): stc_back = morph_back.apply(stc_fs) corr = np.corrcoef(stc.data.ravel(), stc_back.data.ravel())[0, 1] assert lower <= corr <= upper + # check the round-trip power + assert_power_preserved(stc, stc_back) + + +def assert_power_preserved(orig, new, limits=(1., 1.05)): + """Assert that the power is preserved during a round-trip morph.""" + __tracebackhide__ = True + power_ratio = np.linalg.norm(orig.data) / np.linalg.norm(new.data) + min_, max_ = limits + assert min_ < power_ratio < max_, 'Power ratio' @requires_h5py @@ -245,7 +258,7 @@ def test_surface_vector_source_morph(tmpdir): assert isinstance(source_morph_surf.apply(stc_surf), SourceEstimate) # degenerate - stc_vol = read_source_estimate(fname_vol, 'sample') + stc_vol = read_source_estimate(fname_vol_w, 'sample') with pytest.raises(TypeError, match='stc_from must be an instance'): source_morph_surf.apply(stc_vol) @@ -259,7 +272,7 @@ def test_volume_source_morph(tmpdir): """Test volume source estimate morph, special cases and exceptions.""" import nibabel as nib inverse_operator_vol = read_inverse_operator(fname_inv_vol) - stc_vol = read_source_estimate(fname_vol, 'sample') + stc_vol = read_source_estimate(fname_vol_w, 'sample') # check for invalid input type with pytest.raises(TypeError, match='src must be'): @@ -284,7 +297,7 @@ def test_volume_source_morph(tmpdir): with pytest.raises(ValueError, match='Only surface.*sparse morph'): compute_source_morph(src=src, sparse=True, subjects_dir=subjects_dir) - # terrible quality buts fast + # terrible quality but fast zooms = 20 kwargs = dict(zooms=zooms, niter_sdr=(1,), niter_affine=(1,)) source_morph_vol = compute_source_morph( @@ -433,6 +446,87 @@ def test_volume_source_morph(tmpdir): source_morph_vol.apply(stc_vol_bad) +@requires_h5py +@requires_nibabel() +@requires_dipy() +@pytest.mark.slowtest +@testing.requires_testing_data +@pytest.mark.parametrize('subject_from, subject_to, lower, upper', [ + ('sample', 'fsaverage', 8.5, 9), + ('fsaverage', 'fsaverage', 7, 7.5), + ('sample', 'sample', 6, 7), +]) +def test_volume_source_morph_round_trip( + tmpdir, subject_from, subject_to, lower, upper): + """Test volume source estimate morph round-trips well.""" + import nibabel as nib + from nibabel.processing import resample_from_to + src = dict() + if 'sample' in (subject_from, subject_to): + src['sample'] = mne.read_source_spaces(fname_vol) + src['sample'][0]['subject_his_id'] = 'sample' + assert src['sample'][0]['nuse'] == 4157 + if 'fsaverage' in (subject_from, subject_to): + # Created to save space with: + # + # bem = op.join(op.dirname(mne.__file__), 'data', 'fsaverage', + # 'fsaverage-inner_skull-bem.fif') + # src_fsaverage = 
mne.setup_volume_source_space( + # 'fsaverage', pos=7., bem=bem, mindist=0, + # subjects_dir=subjects_dir, add_interpolator=False) + # mne.write_source_spaces(fname_fs_vol, src_fsaverage, overwrite=True) + # + # For speed we do it without the interpolator because it's huge. + src['fsaverage'] = mne.read_source_spaces(fname_fs_vol) + src['fsaverage'][0].update( + vol_dims=np.array([23, 29, 25]), seg_name='brain') + _add_interpolator(src['fsaverage'], True) + assert src['fsaverage'][0]['nuse'] == 6379 + src_to, src_from = src[subject_to], src[subject_from] + del src + # No SDR just for speed once everything works + kwargs = dict(niter_sdr=(), niter_affine=(1,), + subjects_dir=subjects_dir, verbose=True) + morph_from_to = compute_source_morph( + src=src_from, src_to=src_to, subject_to=subject_to, **kwargs) + morph_to_from = compute_source_morph( + src=src_to, src_to=src_from, subject_to=subject_from, **kwargs) + use = np.linspace(0, src_from[0]['nuse'] - 1, 10).round().astype(int) + stc_from = VolSourceEstimate( + np.eye(src_from[0]['nuse'])[:, use], [src_from[0]['vertno']], 0, 1) + stc_from_rt = morph_to_from.apply(morph_from_to.apply(stc_from)) + maxs = np.argmax(stc_from_rt.data, axis=0) + src_rr = src_from[0]['rr'][src_from[0]['vertno']] + dists = 1000 * np.linalg.norm(src_rr[use] - src_rr[maxs], axis=1) + mu = np.mean(dists) + assert lower <= mu < upper # fsaverage=7.97; 25.4 without src_ras_t fix + # check that pre_affine is close to identity when subject_to==subject_from + if subject_to == subject_from: + for morph in (morph_to_from, morph_from_to): + assert_allclose( + morph.pre_affine.affine, np.eye(4), atol=1e-2) + # check that power is more or less preserved + ratio = stc_from.data.size / stc_from_rt.data.size + limits = ratio * np.array([1, 1.2]) + stc_from.crop(0, 0)._data.fill(1.) 
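+    # A single time point of all-ones isolates how the morph itself spreads
+    # amplitude, independent of the identity data used for the distance check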
+ stc_from_rt = morph_to_from.apply(morph_from_to.apply(stc_from)) + assert_power_preserved(stc_from, stc_from_rt, limits=limits) + # before and after morph, check the proportion of vertices + # that are inside and outside the brainmask.mgz + brain = nib.load(op.join(subjects_dir, subject_from, 'mri', 'brain.mgz')) + mask = _get_img_fdata(brain) > 0 + if subject_from == subject_to == 'sample': + for stc in [stc_from, stc_from_rt]: + img = stc.as_volume(src_from, mri_resolution=True) + img = nib.Nifti1Image(_get_img_fdata(img)[:, :, :, 0], img.affine) + img = _get_img_fdata(resample_from_to(img, brain, order=1)) + assert img.shape == mask.shape + in_ = img[mask].astype(bool).mean() + out = img[~mask].astype(bool).mean() + assert 0.97 < in_ < 0.98 + assert out < 0.02 + + @pytest.mark.slowtest @testing.requires_testing_data def test_morph_stc_dense(): diff --git a/mne/tests/test_proj.py b/mne/tests/test_proj.py index 837c0938092..3b5790aea9d 100644 --- a/mne/tests/test_proj.py +++ b/mne/tests/test_proj.py @@ -175,7 +175,7 @@ def test_compute_proj_epochs(): p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0]) if bad_ch in p1['data']['col_names']: bad = p1['data']['col_names'].index('MEG 2443') - mask = np.ones(p1_data.size, dtype=np.bool) + mask = np.ones(p1_data.size, dtype=bool) mask[bad] = False p1_data = p1_data[:, mask] p2_data = p2_data[:, mask] diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index d78c759e9c9..0ff1ece8060 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -11,6 +11,7 @@ assert_allclose, assert_equal) import pytest from scipy import sparse +from scipy.optimize import fmin_cobyla from mne import (stats, SourceEstimate, VectorSourceEstimate, VolSourceEstimate, Label, read_source_spaces, @@ -988,7 +989,7 @@ def test_spatio_temporal_src_connectivity(): def test_to_data_frame(): """Test stc Pandas exporter.""" n_vert, n_times = 10, 5 - vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)] + vertices = [np.arange(n_vert, dtype=np.int64), np.empty(0, dtype=np.int64)] data = rng.randn(n_vert, n_times) stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1, subject='sample') @@ -1010,7 +1011,7 @@ def test_to_data_frame(): def test_to_data_frame_index(index): """Test index creation in stc Pandas exporter.""" n_vert, n_times = 10, 5 - vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)] + vertices = [np.arange(n_vert, dtype=np.int64), np.empty(0, dtype=np.int64)] data = rng.randn(n_vert, n_times) stc = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1, subject='sample') @@ -1119,23 +1120,32 @@ def test_mixed_stc(tmpdir): (VolVectorSourceEstimate, 'discrete'), (MixedVectorSourceEstimate, 'mixed'), ]) -def test_vec_stc_basic(tmpdir, klass, kind): +@pytest.mark.parametrize('dtype', [ + np.float32, np.float64, np.complex64, np.complex128]) +def test_vec_stc_basic(tmpdir, klass, kind, dtype): """Test (vol)vector source estimate.""" nn = np.array([ [1, 0, 0], [0, 1, 0], - [0, 0, 1], + [np.sqrt(1. / 2.), 0, np.sqrt(1. / 2.)], [np.sqrt(1 / 3.)] * 3 - ]) + ], np.float32) data = np.array([ [1, 0, 0], [0, 2, 0], - [3, 0, 0], + [-3, 0, 0], [1, 1, 1], - ])[:, :, np.newaxis] - magnitudes = np.linalg.norm(data, axis=1)[:, 0] - normals = np.array([1, 2, 0, np.sqrt(3)]) + ], dtype)[:, :, np.newaxis] + amplitudes = np.array([1, 2, 3, np.sqrt(3)], dtype) + magnitudes = amplitudes.copy() + normals = np.array([1, 2, -3. 
/ np.sqrt(2), np.sqrt(3)], dtype)
+    if dtype in (np.complex64, np.complex128):
+        data *= 1j
+        amplitudes *= 1j
+        normals *= 1j
+    directions = np.array(
+        [[1, 0, 0], [0, 1, 0], [-1, 0, 0], [1. / np.sqrt(3)] * 3])
    vol_kind = kind if kind in ('discrete', 'vol') else 'vol'
    vol_src = SourceSpaces([dict(nn=nn, type=vol_kind)])
    assert vol_src.kind == dict(vol='volume').get(vol_kind, vol_kind)
@@ -1155,20 +1165,35 @@ def test_vec_stc_basic(tmpdir, klass, kind):
        verts = surf_verts + vol_verts
        assert src.kind == 'mixed'
        data = np.tile(data, (2, 1, 1))
+        amplitudes = np.tile(amplitudes, 2)
        magnitudes = np.tile(magnitudes, 2)
        normals = np.tile(normals, 2)
+        directions = np.tile(directions, (2, 1))
    stc = klass(data, verts, 0, 1, 'foo')
+    amplitudes = amplitudes[:, np.newaxis]
+    magnitudes = magnitudes[:, np.newaxis]

    # Magnitude of the vectors
-    assert_array_equal(stc.magnitude().data[:, 0], magnitudes)
+    assert_array_equal(stc.magnitude().data, magnitudes)

    # Vector components projected onto the vertex normals
    if kind in ('vol', 'mixed'):
        with pytest.raises(RuntimeError, match='surface or discrete'):
-            stc.normal(src)
+            stc.project('normal', src)[0]
    else:
-        normal = stc.normal(src)
-        assert_array_equal(normal.data[:, 0], normals)
+        normal = stc.project('normal', src)[0]
+        assert_allclose(normal.data[:, 0], normals)
+
+    # Maximal-variance component, flipped either to keep amplitudes positive
+    # or to align with the source normals
+    projected, got_directions = stc.project('pca')
+    assert_allclose(got_directions, directions)
+    assert_allclose(projected.data, amplitudes)
+    projected, got_directions = stc.project('pca', src)
+    flips = np.array([[1], [1], [-1.], [1]])
+    if klass is MixedVectorSourceEstimate:
+        flips = np.tile(flips, (2, 1))
+    assert_allclose(got_directions, directions * flips)
+    assert_allclose(projected.data, amplitudes * flips)

    out_name = tmpdir.join('temp.h5')
    stc.save(out_name)
@@ -1188,6 +1213,37 @@ def test_vec_stc_basic(tmpdir, klass, kind):
        klass(data, verts, 0, 1)


+@pytest.mark.parametrize('real', (True, False))
+def test_source_estimate_project(real):
+    """Test projecting a source estimate onto direction of max power."""
+    n_src, n_times = 4, 100
+    rng = np.random.RandomState(0)
+    data = rng.randn(n_src, 3, n_times)
+    if not real:
+        data = data + 1j * rng.randn(n_src, 3, n_times)
+        assert data.dtype == np.complex128
+    else:
+        assert data.dtype == np.float64
+
+    # Make sure that the normal we get maximizes the power
+    # (i.e., minimizes the negative power)
+    want_nn = np.empty((n_src, 3))
+    for ii in range(n_src):
+        x0 = np.ones(3)
+
+        def objective(x):
+            x = x / np.linalg.norm(x)
+            return -np.linalg.norm(np.dot(x, data[ii]))
+        want_nn[ii] = fmin_cobyla(objective, x0, (), rhobeg=0.1, rhoend=1e-6)
+    want_nn /= np.linalg.norm(want_nn, axis=1, keepdims=True)
+
+    stc = VolVectorSourceEstimate(data, [np.arange(n_src)], 0, 1)
+    stc_max, directions = stc.project('pca')
+    flips = np.sign(np.sum(directions * want_nn, axis=1, keepdims=True))
+    directions *= flips
+    assert_allclose(directions, want_nn, atol=1e-6)
+
+
@pytest.fixture(scope='module', params=[testing._pytest_param()])
def invs():
    """Inverses of various amounts of loose."""
@@ -1251,7 +1307,8 @@ def test_vec_stc_inv_fixed(invs, pick_ori):
    evoked, _, _, _, fixed, fixedish = invs
    stc_fixed = apply_inverse(evoked, fixed)
    stc_fixed_vector = apply_inverse(evoked, fixed, pick_ori='vector')
-    assert_allclose(stc_fixed.data, stc_fixed_vector.normal(fixed['src']).data)
+    assert_allclose(stc_fixed.data,
+                    stc_fixed_vector.project('normal', fixed['src'])[0].data)
    stc_fixedish = apply_inverse(evoked, fixedish, pick_ori=pick_ori)
    if pick_ori == 'vector':
        assert_allclose(stc_fixed_vector.data, stc_fixedish.data, atol=1e-2)
@@ -1259,7 +1316,7 @@ def test_vec_stc_inv_fixed(invs, pick_ori):
        assert_allclose(
            abs(stc_fixed).data, stc_fixedish.magnitude().data, atol=1e-2)
        # ... and when picking the normal (signed)
-        stc_fixedish = stc_fixedish.normal(fixedish['src'])
+        stc_fixedish = stc_fixedish.project('normal', fixedish['src'])[0]
    elif pick_ori is None:
        stc_fixed = abs(stc_fixed)
    else:
diff --git a/mne/tests/test_surface.py b/mne/tests/test_surface.py
index c62f170d7b2..fa25c98b033 100644
--- a/mne/tests/test_surface.py
+++ b/mne/tests/test_surface.py
@@ -90,7 +90,7 @@ def test_compute_nearest():
    """Test nearest neighbor searches."""
    x = rng.randn(500, 3)
    x /= np.sqrt(np.sum(x ** 2, axis=1))[:, None]
-    nn_true = rng.permutation(np.arange(500, dtype=np.int))[:20]
+    nn_true = rng.permutation(np.arange(500, dtype=np.int64))[:20]
    y = x[nn_true]

    nn1 = _compute_nearest(x, y, method='BallTree')
diff --git a/mne/time_frequency/_stft.py b/mne/time_frequency/_stft.py
index 2b7f540a578..a414afae6a3 100644
--- a/mne/time_frequency/_stft.py
+++ b/mne/time_frequency/_stft.py
@@ -65,7 +65,7 @@ def stft(x, wsize, tstep=None, verbose=None):
    logger.info("Number of frequencies: %d" % n_freq)
    logger.info("Number of time steps: %d" % n_step)

-    X = np.zeros((n_signals, n_freq, n_step), dtype=np.complex)
+    X = np.zeros((n_signals, n_freq, n_step), dtype=np.complex128)

    if n_signals == 0:
        return X
@@ -143,7 +143,7 @@ def istft(X, tstep=None, Tx=None):

    T = n_step * tstep

-    x = np.zeros((n_signals, T + wsize - tstep), dtype=np.float)
+    x = np.zeros((n_signals, T + wsize - tstep), dtype=np.float64)

    if n_signals == 0:
        return x[:, :Tx]
@@ -153,7 +153,7 @@ def istft(X, tstep=None, Tx=None):
    # win = win / norm(win);

    # Pre-processing for edges
-    swin = np.zeros(T + wsize - tstep, dtype=np.float)
+    swin = np.zeros(T + wsize - tstep, dtype=np.float64)
    for t in range(n_step):
        swin[t * tstep:t * tstep + wsize] += win ** 2
    swin = np.sqrt(swin / wsize)
diff --git a/mne/time_frequency/_stockwell.py b/mne/time_frequency/_stockwell.py
index 3d1c7b852d6..6777c1ded22 100644
--- a/mne/time_frequency/_stockwell.py
+++ b/mne/time_frequency/_stockwell.py
@@ -47,7 +47,7 @@ def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width):
    k = width  # 1 for classical Stockwell transform
    f_range = np.arange(start_f, stop_f, 1)

-    windows = np.empty((len(f_range), len(tw)), dtype=np.complex)
+    windows = np.empty((len(f_range), len(tw)), dtype=np.complex128)
    for i_f, f in enumerate(f_range):
        if f == 0.:
            window = np.ones(len(tw))
@@ -62,7 +62,7 @@ def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width):
def _st(x, start_f, windows):
    """Compute ST based on Ali Moukadem MATLAB code (used in tests)."""
    n_samp = x.shape[-1]
-    ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex)
+    ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex128)
    # do the work
    Fx = fftpack.fft(x)
    XF = np.concatenate([Fx, Fx], axis=-1)
diff --git a/mne/time_frequency/csd.py b/mne/time_frequency/csd.py
index 55974812bc3..43656462863 100644
--- a/mne/time_frequency/csd.py
+++ b/mne/time_frequency/csd.py
@@ -1132,7 +1132,7 @@ def _execute_csd_function(X, times, frequencies, csd_function, params, n_fft,
    n_freqs = len(frequencies)
    csds_mean = np.zeros((n_channels * (n_channels + 1) // 2, n_freqs),
-                         dtype=np.complex)
+                         dtype=np.complex128)

    # Prepare the function that does the actual CSD computation for parallel
    # execution.
diff --git a/mne/time_frequency/tests/test_stft.py b/mne/time_frequency/tests/test_stft.py
index 080620d867c..0784ace90b8 100644
--- a/mne/time_frequency/tests/test_stft.py
+++ b/mne/time_frequency/tests/test_stft.py
@@ -12,7 +12,7 @@ def test_stft():
    f = 7.  # Hz
    for T in [127, 128]:  # try with even and odd numbers
        # Test with low frequency signal
-        t = np.arange(T).astype(np.float)
+        t = np.arange(T).astype(np.float64)
        x = np.sin(2 * np.pi * f * t / sfreq)
        x = np.array([x, x + 1.])
        wsize = 128
diff --git a/mne/time_frequency/tests/test_tfr.py b/mne/time_frequency/tests/test_tfr.py
index 62f4e2932a0..747d35035bf 100644
--- a/mne/time_frequency/tests/test_tfr.py
+++ b/mne/time_frequency/tests/test_tfr.py
@@ -285,7 +285,7 @@ def test_tfr_multitaper():
    seed = 42
    rng = np.random.RandomState(seed)
    noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
-    t = np.arange(n_times, dtype=np.float) / sfreq
+    t = np.arange(n_times, dtype=np.float64) / sfreq
    signal = np.sin(np.pi * 2. * 50. * t)  # 50 Hz sinusoid signal
    signal[np.logical_or(t < 0.45, t > 0.55)] = 0.  # Hard windowing
    on_time = np.logical_and(t >= 0.45, t <= 0.55)
@@ -302,7 +302,7 @@ def test_tfr_multitaper():
    epochs = EpochsArray(data=dat, info=info, events=events,
                         event_id=event_id, reject=reject)

-    freqs = np.arange(35, 70, 5, dtype=np.float)
+    freqs = np.arange(35, 70, 5, dtype=np.float64)

    power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
                                time_bandwidth=4.0)
@@ -655,9 +655,9 @@ def test_compute_tfr():

            # Check types
            if output in ('complex', 'avg_power_itc'):
-                assert_equal(np.complex, out.dtype)
+                assert_equal(np.complex128, out.dtype)
            else:
-                assert_equal(np.float, out.dtype)
+                assert_equal(np.float64, out.dtype)
            assert (np.all(np.isfinite(out)))

    # Check errors params
diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py
index 1cbac6de50e..388b960653a 100644
--- a/mne/time_frequency/tfr.py
+++ b/mne/time_frequency/tfr.py
@@ -372,7 +372,7 @@ def _compute_tfr(epoch_data, freqs, sfreq=1.0, method='morlet',
    elif output in ('complex', 'avg_power_itc'):
        # avg_power_itc is stored as power + 1i * itc to keep a
        # simple dimensionality
-        dtype = np.complex
+        dtype = np.complex128

    if ('avg_' in output) or ('itc' in output):
        out = np.empty((n_chans, n_freqs, n_times), dtype)
@@ -497,7 +497,7 @@ def _time_frequency_loop(X, Ws, output, use_fft, mode, decim):

    # Set output type
-    dtype = np.float
+    dtype = np.float64
    if output in ['complex', 'avg_power_itc']:
-        dtype = np.complex
+        dtype = np.complex128

    # Init outputs
    decim = _check_decim(decim)
@@ -514,7 +514,7 @@ def _time_frequency_loop(X, Ws, output, use_fft, mode, decim):
    # Inter-trial phase locking is apparently computed per taper...
if 'itc' in output: - plf = np.zeros((n_freqs, n_times), dtype=np.complex) + plf = np.zeros((n_freqs, n_times), dtype=np.complex128) # Loop across epochs for epoch_idx, tfr in enumerate(coefs): @@ -591,7 +591,7 @@ def cwt(X, Ws, use_fft=True, mode='same', decim=1): coefs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft) - tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex) + tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex128) for k, tfr in enumerate(coefs): tfrs[k] = tfr diff --git a/mne/utils/check.py b/mne/utils/check.py index 723785d8f62..773ca3d6c73 100644 --- a/mne/utils/check.py +++ b/mne/utils/check.py @@ -557,11 +557,11 @@ def _check_option(parameter, value, allowed_values, extra=''): '{options}, but got {value!r} instead.') allowed_values = list(allowed_values) # e.g., if a dict was given if len(allowed_values) == 1: - options = 'The only allowed value is %r' % allowed_values[0] + options = f'The only allowed value is {repr(allowed_values[0])}' else: options = 'Allowed values are ' - options += ', '.join(['%r' % v for v in allowed_values[:-1]]) - options += ' and %r' % allowed_values[-1] + options += ', '.join([f'{repr(v)}' for v in allowed_values[:-1]]) + options += f' and {repr(allowed_values[-1])}' raise ValueError(msg.format(parameter=parameter, options=options, value=value, extra=extra)) diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 03b172abff8..299ef72dcf9 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -42,6 +42,7 @@ _ensure_int, _validate_type, _check_option) from .utils import (mne_analyze_colormap, _get_color_list, plt_show, tight_layout, figure_nobar, _check_time_unit) +from .misc import _check_mri from ..bem import (ConductorModel, _bem_find_surface, _surf_dict, _surf_name, read_bem_surfaces) @@ -1463,7 +1464,7 @@ def _plot_mpl_stc(stc, subject=None, surface='inflated', hemi='lh', curv = nib.freesurfer.read_morph_data( op.join(subjects_dir, subject, 'surf', '%s.curv' % hemi))[inuse] - curv = np.clip(np.array(curv > 0, np.int), 0.33, 0.66) + curv = np.clip(np.array(curv > 0, np.int64), 0.33, 0.66) params = dict(ax=ax, stc=stc, coords=coords, faces=faces, hemi_idx=hemi_idx, vertices=vertices, e=e, smoothing_steps=smoothing_steps, n_verts=n_verts, @@ -1811,10 +1812,20 @@ def _ijk_to_cut_coords(ijk, img): return apply_trans(img.affine, ijk) +def _load_subject_mri(mri, stc, subject, subjects_dir, name): + import nibabel as nib + from nibabel.spatialimages import SpatialImage + _validate_type(mri, ('path-like', SpatialImage), name) + if isinstance(mri, str): + subject = _check_subject(stc.subject, subject, True) + mri = nib.load(_check_mri(mri, subject, subjects_dir)) + return mri + + @verbose def plot_volume_source_estimates(stc, src, subject=None, subjects_dir=None, - mode='stat_map', bg_img=None, colorbar=True, - colormap='auto', clim='auto', + mode='stat_map', bg_img='T1.mgz', + colorbar=True, colormap='auto', clim='auto', transparent=None, show=True, initial_time=None, initial_pos=None, verbose=None): @@ -1839,9 +1850,10 @@ def plot_volume_source_estimates(stc, src, subject=None, subjects_dir=None, The plotting mode to use. Either 'stat_map' (default) or 'glass_brain'. For "glass_brain", activation absolute values are displayed after being transformed to a standard MNI brain. - bg_img : instance of SpatialImage | None + bg_img : instance of SpatialImage | str The background image used in the nilearn plotting function. - If None, it is the T1.mgz file that is found in the subjects_dir. 
+ Can also be a string to use the ``bg_img`` file in the subject's + MRI directory (default is ``'T1.mgz'``). Not used in "glass brain" plotting. colorbar : bool, optional If True, display a colorbar on the right of the plots. @@ -2071,11 +2083,9 @@ def _onclick(event, params, verbose=None): bg_img = None # not used else: # stat_map if bg_img is None: - subject = _check_subject(stc.subject, subject, True) - subjects_dir = get_subjects_dir(subjects_dir=subjects_dir, - raise_error=True) - t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz') - bg_img = nib.load(t1_fname) + bg_img = 'T1.mgz' + bg_img = _load_subject_mri( + bg_img, stc, subject, subjects_dir, 'bg_img') if initial_time is None: time_sl = slice(0, None) diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 275abea105b..d967d45320f 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -868,7 +868,7 @@ def add_annotation(self, annot, borders=True, alpha=1, hemi=None, rgb = np.round(np.multiply(colorConverter.to_rgb(color), 255)) cmap[:, :3] = rgb.astype(cmap.dtype) - ctable = cmap.astype(np.float) / 255. + ctable = cmap.astype(np.float64) / 255. mesh_data = self._renderer.mesh( x=self.geo[hemi].coords[:, 0], @@ -1447,7 +1447,7 @@ def _to_borders(self, label, hemi, borders, restrict_idx=None): edges = mesh_edges(self.geo[hemi].faces) edges = edges.tocoo() border_edges = label[edges.row] != label[edges.col] - show = np.zeros(n_vertices, dtype=np.int) + show = np.zeros(n_vertices, dtype=np.int64) keep_idx = np.unique(edges.row[border_edges]) if isinstance(borders, int): for _ in range(borders): diff --git a/mne/viz/_brain/colormap.py b/mne/viz/_brain/colormap.py index a9ef3302fca..23f1fbdb5ab 100644 --- a/mne/viz/_brain/colormap.py +++ b/mne/viz/_brain/colormap.py @@ -12,7 +12,7 @@ def create_lut(cmap, n_colors=256, center=None): """Return a colormap suitable for setting as a LUT.""" from matplotlib import cm cmap = cm.get_cmap(cmap) - lut = np.round(cmap(np.linspace(0, 1, n_colors)) * 255.0).astype(np.int) + lut = np.round(cmap(np.linspace(0, 1, n_colors)) * 255.0).astype(np.int64) return lut @@ -169,5 +169,5 @@ def calculate_lut(lut_table, alpha, fmin, fmid, fmax, center=None, lut_table[:, chan]) lut_table = lut - lut_table = lut_table.astype(np.float) / 255.0 + lut_table = lut_table.astype(np.float64) / 255.0 return lut_table diff --git a/mne/viz/_brain/surface.py b/mne/viz/_brain/surface.py index 30f65b2c3b3..de5b9cfde03 100644 --- a/mne/viz/_brain/surface.py +++ b/mne/viz/_brain/surface.py @@ -151,7 +151,7 @@ def load_curvature(self): """Load in curvature values from the ?h.curv file.""" curv_path = path.join(self.data_path, 'surf', '%s.curv' % self.hemi) self.curv = read_curvature(curv_path, binary=False) - self.bin_curv = np.array(self.curv > 0, np.int) + self.bin_curv = np.array(self.curv > 0, np.int64) # morphometry (curvature) normalization in order to get gray cortex # TODO: delete self.grey_curv after cortex parameter # will be fully supported diff --git a/mne/viz/backends/_notebook.py b/mne/viz/backends/_notebook.py index 9af1f21fb89..6e5c4b1d0a1 100644 --- a/mne/viz/backends/_notebook.py +++ b/mne/viz/backends/_notebook.py @@ -3,7 +3,8 @@ # License: Simplified BSD import matplotlib.pyplot as plt -from contextlib import contextmanager, nullcontext +from contextlib import contextmanager +from ...fixes import nullcontext from ._pyvista import _Renderer as _PyVistaRenderer diff --git a/mne/viz/backends/_pysurfer_mayavi.py b/mne/viz/backends/_pysurfer_mayavi.py index 
e7adc34c9ae..af1ae21b75c 100644
--- a/mne/viz/backends/_pysurfer_mayavi.py
+++ b/mne/viz/backends/_pysurfer_mayavi.py
@@ -124,11 +124,11 @@ def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
            l_m = surface.module_manager.scalar_lut_manager
            if colormap.dtype == np.uint8:
                l_m.lut.table = colormap
-            elif colormap.dtype == np.float:
+            elif colormap.dtype == np.float64:
                l_m.load_lut_from_list(colormap)
            else:
-                raise TypeError('Expected type for colormap values are'
-                                ' np.float or np.uint8: '
+                raise TypeError('Expected types for colormap values are'
+                                ' np.float64 or np.uint8: '
                                '{} was given'.format(colormap.dtype))
            surface.actor.property.shading = shading
            surface.actor.property.backface_culling = backface_culling
diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py
index 18ad3eb2409..b3762133a5d 100644
--- a/mne/viz/backends/_pyvista.py
+++ b/mne/viz/backends/_pyvista.py
@@ -221,7 +221,7 @@ def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
                rgba = True
        if isinstance(colormap, np.ndarray):
            if colormap.dtype == np.uint8:
-                colormap = colormap.astype(np.float) / 255.
+                colormap = colormap.astype(np.float64) / 255.
            from matplotlib.colors import ListedColormap
            colormap = ListedColormap(colormap)
diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py
index b3dabfc0516..fba8a0f9a46 100644
--- a/mne/viz/backends/_utils.py
+++ b/mne/viz/backends/_utils.py
@@ -36,15 +36,16 @@ def _check_color(color):
        np_color = np.array(color)
        if np_color.size % 3 != 0 and np_color.size % 4 != 0:
            raise ValueError("The expected valid format is RGB or RGBA.")
-        if np_color.dtype == np.int:
+        if np_color.dtype in (np.int64, np.int32):
            if (np_color < 0).any() or (np_color > 255).any():
                raise ValueError("Values out of range [0, 255].")
-        elif np_color.dtype == np.float:
+        elif np_color.dtype == np.float64:
            if (np_color < 0.0).any() or (np_color > 1.0).any():
                raise ValueError("Values out of range [0.0, 1.0].")
        else:
-            raise TypeError("Expected data type is `np.int` or `np.float` but "
-                            "{} was given.".format(np_color.dtype))
+            raise TypeError("Expected data type is `np.int64`, `np.int32`, or "
+                            "`np.float64` but {} was given."
+ .format(np_color.dtype)) else: raise TypeError("Expected type is `str` or iterable but " "{} was given.".format(type(color))) diff --git a/mne/viz/circle.py b/mne/viz/circle.py index 840a2f3cf1b..ff00d92bc61 100644 --- a/mne/viz/circle.py +++ b/mne/viz/circle.py @@ -50,7 +50,7 @@ def circular_layout(node_names, node_order, start_pos=90, start_between=True, raise ValueError('node_order has to be the same length as node_names') if group_boundaries is not None: - boundaries = np.array(group_boundaries, dtype=np.int) + boundaries = np.array(group_boundaries, dtype=np.int64) if np.any(boundaries >= n_nodes) or np.any(boundaries < 0): raise ValueError('"group_boundaries" has to be between 0 and ' 'n_nodes - 1.') @@ -78,7 +78,7 @@ def circular_layout(node_names, node_order, start_pos=90, start_between=True, start_pos += group_sep / 2 boundaries = boundaries[1:] if n_group_sep > 1 else None - node_angles = np.ones(n_nodes, dtype=np.float) * node_sep + node_angles = np.ones(n_nodes, dtype=np.float64) * node_sep node_angles[0] = start_pos if boundaries is not None: node_angles[boundaries] += group_sep @@ -325,7 +325,7 @@ def plot_connectivity_circle(con, node_names, indices=None, n_lines=None, # edges: We modulate the noise with the number of connections of the # node and the connection strength, such that the strongest connections # are closer to the node center - nodes_n_con = np.zeros((n_nodes), dtype=np.int) + nodes_n_con = np.zeros((n_nodes), dtype=np.int64) for i, j in zip(indices[0], indices[1]): nodes_n_con[i] += 1 nodes_n_con[j] += 1 diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py index 40bd6216cdf..fad1b99946d 100644 --- a/mne/viz/evoked.py +++ b/mne/viz/evoked.py @@ -290,7 +290,7 @@ def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline, picks = np.array([pick for pick in picks if pick not in exclude]) - types = np.array(_get_channel_types(info, picks), np.unicode) + types = np.array(_get_channel_types(info, picks), str) ch_types_used = list() for this_type in _VALID_CHANNEL_TYPES: if this_type in types: @@ -2317,7 +2317,7 @@ def click_func( invert_y, vlines, tmin, tmax, units, skip_axlabel) # add inset scalp plot showing location of sensors picked if show_sensors: - _validate_type(show_sensors, (np.int, bool, str, type(None)), + _validate_type(show_sensors, (np.int64, bool, str, type(None)), 'show_sensors', 'numeric, str, None or bool') if not _check_ch_locs(np.array(one_evoked.info['chs'])[pos_picks]): warn('Cannot find channel coordinates in the supplied Evokeds. ' diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py index e1a4977c960..a30aeb2111e 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -136,7 +136,7 @@ def test_plot_sparse_source_estimates(renderer_interactive): stc_data = np.zeros((len(inds), n_time)) stc_data[0, 1] = 1. stc_data[1, 4] = 2. 
- vertices = [vertices[inds], np.empty(0, dtype=np.int)] + vertices = [vertices[inds], np.empty(0, dtype=np.int64)] stc = SourceEstimate(stc_data, vertices, 1, 1) surf = plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1), opacity=0.5, high_resolution=False) @@ -605,13 +605,14 @@ def test_snapshot_brain_montage(renderer): @requires_dipy() @requires_nibabel() @requires_version('nilearn', '0.4') -@pytest.mark.parametrize('mode, stype, init_t, want_t, init_p, want_p', [ - ('glass_brain', 's', None, 2, None, (-30.9, 18.4, 56.7)), - ('stat_map', 'vec', 1, 1, None, (15.7, 16.0, -6.3)), - ('glass_brain', 'vec', None, 1, (10, -10, 20), (6.6, -9.0, 19.9)), - ('stat_map', 's', 1, 1, (-10, 5, 10), (-12.3, 2.0, 7.7))]) +@pytest.mark.parametrize( + 'mode, stype, init_t, want_t, init_p, want_p, bg_img', [ + ('glass_brain', 's', None, 2, None, (-30.9, 18.4, 56.7), None), + ('stat_map', 'vec', 1, 1, None, (15.7, 16.0, -6.3), None), + ('glass_brain', 'vec', None, 1, (10, -10, 20), (6.6, -9., 19.9), None), + ('stat_map', 's', 1, 1, (-10, 5, 10), (-12.3, 2.0, 7.7), 'brain.mgz')]) def test_plot_volume_source_estimates(mode, stype, init_t, want_t, - init_p, want_p): + init_p, want_p, bg_img): """Test interactive plotting of volume source estimates.""" forward = read_forward_solution(fwd_fname) sample_src = forward['src'] @@ -634,7 +635,7 @@ def test_plot_volume_source_estimates(mode, stype, init_t, want_t, fig = stc.plot( sample_src, subject='sample', subjects_dir=subjects_dir, mode=mode, initial_time=init_t, initial_pos=init_p, - verbose=True) + bg_img=bg_img, verbose=True) log = log.getvalue() want_str = 't = %0.3f s' % want_t assert want_str in log, (want_str, init_t) @@ -644,6 +645,10 @@ def test_plot_volume_source_estimates(mode, stype, init_t, want_t, _fake_click(fig, fig.axes[ax_idx], (0.3, 0.5)) fig.canvas.key_press_event('left') fig.canvas.key_press_event('shift+right') + if bg_img is not None: + with pytest.raises(FileNotFoundError, match='MRI file .* not found'): + stc.plot(sample_src, subject='sample', subjects_dir=subjects_dir, + mode='stat_map', bg_img='junk.mgz') @pytest.mark.slowtest # can be slow on OSX diff --git a/mne/viz/tests/test_epochs.py b/mne/viz/tests/test_epochs.py index 8c9963823bb..3c02346bad2 100644 --- a/mne/viz/tests/test_epochs.py +++ b/mne/viz/tests/test_epochs.py @@ -282,7 +282,7 @@ def test_plot_butterfly(): sfreq = 1000. 
data = np.sin(rng.randn(n_epochs, n_channels, n_times)) events = np.array([np.arange(n_epochs), [0] * n_epochs, np.ones([n_epochs], - dtype=np.int)]).T + dtype=np.int64)]).T chanlist = ['eeg' if chan < n_channels // 3 else 'ecog' if chan < n_channels // 2 else 'seeg' for chan in range(n_channels)] diff --git a/mne/viz/tests/test_ica.py b/mne/viz/tests/test_ica.py index f7ed392fef1..26a5fd6ce3e 100644 --- a/mne/viz/tests/test_ica.py +++ b/mne/viz/tests/test_ica.py @@ -200,7 +200,7 @@ def test_plot_ica_sources(): assert_array_equal(ica.exclude, [1]) plt.close('all') - # dtype can change int->np.int after load, test it explicitly + # dtype can change int->np.int64 after load, test it explicitly ica.n_components_ = np.int64(ica.n_components_) fig = ica.plot_sources(raw) # also test mouse clicks diff --git a/mne/viz/utils.py b/mne/viz/utils.py index b03b64d9ab2..b6f29af6a03 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -2963,7 +2963,7 @@ def _plot_masked_image(ax, data, times, mask=None, yvals=None, if mask.all(): t_end = ", all points masked)" else: - fraction = 1 - (np.float(mask.sum()) / np.float(mask.size)) + fraction = 1 - (np.float64(mask.sum()) / np.float64(mask.size)) t_end = ", %0.3g%% of points masked)" % (fraction * 100,) else: t_end = ")" diff --git a/tutorials/source-modeling/plot_beamformer_lcmv.py b/tutorials/source-modeling/plot_beamformer_lcmv.py index 485305938f9..692e03a1232 100644 --- a/tutorials/source-modeling/plot_beamformer_lcmv.py +++ b/tutorials/source-modeling/plot_beamformer_lcmv.py @@ -9,6 +9,7 @@ .. contents:: Page contents :local: :depth: 2 + """ # Author: Britta Westner #
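To close, the volumetric morphing features exercised throughout this
changeset fit together as follows. A minimal sketch (the names ``src``,
``src_to``, ``stc``, and ``subjects_dir`` are assumed to already exist, e.g.
from ``mne.read_source_spaces`` and ``mne.minimum_norm.apply_inverse``; they
are illustrative, not part of the patch):

    import mne

    morph = mne.compute_source_morph(
        src, subject_to='fsaverage', src_to=src_to, subjects_dir=subjects_dir,
        niter_sdr=(),  # an empty tuple now skips the SDR step entirely
        niter_affine=(100, 100, 10))
    stc_fsaverage = morph.apply(stc)
    # ``bg_img`` may now be a filename in the subject's mri/ directory:
    stc_fsaverage.plot(src_to, subject='fsaverage',
                       subjects_dir=subjects_dir, bg_img='brain.mgz')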