From 56587f33e2cd3fd253a29784cbe1781235aa0447 Mon Sep 17 00:00:00 2001 From: Maksym Balatsko Date: Sun, 1 Oct 2023 18:50:10 +0200 Subject: [PATCH 01/37] Mark tests as network tests (#12041) --- doc/changes/devel.rst | 1 + doc/changes/names.inc | 2 ++ mne/channels/tests/test_channels.py | 2 ++ mne/datasets/tests/test_datasets.py | 1 + 4 files changed, 6 insertions(+) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index a47f500bc5f..994cd842707 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -40,6 +40,7 @@ Bugs ~~~~ - Fix bugs with :func:`mne.preprocessing.realign_raw` where the start of ``other`` was incorrectly cropped; and onsets and durations in ``other.annotations`` were left unsynced with the resampled data (:gh:`11950` by :newcontrib:`Qian Chu`) - Fix bug where ``encoding`` argument was ignored when reading annotations from an EDF file (:gh:`11958` by :newcontrib:`Andrew Gilbert`) +- Mark tests ``test_adjacency_matches_ft`` and ``test_fetch_uncompressed_file`` as network tests (:gh:`12041` by :newcontrib:`Maksym Balatsko`) - Fix bugs with saving splits for :class:`~mne.Epochs` (:gh:`11876` by `Dmitrii Altukhov`_) - Fix bug with multi-plot 3D rendering where only one plot was updated (:gh:`11896` by `Eric Larson`_) - Fix bug where subject birthdays were not correctly read by :func:`mne.io.read_raw_snirf` (:gh:`11912` by `Eric Larson`_) diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 3ebfc3e3ca5..389583fe616 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -320,6 +320,8 @@ .. _Mainak Jas: https://jasmainak.github.io +.. _Maksym Balatsko: https://github.com/mbalatsko + .. _Marcin Koculak: https://github.com/mkoculak .. _Marian Dovgialo: https://github.com/mdovgialo diff --git a/mne/channels/tests/test_channels.py b/mne/channels/tests/test_channels.py index 835be5432bb..42695ae76bf 100644 --- a/mne/channels/tests/test_channels.py +++ b/mne/channels/tests/test_channels.py @@ -51,6 +51,7 @@ ) from mne.datasets import testing from mne.parallel import parallel_func +from mne.utils import requires_good_network io_dir = Path(__file__).parent.parent.parent / "io" base_dir = io_dir / "tests" / "data" @@ -362,6 +363,7 @@ def _download_one_ft_neighbor(neighbor: _BuiltinChannelAdjacency): @pytest.mark.slowtest +@requires_good_network def test_adjacency_matches_ft(tmp_path): """Test correspondence of built-in adjacency matrices with FT repo.""" builtin_neighbors_dir = Path(__file__).parents[1] / "data" / "neighbors" diff --git a/mne/datasets/tests/test_datasets.py b/mne/datasets/tests/test_datasets.py index e3599fe8e6f..6b43565cf33 100644 --- a/mne/datasets/tests/test_datasets.py +++ b/mne/datasets/tests/test_datasets.py @@ -306,6 +306,7 @@ def test_phantom(tmp_path, monkeypatch): assert op.isfile(tmp_path / "phantom_otaniemi" / "mri" / "T1.mgz") +@requires_good_network def test_fetch_uncompressed_file(tmp_path): """Test downloading an uncompressed file with our fetch function.""" dataset_dict = dict( From e4afb0aa211ca2990ead7f4bd93adbf99ad007ef Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Sun, 1 Oct 2023 13:10:15 -0400 Subject: [PATCH 02/37] DOC: Remove make test (#12042) --- Makefile | 2 -- doc/install/advanced.rst | 8 ++++++++ doc/install/contributing.rst | 2 +- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index c0e47ada7fb..b0b61e8370c 100644 --- a/Makefile +++ b/Makefile @@ -33,8 +33,6 @@ sample_data: testing_data: @python -c "import mne; mne.datasets.testing.data_path(verbose=True);" 
-pytest: test - test-no-network: in sudo unshare -n -- sh -c 'MNE_SKIP_NETWORK_TESTS=1 py.test mne' diff --git a/doc/install/advanced.rst b/doc/install/advanced.rst index 36c4e440fd9..065b9c1f9e7 100644 --- a/doc/install/advanced.rst +++ b/doc/install/advanced.rst @@ -209,6 +209,14 @@ or by doing :func:`mne.viz.set_3d_options(antialias=False) ` within a given Python session. +Some hardware-accelerated graphics on linux (e.g., some Intel graphics cards) +provide an insufficient implementation of OpenGL, and in those cases it can help to +force software rendering instead with something like: + +.. code-block:: console + + $ export LIBGL_ALWAYS_SOFTWARE=true + Another issue that may come up is that the MESA software itself may be out of date in certain operating systems, for example CentOS. This may lead to incomplete rendering of some 3D plots. A solution is described in this `Github comment `_. diff --git a/doc/install/contributing.rst b/doc/install/contributing.rst index 21fb16b337d..d741c540479 100644 --- a/doc/install/contributing.rst +++ b/doc/install/contributing.rst @@ -872,7 +872,7 @@ Running the test suite The ``--pdb`` flag will automatically start the python debugger upon test failure. -The full test suite can be run by calling ``make test`` from the +The full test suite can be run by calling ``pytest -m "not ultraslowtest" mne`` from the ``mne-python`` root folder. Testing the entire module can be quite slow, however, so to run individual tests while working on a new feature, you can run the following line:: From bd4d1d6df252e499f52598733da15d0757bce173 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Sun, 1 Oct 2023 14:58:19 -0400 Subject: [PATCH 03/37] ENH: Add Forward.save and hdf5 support (#12036) --- doc/changes/devel.rst | 1 + mne/_fiff/meas_info.py | 3 ++ mne/_fiff/tests/test_meas_info.py | 8 ++++++ mne/forward/forward.py | 47 +++++++++++++++++++++++++------ mne/forward/tests/test_forward.py | 9 ++++++ mne/utils/docs.py | 9 ++++++ 6 files changed, 69 insertions(+), 8 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index 994cd842707..aac7801085d 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -33,6 +33,7 @@ Enhancements - Add helpful error messages when using methods on empty :class:`mne.Epochs`-objects (:gh:`11306` by `Martin Schulz`_) - Add inferring EEGLAB files' montage unit automatically based on estimated head radius using :func:`read_raw_eeglab(..., montage_units="auto") ` (:gh:`11925` by `Jack Zhang`_, :gh:`11951` by `Eric Larson`_) - Add :class:`~mne.time_frequency.EpochsSpectrumArray` and :class:`~mne.time_frequency.SpectrumArray` to support creating power spectra from :class:`NumPy array ` data (:gh:`11803` by `Alex Rockhill`_) +- Add support for writing forward solutions to HDF5 and convenience function :meth:`mne.Forward.save` (:gh:`12036` by `Eric Larson`_) - Refactored internals of :func:`mne.read_annotations` (:gh:`11964` by `Paul Roujansky`_) - Enhance :func:`~mne.viz.plot_evoked_field` with a GUI that has controls for time, colormap, and contour lines (:gh:`11942` by `Marijn van Vliet`_) diff --git a/mne/_fiff/meas_info.py b/mne/_fiff/meas_info.py index fe5c9d0d881..672f805c1b8 100644 --- a/mne/_fiff/meas_info.py +++ b/mne/_fiff/meas_info.py @@ -80,6 +80,7 @@ _check_on_missing, fill_doc, _check_fname, + check_fname, repr_html, ) from ._digitization import ( @@ -2006,6 +2007,8 @@ def read_info(fname, verbose=None): ------- %(info_not_none)s """ + check_fname(fname, "Info", (".fif", ".fif.gz")) + fname = 
_check_fname(fname, must_exist=True, overwrite="read") f, tree, _ = fiff_open(fname) with f as fid: info = read_meas_info(fid, tree)[0] diff --git a/mne/_fiff/tests/test_meas_info.py b/mne/_fiff/tests/test_meas_info.py index 6cee0c94d76..844d04fc624 100644 --- a/mne/_fiff/tests/test_meas_info.py +++ b/mne/_fiff/tests/test_meas_info.py @@ -345,6 +345,14 @@ def test_read_write_info(tmp_path): write_info(fname, info) +@testing.requires_testing_data +def test_dir_warning(): + """Test that trying to read a bad filename emits a warning before an error.""" + with pytest.raises(OSError, match="directory"): + with pytest.warns(RuntimeWarning, match="foo"): + read_info(ctf_fname) + + def test_io_dig_points(tmp_path): """Test Writing for dig files.""" dest = tmp_path / "test.txt" diff --git a/mne/forward/forward.py b/mne/forward/forward.py index 0fcb821ab2d..07ea99d59ce 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -81,6 +81,7 @@ _stamp_to_dt, _on_missing, repr_html, + _import_h5io_funcs, ) from ..label import Label @@ -165,6 +166,18 @@ def copy(self): """Copy the Forward instance.""" return Forward(deepcopy(self)) + @verbose + def save(self, fname, *, overwrite=False, verbose=None): + """Save the forward solution. + + Parameters + ---------- + %(fname_fwd)s + %(overwrite)s + %(verbose)s + """ + write_forward_solution(fname, self, overwrite=overwrite) + def _get_src_type_and_ori_for_repr(self): src_types = np.array([src["type"] for src in self["src"]]) @@ -520,7 +533,8 @@ def read_forward_solution(fname, include=(), exclude=(), *, ordered=None, verbos Parameters ---------- fname : path-like - The file name, which should end with ``-fwd.fif`` or ``-fwd.fif.gz``. + The file name, which should end with ``-fwd.fif``, ``-fwd.fif.gz``, + ``_fwd.fif``, ``_fwd.fif.gz``, ``-fwd.h5``, or ``_fwd.h5``. include : list, optional List of names of channels to include. If empty all channels are included. @@ -554,11 +568,15 @@ def read_forward_solution(fname, include=(), exclude=(), *, ordered=None, verbos forward solution with :func:`read_forward_solution`. """ check_fname( - fname, "forward", ("-fwd.fif", "-fwd.fif.gz", "_fwd.fif", "_fwd.fif.gz") + fname, + "forward", + ("-fwd.fif", "-fwd.fif.gz", "_fwd.fif", "_fwd.fif.gz", "-fwd.h5", "_fwd.h5"), ) fname = _check_fname(fname=fname, must_exist=True, overwrite="read") # Open the file, create directory logger.info("Reading forward solution from %s..." % fname) + if fname.suffix == ".h5": + return _read_forward_hdf5(fname) f, tree, _ = fiff_open(fname) with f as fid: # Find all forward solutions @@ -861,9 +879,7 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None): Parameters ---------- - fname : path-like - File name to save the forward solution to. It should end with - ``-fwd.fif`` or ``-fwd.fif.gz``. + %(fname_fwd)s fwd : Forward Forward solution. %(overwrite)s @@ -889,13 +905,28 @@ def write_forward_solution(fname, fwd, overwrite=False, verbose=None): forward solution with :func:`read_forward_solution`. 
""" check_fname( - fname, "forward", ("-fwd.fif", "-fwd.fif.gz", "_fwd.fif", "_fwd.fif.gz") + fname, + "forward", + ("-fwd.fif", "-fwd.fif.gz", "_fwd.fif", "_fwd.fif.gz", "-fwd.h5", "_fwd.h5"), ) # check for file existence and expand `~` if present fname = _check_fname(fname, overwrite) - with start_and_end_file(fname) as fid: - _write_forward_solution(fid, fwd) + if fname.suffix == ".h5": + _write_forward_hdf5(fname, fwd) + else: + with start_and_end_file(fname) as fid: + _write_forward_solution(fid, fwd) + + +def _write_forward_hdf5(fname, fwd): + _, write_hdf5 = _import_h5io_funcs() + write_hdf5(fname, dict(fwd=fwd), overwrite=True) + + +def _read_forward_hdf5(fname): + read_hdf5, _ = _import_h5io_funcs() + return Forward(read_hdf5(fname)["fwd"]) def _write_forward_solution(fid, fwd): diff --git a/mne/forward/tests/test_forward.py b/mne/forward/tests/test_forward.py index d6981945ac6..ee37f11676c 100644 --- a/mne/forward/tests/test_forward.py +++ b/mne/forward/tests/test_forward.py @@ -197,6 +197,15 @@ def test_io_forward(tmp_path): fwd_read = read_forward_solution(fname_temp) assert_forward_allclose(fwd, fwd_read) + h5py = pytest.importorskip("h5py") + pytest.importorskip("h5io") + fname_h5 = fname_temp.with_suffix(".h5") + fwd.save(fname_h5) + with h5py.File(fname_h5, "r"): + pass # just checks for hdf5-ness + fwd_read = read_forward_solution(fname_h5) + assert_forward_allclose(fwd, fwd_read) + @testing.requires_testing_data def test_apply_forward(): diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 53a394022a3..7ec2dbc4534 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -1694,6 +1694,15 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): Name of the output file. """ +docdict[ + "fname_fwd" +] = """ +fname : path-like + File name to save the forward solution to. It should end with + ``-fwd.fif`` or ``-fwd.fif.gz`` to save to FIF, or ``-fwd.h5`` to save to + HDF5. 
+""" + docdict[ "fnirs" ] = """ From 3c3ec57feebbdddb826535fe16768db787519bae Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Sun, 1 Oct 2023 14:58:42 -0400 Subject: [PATCH 04/37] MAINT: Warn when fitting cHPI amplitudes on Maxwell filtered data (#12038) --- mne/chpi.py | 10 ++++++++++ mne/tests/test_chpi.py | 9 +++++++++ 2 files changed, 19 insertions(+) diff --git a/mne/chpi.py b/mne/chpi.py index 311d372384b..96ce72ee195 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -624,6 +624,16 @@ def _setup_hpi_amplitude_fitting( on_missing = "raise" if not allow_empty else "ignore" hpi_freqs, hpi_pick, hpi_ons = get_chpi_info(info, on_missing=on_missing) + # check for maxwell filtering + for ent in info["proc_history"]: + for key in ("sss_info", "max_st"): + if len(ent["max_info"]["sss_info"]) > 0: + warn( + "Fitting cHPI amplutudes after Maxwell filtering may not to work, " + "consider fitting on the original data" + ) + break + _validate_type(t_window, (str, "numeric"), "t_window") if info["line_freq"] is not None: line_freqs = np.arange( diff --git a/mne/tests/test_chpi.py b/mne/tests/test_chpi.py index c374917676d..1e9b249ce02 100644 --- a/mne/tests/test_chpi.py +++ b/mne/tests/test_chpi.py @@ -422,6 +422,15 @@ def test_calculate_chpi_positions_artemis(): ) +@testing.requires_testing_data +def test_warn_maxwell_filtered(): + """Test that trying to compute locations on Maxwell filtered data warns.""" + raw = read_raw_fif(sss_fif_fname).crop(0, 1) + with pytest.warns(RuntimeWarning, match="Maxwell filter"): + amps = compute_chpi_amplitudes(raw) + assert len(amps["times"]) > 0 # but for this file, it does work! + + @testing.requires_testing_data def test_initial_fit_redo(): """Test that initial fits can be redone based on moments.""" From ff6bad289da40e08b82e4db010815c607a09d2a7 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Sun, 1 Oct 2023 17:29:04 -0400 Subject: [PATCH 05/37] BUG: Fix bug with validation of info["bads"] (#12039) --- doc/changes/devel.rst | 1 + mne/_fiff/meas_info.py | 74 +++++++++++++------ mne/_fiff/tests/test_meas_info.py | 36 +++++++-- mne/_fiff/tests/test_pick.py | 4 +- mne/channels/channels.py | 3 +- mne/forward/forward.py | 6 +- mne/io/artemis123/artemis123.py | 14 ++-- mne/io/cnt/cnt.py | 2 +- mne/preprocessing/__init__.py | 2 +- .../{annotate_nan.py => _annotate_nan.py} | 0 mne/preprocessing/ica.py | 2 +- mne/viz/topomap.py | 3 +- tutorials/io/60_ctf_bst_auditory.py | 2 +- 13 files changed, 99 insertions(+), 50 deletions(-) rename mne/preprocessing/{annotate_nan.py => _annotate_nan.py} (100%) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index aac7801085d..b9ec5e5777f 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -55,6 +55,7 @@ Bugs - Fix bug with :meth:`~mne.viz.Brain.add_annotation` when reading an annotation from a file with both hemispheres shown (:gh:`11946` by `Marijn van Vliet`_) - Fix bug with axis clip box boundaries in :func:`mne.viz.plot_evoked_topo` and related functions (:gh:`11999` by `Eric Larson`_) - Fix bug with ``subject_info`` when loading data from and exporting to EDF file (:gh:`11952` by `Paul Roujansky`_) +- Fix bug with delayed checking of :class:`info["bads"] ` (:gh:`12038` by `Eric Larson`_) - Fix handling of channel information in annotations when loading data from and exporting to EDF file (:gh:`11960` :gh:`12017` by `Paul Roujansky`_) - Add missing ``overwrite`` and ``verbose`` parameters to :meth:`Transform.save() ` (:gh:`12004` by `Marijn van Vliet`_) - Correctly prune channel-specific 
:class:`~mne.Annotations` when creating :class:`~mne.Epochs` without the channel(s) included in the channel specific annotations (:gh:`12010` by `Mathieu Scheltienne`_) diff --git a/mne/_fiff/meas_info.py b/mne/_fiff/meas_info.py index 672f805c1b8..5f3fa7c0000 100644 --- a/mne/_fiff/meas_info.py +++ b/mne/_fiff/meas_info.py @@ -939,18 +939,52 @@ def _check_ch_keys(ch, ci, name='info["chs"]', check_min=True): ) -# As options are added here, test_meas_info.py:test_info_bad should be updated -def _check_bads(bads): +def _check_bads_info_compat(bads, info): _validate_type(bads, list, "bads") - return bads + if not len(bads): + return # e.g. in empty_info + for bi, bad in enumerate(bads): + _validate_type(bad, str, f"bads[{bi}]") + if "ch_names" not in info: # somewhere in init, or deepcopy, or _empty_info, etc. + return + missing = [bad for bad in bads if bad not in info["ch_names"]] + if len(missing) > 0: + raise ValueError(f"bad channel(s) {missing} marked do not exist in info") + + +class MNEBadsList(list): + """Subclass of bads that checks inplace operations.""" + + def __init__(self, *, bads, info): + _check_bads_info_compat(bads, info) + self._mne_info = info + super().__init__(bads) + + def extend(self, iterable): + if not isinstance(iterable, list): + iterable = list(iterable) + _check_bads_info_compat(iterable, self._mne_info) + return super().extend(iterable) + + def append(self, x): + return self.extend([x]) + def __iadd__(self, x): + self.extend(x) + return self + + +# As options are added here, test_meas_info.py:test_info_bad should be updated +def _check_bads(bads, *, info): + return MNEBadsList(bads=bads, info=info) -def _check_description(description): + +def _check_description(description, *, info): _validate_type(description, (None, str), "info['description']") return description -def _check_dev_head_t(dev_head_t): +def _check_dev_head_t(dev_head_t, *, info): from ..transforms import Transform, _ensure_trans _validate_type(dev_head_t, (Transform, None), "info['dev_head_t']") @@ -959,23 +993,23 @@ def _check_dev_head_t(dev_head_t): return dev_head_t -def _check_experimenter(experimenter): +def _check_experimenter(experimenter, *, info): _validate_type(experimenter, (None, str), "experimenter") return experimenter -def _check_line_freq(line_freq): +def _check_line_freq(line_freq, *, info): _validate_type(line_freq, (None, "numeric"), "line_freq") line_freq = float(line_freq) if line_freq is not None else line_freq return line_freq -def _check_subject_info(subject_info): +def _check_subject_info(subject_info, *, info): _validate_type(subject_info, (None, dict), "subject_info") return subject_info -def _check_device_info(device_info): +def _check_device_info(device_info, *, info): _validate_type( device_info, ( @@ -987,7 +1021,7 @@ def _check_device_info(device_info): return device_info -def _check_helium_info(helium_info): +def _check_helium_info(helium_info, *, info): _validate_type( helium_info, ( @@ -1472,7 +1506,7 @@ class Info(dict, SetChannelsMixin, MontageMixin, ContainsMixin): "sfreq": "sfreq cannot be set directly. 
" "Please use method inst.resample() instead.", "subject_info": _check_subject_info, - "temp": lambda x: x, + "temp": lambda x, info=None: x, "utc_offset": "utc_offset cannot be set directly.", "working_dir": "working_dir cannot be set directly.", "xplotter_layout": "xplotter_layout cannot be set directly.", @@ -1482,6 +1516,8 @@ def __init__(self, *args, **kwargs): self._unlocked = True super().__init__(*args, **kwargs) # Deal with h5io writing things as dict + if "bads" in self: + self["bads"] = MNEBadsList(bads=self["bads"], info=self) for key in ("dev_head_t", "ctf_head_t", "dev_ctf_t"): _format_trans(self, key) for res in self.get("hpi_results", []): @@ -1526,7 +1562,9 @@ def __setitem__(self, key, val): if not unlocked: raise RuntimeError(self._attributes[key]) else: - val = self._attributes[key](val) # attribute checker function + val = self._attributes[key]( + val, info=self + ) # attribute checker function else: raise RuntimeError( f"Info does not support directly setting the key {repr(key)}. " @@ -1724,16 +1762,6 @@ def __deepcopy__(self, memodict): def _check_consistency(self, prepend_error=""): """Do some self-consistency checks and datatype tweaks.""" - missing = [bad for bad in self["bads"] if bad not in self["ch_names"]] - if len(missing) > 0: - msg = "%sbad channel(s) %s marked do not exist in info" - raise RuntimeError( - msg - % ( - prepend_error, - missing, - ) - ) meas_date = self.get("meas_date") if meas_date is not None: if ( @@ -3335,7 +3363,7 @@ def _force_update_info(info_base, info_target): The Info object(s) you wish to overwrite using info_base. These objects will be modified in-place. """ - exclude_keys = ["chs", "ch_names", "nchan"] + exclude_keys = ["chs", "ch_names", "nchan", "bads"] info_target = np.atleast_1d(info_target).ravel() all_infos = np.hstack([info_base, info_target]) for ii in all_infos: diff --git a/mne/_fiff/tests/test_meas_info.py b/mne/_fiff/tests/test_meas_info.py index 844d04fc624..feb30400d42 100644 --- a/mne/_fiff/tests/test_meas_info.py +++ b/mne/_fiff/tests/test_meas_info.py @@ -59,6 +59,7 @@ _dt_to_stamp, _add_timedelta_to_stamp, _read_extended_ch_info, + MNEBadsList, ) from mne.minimum_norm import ( make_inverse_operator, @@ -495,8 +496,8 @@ def test_check_consistency(): # Bad channels that are not in the info object info2 = info.copy() - info2["bads"] = ["b", "foo", "bar"] - pytest.raises(RuntimeError, info2._check_consistency) + with pytest.raises(ValueError, match="do not exist"): + info2["bads"] = ["b", "foo", "bar"] # Bad data types info2 = info.copy() @@ -1088,21 +1089,42 @@ def test_pickle(fname_info, unlocked): def test_info_bad(): """Test our info sanity checkers.""" - info = create_info(2, 1000.0, "eeg") + info = create_info(5, 1000.0, "eeg") info["description"] = "foo" info["experimenter"] = "bar" info["line_freq"] = 50.0 info["bads"] = info["ch_names"][:1] info["temp"] = ("whatever", 1.0) - # After 0.24 these should be pytest.raises calls - check, klass = pytest.raises, RuntimeError - with check(klass, match=r"info\['temp'\]"): + with pytest.raises(RuntimeError, match=r"info\['temp'\]"): info["bad_key"] = 1.0 for key, match in [("sfreq", r"inst\.resample"), ("chs", r"inst\.add_channels")]: - with check(klass, match=match): + with pytest.raises(RuntimeError, match=match): info[key] = info[key] with pytest.raises(ValueError, match="between meg<->head"): info["dev_head_t"] = Transform("mri", "head", np.eye(4)) + assert isinstance(info["bads"], MNEBadsList) + with pytest.raises(ValueError, match="do not exist in info"): + 
info["bads"] = ["foo"] + assert isinstance(info["bads"], MNEBadsList) + with pytest.raises(ValueError, match="do not exist in info"): + info["bads"] += ["foo"] + assert isinstance(info["bads"], MNEBadsList) + with pytest.raises(ValueError, match="do not exist in info"): + info["bads"].append("foo") + assert isinstance(info["bads"], MNEBadsList) + with pytest.raises(ValueError, match="do not exist in info"): + info["bads"].extend(["foo"]) + assert isinstance(info["bads"], MNEBadsList) + x = info["bads"] + with pytest.raises(ValueError, match="do not exist in info"): + x.append("foo") + assert info["bads"] == info["ch_names"][:1] # unchonged + x = info["bads"] + info["ch_names"][1:2] + assert x == info["ch_names"][:2] + assert not isinstance(x, MNEBadsList) # plain list + x = info["ch_names"][1:2] + info["bads"] + assert x == info["ch_names"][1::-1] # like [1, 0] in fancy indexing + assert not isinstance(x, MNEBadsList) # plain list def test_get_montage(): diff --git a/mne/_fiff/tests/test_pick.py b/mne/_fiff/tests/test_pick.py index 51aaa6b3631..786a14a728b 100644 --- a/mne/_fiff/tests/test_pick.py +++ b/mne/_fiff/tests/test_pick.py @@ -567,8 +567,8 @@ def test_clean_info_bads(): info = pick_info(raw.info, picks_meg) info._check_consistency() - info["bads"] += ["EEG 053"] - pytest.raises(RuntimeError, info._check_consistency) + with pytest.raises(ValueError, match="do not exist"): + info["bads"] += ["EEG 053"] with pytest.raises(ValueError, match="unique"): pick_info(raw.info, [0, 0]) diff --git a/mne/channels/channels.py b/mne/channels/channels.py index b6c82f27be2..d57610d257f 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -976,7 +976,7 @@ def rename_channels(info, mapping, allow_duplicates=False, *, verbose=None): raise ValueError("New channel names are not unique, renaming failed") # do the remapping in info - info["bads"] = bads + info["bads"] = [] ch_names_mapping = dict() for ch, ch_name in zip(info["chs"], ch_names): ch_names_mapping[ch["ch_name"]] = ch_name @@ -989,6 +989,7 @@ def rename_channels(info, mapping, allow_duplicates=False, *, verbose=None): proj["data"]["col_names"], ch_names_mapping ) info._update_redundant() + info["bads"] = bads info._check_consistency() diff --git a/mne/forward/forward.py b/mne/forward/forward.py index 07ea99d59ce..31aa3c2bdfc 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -458,11 +458,9 @@ def _read_forward_meas_info(tree, fid): else: raise ValueError("MEG/head coordinate transformation not found") - info["bads"] = _read_bad_channels( - fid, parent_meg, ch_names_mapping=ch_names_mapping - ) + bads = _read_bad_channels(fid, parent_meg, ch_names_mapping=ch_names_mapping) # clean up our bad list, old versions could have non-existent bads - info["bads"] = [bad for bad in info["bads"] if bad in info["ch_names"]] + info["bads"] = [bad for bad in bads if bad in info["ch_names"]] # Check if a custom reference has been applied tag = find_tag(fid, parent_mri, FIFF.FIFF_MNE_CUSTOM_REF) diff --git a/mne/io/artemis123/artemis123.py b/mne/io/artemis123/artemis123.py index 34a0c0118a6..8d937067a5d 100644 --- a/mne/io/artemis123/artemis123.py +++ b/mne/io/artemis123/artemis123.py @@ -192,7 +192,7 @@ def _get_artemis123_info(fname, pos_fname=None): # load mne loc dictionary loc_dict = _load_mne_locs() info["chs"] = [] - info["bads"] = [] + bads = [] for i, chan in enumerate(header_info["channels"]): # build chs struct @@ -209,7 +209,7 @@ def _get_artemis123_info(fname, pos_fname=None): # a value of another ref channel 
to make writers/readers happy. if t["cal"] == 0: t["cal"] = 4.716e-10 - info["bads"].append(t["ch_name"]) + bads.append(t["ch_name"]) t["loc"] = loc_dict.get(chan["name"], np.zeros(12)) if chan["name"].startswith("MEG"): @@ -247,7 +247,7 @@ def _get_artemis123_info(fname, pos_fname=None): t["coil_type"] = FIFF.FIFFV_COIL_NONE t["kind"] = FIFF.FIFFV_MISC_CH t["unit"] = FIFF.FIFF_UNIT_V - info["bads"].append(t["ch_name"]) + bads.append(t["ch_name"]) elif chan["name"].startswith(("AUX", "TRG", "MIO")): t["coil_type"] = FIFF.FIFFV_COIL_NONE @@ -268,10 +268,7 @@ def _get_artemis123_info(fname, pos_fname=None): # append this channel to the info info["chs"].append(t) if chan["FLL_ResetLock"] == "TRUE": - info["bads"].append(t["ch_name"]) - - # reduce info['bads'] to unique set - info["bads"] = list(set(info["bads"])) + bads.append(t["ch_name"]) # HPI information # print header_info.keys() @@ -313,6 +310,9 @@ def _get_artemis123_info(fname, pos_fname=None): info._unlocked = False info._update_redundant() + # reduce info['bads'] to unique set + info["bads"] = list(set(bads)) + del bads return info, header_info diff --git a/mne/io/cnt/cnt.py b/mne/io/cnt/cnt.py index 7d810a0cd49..1a6aa15b7f3 100644 --- a/mne/io/cnt/cnt.py +++ b/mne/io/cnt/cnt.py @@ -404,12 +404,12 @@ def _get_cnt_info(input_fname, eog, ecg, emg, misc, data_format, date_format): meas_date=meas_date, dig=dig, description=session_label, - bads=bads, subject_info=subject_info, chs=chs, ) info._unlocked = False info._update_redundant() + info["bads"] = bads return info, cnt_info diff --git a/mne/preprocessing/__init__.py b/mne/preprocessing/__init__.py index 8358f006e09..372df010e8c 100644 --- a/mne/preprocessing/__init__.py +++ b/mne/preprocessing/__init__.py @@ -52,7 +52,7 @@ "read_fine_calibration", "write_fine_calibration", ], - "annotate_nan": ["annotate_nan"], + "_annotate_nan": ["annotate_nan"], "interpolate": ["equalize_bads", "interpolate_bridged_electrodes"], "_css": ["cortical_signal_suppression"], "hfc": ["compute_proj_hfc"], diff --git a/mne/preprocessing/annotate_nan.py b/mne/preprocessing/_annotate_nan.py similarity index 100% rename from mne/preprocessing/annotate_nan.py rename to mne/preprocessing/_annotate_nan.py diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py index 1414f2a4ca5..15c1d286d6e 100644 --- a/mne/preprocessing/ica.py +++ b/mne/preprocessing/ica.py @@ -1372,8 +1372,8 @@ def _export_info(self, info, container, add_channels): ] with info._unlock(update_redundant=True, check_after=True): info["chs"] = ch_info - info["bads"] = [ch_names[k] for k in self.exclude] info["projs"] = [] # make sure projections are removed. 
+ info["bads"] = [ch_names[k] for k in self.exclude] @verbose def score_sources( diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py index ec518ec37f0..bac42416a29 100644 --- a/mne/viz/topomap.py +++ b/mne/viz/topomap.py @@ -121,11 +121,10 @@ def _prepare_topomap_plot(inst, ch_type, sphere=None): clean_ch_names = _clean_names(info["ch_names"]) for ii, this_ch in enumerate(info["chs"]): this_ch["ch_name"] = clean_ch_names[ii] - info["bads"] = _clean_names(info["bads"]) for comp in info["comps"]: comp["data"]["col_names"] = _clean_names(comp["data"]["col_names"]) - info._update_redundant() + info["bads"] = _clean_names(info["bads"]) info._check_consistency() # special case for merging grad channels diff --git a/tutorials/io/60_ctf_bst_auditory.py b/tutorials/io/60_ctf_bst_auditory.py index 7cb970bc986..01a65ef3234 100644 --- a/tutorials/io/60_ctf_bst_auditory.py +++ b/tutorials/io/60_ctf_bst_auditory.py @@ -153,7 +153,7 @@ # plotted by adding the event list as a keyword argument. As the bad segments # and saccades were added as annotations to the raw data, they are plotted as # well. -raw.plot(block=True) +raw.plot() # %% # Typical preprocessing step is the removal of power line artifact (50 Hz or From 110947f35abc93fd88f4e6b72f9e573dc25b355f Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Sun, 1 Oct 2023 19:03:10 -0400 Subject: [PATCH 06/37] MAINT: Speed up doc build (#12040) --- examples/inverse/dics_epochs.py | 9 +--- tutorials/intro/70_report.py | 47 ++++++------------- .../75_cluster_ftest_spatiotemporal.py | 3 +- 3 files changed, 18 insertions(+), 41 deletions(-) diff --git a/examples/inverse/dics_epochs.py b/examples/inverse/dics_epochs.py index dc8a0b7e14c..5ea93986fda 100644 --- a/examples/inverse/dics_epochs.py +++ b/examples/inverse/dics_epochs.py @@ -45,7 +45,7 @@ events = mne.find_events(raw) epochs = mne.Epochs( raw, - events, + events[:22], # just for execution speed of the tutorial event_id=1, tmin=-1, tmax=2.5, @@ -56,7 +56,6 @@ ), preload=True, ) -epochs = epochs[:10] # just for speed of execution for the tutorial # We are mostly interested in the beta band since it has been shown to be # active for somatosensory stimulation @@ -121,7 +120,7 @@ subjects_dir=subjects_dir, hemi="both", views="dorsal", - initial_time=0.55, + initial_time=1.2, brain_kwargs=dict(show=False), add_data_kwargs=dict( fmin=fmax / 10, @@ -131,7 +130,3 @@ colorbar_kwargs=dict(label_font_size=10), ), ) - -# You can save a movie like the one on our documentation website with: -# brain.save_movie(tmin=0.55, tmax=1.5, interpolation='linear', -# time_viewer=True) diff --git a/tutorials/intro/70_report.py b/tutorials/intro/70_report.py index 61f35466349..0c8c086eff8 100644 --- a/tutorials/intro/70_report.py +++ b/tutorials/intro/70_report.py @@ -203,12 +203,9 @@ # ignored; instead, only the explicitly passed projectors will be plotted. 
ecg_proj_path = sample_dir / "sample_audvis_ecg-proj.fif" -eog_proj_path = sample_dir / "sample_audvis_eog-proj.fif" - report = mne.Report(title="Projectors example") report.add_projs(info=raw_path, title="Projs from info") report.add_projs(info=raw_path, projs=ecg_proj_path, title="ECG projs from path") -report.add_projs(info=raw_path, projs=eog_proj_path, title="EOG projs from path") report.save("report_projs.html", overwrite=True) # %% @@ -289,7 +286,11 @@ report = mne.Report(title="BEM example") report.add_bem( - subject="sample", subjects_dir=subjects_dir, title="MRI & BEM", decim=20, width=256 + subject="sample", + subjects_dir=subjects_dir, + title="MRI & BEM", + decim=40, + width=256, ) report.save("report_mri_and_bem.html", overwrite=True) @@ -452,7 +453,7 @@ mne_logo_path = Path(mne.__file__).parent / "icons" / "mne_icon-cropped.png" fig_array = plt.imread(mne_logo_path) -rotation_angles = np.linspace(start=0, stop=360, num=17) +rotation_angles = np.linspace(start=0, stop=360, num=8, endpoint=False) figs = [] captions = [] @@ -462,7 +463,7 @@ fig_array_rotated = fig_array_rotated.clip(min=0, max=1) # Create the figure - fig, ax = plt.subplots() + fig, ax = plt.subplots(figsize=(3, 3), constrained_layout=True) ax.imshow(fig_array_rotated) ax.set_axis_off() @@ -470,17 +471,11 @@ figs.append(fig) captions.append(f"Rotation angle: {round(angle, 1)}°") -# can also be a MNEQtBrowser instance -with mne.viz.use_browser_backend("qt"): - figs.append(raw.plot()) -captions.append("... plus a raw data plot") - report = mne.Report(title="Multiple figures example") report.add_figure(fig=figs, title="Fun with figures! 🥳", caption=captions) report.save("report_custom_figures.html", overwrite=True) -for fig in figs[:-1]: +for fig in figs: plt.close(fig) -figs[-1].close() del figs # %% @@ -517,7 +512,9 @@ report = mne.Report(title="Tags example") report.add_image( - image=mne_logo_path, title="MNE Logo", tags=("image", "mne", "logo", "open-source") + image=mne_logo_path, + title="MNE Logo", + tags=("image", "mne", "logo", "open-source"), ) report.save("report_tags.html", overwrite=True) @@ -611,7 +608,7 @@ report = mne.Report( title="parse_folder example 3", subject="sample", subjects_dir=subjects_dir ) -report.parse_folder(data_path=data_path, pattern="", mri_decim=25) +report.parse_folder(data_path=data_path, pattern="", mri_decim=40) report.save("report_parse_folder_mri_bem.html", overwrite=True) # %% @@ -638,8 +635,8 @@ baseline = (None, 0) cov_fname = sample_dir / "sample_audvis-cov.fif" -pattern = "sample_audvis-no-filter-ave.fif" -evoked = mne.read_evokeds(sample_dir / pattern)[0] +pattern = "sample_audvis-ave.fif" +evoked = mne.read_evokeds(sample_dir / pattern)[0].pick("eeg").decimate(4) report = mne.Report( title="parse_folder example 4", baseline=baseline, cov_fname=cov_fname ) @@ -650,22 +647,6 @@ ) report.save("report_parse_folder_evoked.html", overwrite=True) -# %% -# If you want to actually *view* the noise covariance in the report, make sure -# it is captured by the pattern passed to :meth:`~mne.Report.parse_folder`, and -# also include a source for an :class:`~mne.Info` object (any of the -# :class:`~mne.io.Raw`, :class:`~mne.Epochs` or :class:`~mne.Evoked` -# :file:`.fif` files that contain subject data also contain the measurement -# information and should work): - -pattern = "sample_audvis-cov.fif" -info_fname = sample_dir / "sample_audvis-ave.fif" -report = mne.Report(title="parse_folder example 5", info_fname=info_fname) -report.parse_folder( - data_path, pattern=pattern, 
render_bem=False, n_time_points_evokeds=5 -) -report.save("report_parse_folder_cov.html", overwrite=True) - # %% # # Adding custom HTML (e.g., a description text) diff --git a/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py b/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py index 5d219211689..462a40ca433 100644 --- a/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py +++ b/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py @@ -58,7 +58,7 @@ # Setup for reading the raw data raw = mne.io.read_raw_fif(raw_fname, preload=True) -raw.filter(1, 30) +raw.filter(1, 25) events = mne.read_events(event_fname) # %% @@ -75,6 +75,7 @@ tmin, tmax, picks=picks, + decim=2, # just for speed! baseline=None, reject=reject, preload=True, From d585ff4ad2672fce92b629da681b2b5057a85888 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Oct 2023 09:28:12 -0400 Subject: [PATCH 07/37] Bump actions/checkout from 3 to 4 (#12046) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 43cfded6dad..dd85f1bb8a4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -18,7 +18,7 @@ jobs: package: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - uses: actions/setup-python@v4 with: python-version: '3.10' From 9f19bd694ce131b95535dd706c4d856333caaf66 Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Mon, 2 Oct 2023 15:48:12 +0200 Subject: [PATCH 08/37] [MRG] Don't look for an offset in an eyelink message if the message contains only 2 elements (#12003) --- doc/changes/devel.rst | 1 + mne/io/eyelink/_utils.py | 33 ++++++---- mne/io/eyelink/eyelink.py | 61 ++++--------------- mne/io/eyelink/tests/test_eyelink.py | 30 ++++++++- mne/preprocessing/realign.py | 2 +- mne/utils/docs.py | 50 +++++++++++++++ .../preprocessing/90_eyetracking_data.py | 4 +- 7 files changed, 115 insertions(+), 66 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index b9ec5e5777f..9b5205d4178 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -58,6 +58,7 @@ Bugs - Fix bug with delayed checking of :class:`info["bads"] ` (:gh:`12038` by `Eric Larson`_) - Fix handling of channel information in annotations when loading data from and exporting to EDF file (:gh:`11960` :gh:`12017` by `Paul Roujansky`_) - Add missing ``overwrite`` and ``verbose`` parameters to :meth:`Transform.save() ` (:gh:`12004` by `Marijn van Vliet`_) +- Fix parsing of eye-link :class:`~mne.Annotations` when ``apply_offsets=False`` is provided to :func:`~mne.io.read_raw_eyelink` (:gh:`12003` by `Mathieu Scheltienne`_) - Correctly prune channel-specific :class:`~mne.Annotations` when creating :class:`~mne.Epochs` without the channel(s) included in the channel specific annotations (:gh:`12010` by `Mathieu Scheltienne`_) - Correctly handle passing ``"eyegaze"`` or ``"pupil"`` to :meth:`mne.io.Raw.pick` (:gh:`12019` by `Scott Huberty`_) diff --git a/mne/io/eyelink/_utils.py b/mne/io/eyelink/_utils.py index fc7a955e72c..23c9cb38329 100644 --- a/mne/io/eyelink/_utils.py +++ b/mne/io/eyelink/_utils.py @@ -40,7 +40,9 @@ } -def _parse_eyelink_ascii(fname, find_overlaps=True, overlap_threshold=0.05): +def _parse_eyelink_ascii( + fname, 
find_overlaps=True, overlap_threshold=0.05, apply_offsets=False +): # ======================== Parse ASCII File ========================= raw_extras = dict() raw_extras.update(_parse_recording_blocks(fname)) @@ -49,7 +51,7 @@ def _parse_eyelink_ascii(fname, find_overlaps=True, overlap_threshold=0.05): _validate_data(raw_extras) # ======================== Create DataFrames ======================== - raw_extras["dfs"] = _create_dataframes(raw_extras) + raw_extras["dfs"] = _create_dataframes(raw_extras, apply_offsets) del raw_extras["sample_lines"] # free up memory # add column names to dataframes and set the dtype of each column col_names, ch_names = _infer_col_names(raw_extras) @@ -252,7 +254,7 @@ def _get_sfreq_from_ascii(rec_info): return float(rec_info[rec_info.index("RATE") + 1]) -def _create_dataframes(raw_extras): +def _create_dataframes(raw_extras, apply_offsets): """Create pandas.DataFrame for Eyelink samples and events. Creates a pandas DataFrame for sample_lines and for each @@ -280,17 +282,22 @@ def _create_dataframes(raw_extras): # make dataframe for experiment messages if raw_extras["event_lines"]["MSG"]: msgs = [] - for tokens in raw_extras["event_lines"]["MSG"]: - timestamp = tokens[0] - # if offset token exists, it will be the 1st index and is numeric - if tokens[1].lstrip("-").replace(".", "", 1).isnumeric(): - offset = float(tokens[1]) - msg = " ".join(str(x) for x in tokens[2:]) - else: - # there is no offset token + for token in raw_extras["event_lines"]["MSG"]: + if apply_offsets and len(token) == 2: + ts, msg = token offset = np.nan - msg = " ".join(str(x) for x in tokens[1:]) - msgs.append([timestamp, offset, msg]) + elif apply_offsets: + ts = token[0] + try: + offset = float(token[1]) + msg = " ".join(str(x) for x in token[2:]) + except ValueError: + offset = np.nan + msg = " ".join(str(x) for x in token[1:]) + else: + ts, offset = token[0], np.nan + msg = " ".join(str(x) for x in token[1:]) + msgs.append([ts, offset, msg]) df_dict["messages"] = pd.DataFrame(msgs) # make dataframe for recording block start, end times diff --git a/mne/io/eyelink/eyelink.py b/mne/io/eyelink/eyelink.py index 9987f809375..501b8ad798b 100644 --- a/mne/io/eyelink/eyelink.py +++ b/mne/io/eyelink/eyelink.py @@ -28,35 +28,15 @@ def read_raw_eyelink( overlap_threshold=0.05, verbose=None, ): - """Reader for an Eyelink .asc file. + """Reader for an Eyelink ``.asc`` file. Parameters ---------- - fname : path-like - Path to the eyelink file (.asc). - create_annotations : bool | list (default True) - Whether to create mne.Annotations from occular events - (blinks, fixations, saccades) and experiment messages. If a list, must - contain one or more of ['fixations', 'saccades',' blinks', messages']. - If True, creates mne.Annotations for both occular events and experiment - messages. - apply_offsets : bool (default False) - Adjusts the onset time of the mne.Annotations created from Eyelink - experiment messages, if offset values exist in the ASCII file. - find_overlaps : bool (default False) - Combine left and right eye :class:`mne.Annotations` (blinks, fixations, - saccades) if their start times and their stop times are both not - separated by more than overlap_threshold. - overlap_threshold : float (default 0.05) - Time in seconds. Threshold of allowable time-gap between both the start and - stop times of the left and right eyes. If the gap is larger than the threshold, - the :class:`mne.Annotations` will be kept separate (i.e. ``"blink_L"``, - ``"blink_R"``). 
If the gap is smaller than the threshold, the - :class:`mne.Annotations` will be merged and labeled as ``"blink_both"``. - Defaults to ``0.05`` seconds (50 ms), meaning that if the blink start times of - the left and right eyes are separated by less than 50 ms, and the blink stop - times of the left and right eyes are separated by less than 50 ms, then the - blink will be merged into a single :class:`mne.Annotations`. + %(eyelink_fname)s + %(eyelink_create_annotations)s + %(eyelink_apply_offsets)s + %(eyelink_find_overlaps)s + %(eyelink_overlap_threshold)s %(verbose)s Returns @@ -95,28 +75,11 @@ class RawEyelink(BaseRaw): Parameters ---------- - fname : path-like - Path to the data file (.XXX). - create_annotations : bool | list (default True) - Whether to create mne.Annotations from occular events - (blinks, fixations, saccades) and experiment messages. If a list, must - contain one or more of ['fixations', 'saccades',' blinks', messages']. - If True, creates mne.Annotations for both occular events and experiment - messages. - apply_offsets : bool (default False) - Adjusts the onset time of the mne.Annotations created from Eyelink - experiment messages, if offset values exist in the ASCII file. - find_overlaps : boolean (default False) - Combine left and right eye :class:`mne.Annotations` (blinks, fixations, - saccades) if their start times and their stop times are both not - separated by more than overlap_threshold. - overlap_threshold : float (default 0.05) - Time in seconds. Threshold of allowable time-gap between the start and - stop times of the left and right eyes. If gap is larger than threshold, - the :class:`mne.Annotations` will be kept separate (i.e. "blink_L", - "blink_R"). If the gap is smaller than the threshold, the - :class:`mne.Annotations` will be merged (i.e. "blink_both"). 
- + %(eyelink_fname)s + %(eyelink_create_annotations)s + %(eyelink_apply_offsets)s + %(eyelink_find_overlaps)s + %(eyelink_overlap_threshold)s %(verbose)s See Also @@ -141,7 +104,7 @@ def __init__( # ======================== Parse ASCII file ========================== eye_ch_data, info, raw_extras = _parse_eyelink_ascii( - fname, find_overlaps, overlap_threshold + fname, find_overlaps, overlap_threshold, apply_offsets ) # ======================== Create Raw Object ========================= super(RawEyelink, self).__init__( diff --git a/mne/io/eyelink/tests/test_eyelink.py b/mne/io/eyelink/tests/test_eyelink.py index 17ab2d35311..c482826dde8 100644 --- a/mne/io/eyelink/tests/test_eyelink.py +++ b/mne/io/eyelink/tests/test_eyelink.py @@ -3,6 +3,7 @@ import pytest import numpy as np +from numpy.testing import assert_allclose from mne.datasets.testing import data_path, requires_testing_data from mne.io import read_raw_eyelink @@ -254,7 +255,7 @@ def test_multi_block_misc_channels(fname, tmp_path): _simulate_eye_tracking_data(fname, out_file) with pytest.warns(RuntimeWarning, match="Raw eyegaze coordinates"): - raw = read_raw_eyelink(out_file) + raw = read_raw_eyelink(out_file, apply_offsets=True) chs_in_file = [ "xpos_right", @@ -286,3 +287,30 @@ def test_multi_block_misc_channels(fname, tmp_path): def test_basics(this_fname): """Test basics of reading.""" _test_raw_reader(read_raw_eyelink, fname=this_fname, test_preloading=False) + + +def test_annotations_without_offset(tmp_path): + """Test read of annotations without offset.""" + out_file = tmp_path / "tmp_eyelink.asc" + + # create fake dataset + with open(fname_href, "r") as file: + lines = file.readlines() + ts = lines[-3].split("\t")[0] + line = f"MSG\t{ts} test string\n" + lines = lines[:-3] + [line] + lines[-3:] + + with open(out_file, "w") as file: + file.writelines(lines) + + raw = read_raw_eyelink(out_file, apply_offsets=False) + assert raw.annotations[-1]["description"] == "test string" + onset1 = raw.annotations[-1]["onset"] + assert raw.annotations[1]["description"] == "-2 SYNCTIME" + onset2 = raw.annotations[1]["onset"] + + raw = read_raw_eyelink(out_file, apply_offsets=True) + assert raw.annotations[-1]["description"] == "test string" + assert raw.annotations[1]["description"] == "SYNCTIME" + assert_allclose(raw.annotations[-1]["onset"], onset1) + assert_allclose(raw.annotations[1]["onset"], onset2 - 2 / raw.info["sfreq"]) diff --git a/mne/preprocessing/realign.py b/mne/preprocessing/realign.py index 35dadf86411..e3710fa9d58 100644 --- a/mne/preprocessing/realign.py +++ b/mne/preprocessing/realign.py @@ -29,7 +29,7 @@ def realign_raw(raw, other, t_raw, t_other, verbose=None): t_raw : array-like, shape (n_events,) The times of shared events in ``raw`` relative to ``raw.times[0]`` (0). Typically these could be events on some TTL channel like - ``find_events(raw)[:, 0] - raw.first_event``. + ``find_events(raw)[:, 0] - raw.first_samp``. t_other : array-like, shape (n_events,) The times of shared events in ``other`` relative to ``other.times[0]``. %(verbose)s diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 7ec2dbc4534..d32e1923aa4 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -1518,6 +1518,56 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): the head circle. """ +docdict[ + "eyelink_apply_offsets" +] = """ +apply_offsets : bool (default False) + Adjusts the onset time of the :class:`~mne.Annotations` created from Eyelink + experiment messages, if offset values exist in the ASCII file. 
If False, any + offset-like values will be prepended to the annotation description. +""" + +docdict[ + "eyelink_create_annotations" +] = """ +create_annotations : bool | list (default True) + Whether to create :class:`~mne.Annotations` from occular events + (blinks, fixations, saccades) and experiment messages. If a list, must + contain one or more of ``['fixations', 'saccades',' blinks', messages']``. + If True, creates :class:`~mne.Annotations` for both occular events and + experiment messages. +""" + +docdict[ + "eyelink_find_overlaps" +] = """ +find_overlaps : bool (default False) + Combine left and right eye :class:`mne.Annotations` (blinks, fixations, + saccades) if their start times and their stop times are both not + separated by more than overlap_threshold. +""" + +docdict[ + "eyelink_fname" +] = """ +fname : path-like + Path to the eyelink file (``.asc``).""" + +docdict[ + "eyelink_overlap_threshold" +] = """ +overlap_threshold : float (default 0.05) + Time in seconds. Threshold of allowable time-gap between both the start and + stop times of the left and right eyes. If the gap is larger than the threshold, + the :class:`mne.Annotations` will be kept separate (i.e. ``"blink_L"``, + ``"blink_R"``). If the gap is smaller than the threshold, the + :class:`mne.Annotations` will be merged and labeled as ``"blink_both"``. + Defaults to ``0.05`` seconds (50 ms), meaning that if the blink start times of + the left and right eyes are separated by less than 50 ms, and the blink stop + times of the left and right eyes are separated by less than 50 ms, then the + blink will be merged into a single :class:`mne.Annotations`. +""" + # %% # F diff --git a/tutorials/preprocessing/90_eyetracking_data.py b/tutorials/preprocessing/90_eyetracking_data.py index a82a7147d24..2e71f3d00b4 100644 --- a/tutorials/preprocessing/90_eyetracking_data.py +++ b/tutorials/preprocessing/90_eyetracking_data.py @@ -156,8 +156,8 @@ event_dict = dict(Flash=2) # %% -# Align the eye-tracking data with EEG the data -# --------------------------------------------- +# Align the eye-tracking data with EEG data +# ----------------------------------------- # # In this dataset, eye-tracking and EEG data were recorded simultaneously, but on # different systems, so we'll need to align the data before we can analyze them From 578f2a9c56f41329be375669d0c79600cc57b6ad Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Mon, 2 Oct 2023 18:11:51 +0200 Subject: [PATCH 09/37] Stack vertices in plot_volume_source_estimates (#12025) --- doc/changes/devel.rst | 1 + mne/minimum_norm/inverse.py | 10 ++++++ mne/viz/_3d.py | 29 ++++++++-------- mne/viz/tests/test_3d_mpl.py | 67 ++++++++++++++++++++++++++++++++++-- 4 files changed, 90 insertions(+), 17 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index 9b5205d4178..729915a453b 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -60,6 +60,7 @@ Bugs - Add missing ``overwrite`` and ``verbose`` parameters to :meth:`Transform.save() ` (:gh:`12004` by `Marijn van Vliet`_) - Fix parsing of eye-link :class:`~mne.Annotations` when ``apply_offsets=False`` is provided to :func:`~mne.io.read_raw_eyelink` (:gh:`12003` by `Mathieu Scheltienne`_) - Correctly prune channel-specific :class:`~mne.Annotations` when creating :class:`~mne.Epochs` without the channel(s) included in the channel specific annotations (:gh:`12010` by `Mathieu Scheltienne`_) +- Fix :func:`~mne.viz.plot_volume_source_estimates` with :class:`~mne.VolSourceEstimate` which include a list of 
vertices (:gh:`12025` by `Mathieu Scheltienne`_) - Correctly handle passing ``"eyegaze"`` or ``"pupil"`` to :meth:`mne.io.Raw.pick` (:gh:`12019` by `Scott Huberty`_) API changes diff --git a/mne/minimum_norm/inverse.py b/mne/minimum_norm/inverse.py index e505b155490..7b23d137858 100644 --- a/mne/minimum_norm/inverse.py +++ b/mne/minimum_norm/inverse.py @@ -145,6 +145,16 @@ def _repr_html_(self): ) return html + @property + def ch_names(self): + """Name of channels attached to the inverse operator.""" + return self["info"].ch_names + + @property + def info(self): + """:class:`~mne.Info` attached to the inverse operator.""" + return self["info"] + def _pick_channels_inverse_operator(ch_names, inv): """Return data channel indices to be used knowing an inverse operator. diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index 2b994f5f9bb..f4aa2b1999c 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -2650,10 +2650,9 @@ def plot_volume_source_estimates( %(subject_none)s If ``None``, ``stc.subject`` will be used. %(subjects_dir)s - mode : str - The plotting mode to use. Either 'stat_map' (default) or 'glass_brain'. - For "glass_brain", activation absolute values are displayed - after being transformed to a standard MNI brain. + mode : ``'stat_map'`` | ``'glass_brain'`` + The plotting mode to use. For ``'glass_brain'``, activation absolute values are + displayed after being transformed to a standard MNI brain. bg_img : instance of SpatialImage | str The background image used in the nilearn plotting function. Can also be a string to use the ``bg_img`` file in the subject's @@ -2714,10 +2713,11 @@ def plot_volume_source_estimates( >>> morph = mne.compute_source_morph(src_sample, subject_to='fsaverage') # doctest: +SKIP >>> fig = stc_vol_sample.plot(morph) # doctest: +SKIP """ # noqa: E501 - from matplotlib import pyplot as plt, colors import nibabel as nib - from ..source_estimate import VolSourceEstimate + from matplotlib import pyplot as plt, colors + from ..morph import SourceMorph + from ..source_estimate import VolSourceEstimate from ..source_space._source_space import _ensure_src if not check_version("nilearn", "0.4"): @@ -2745,8 +2745,9 @@ def plot_volume_source_estimates( level="debug", ) subject = _check_subject(src_subject, subject, first_kind=kind) - stc_ijk = np.array(np.unravel_index(stc.vertices[0], img.shape[:3], order="F")).T - assert stc_ijk.shape == (len(stc.vertices[0]), 3) + vertices = np.hstack(stc.vertices) + stc_ijk = np.array(np.unravel_index(vertices, img.shape[:3], order="F")).T + assert stc_ijk.shape == (vertices.size, 3) del kind # XXX this assumes zooms are uniform, should probably mult by zooms... 
@@ -2756,12 +2757,11 @@ def _cut_coords_to_idx(cut_coords, img): """Convert voxel coordinates to index in stc.data.""" ijk = _cut_coords_to_ijk(cut_coords, img) del cut_coords - logger.debug(" Affine remapped cut coords to [%d, %d, %d] idx" % tuple(ijk)) + logger.debug(" Affine remapped cut coords to [%d, %d, %d] idx", tuple(ijk)) dist, loc_idx = dist_to_verts.query(ijk[np.newaxis]) dist, loc_idx = dist[0], loc_idx[0] logger.debug( - " Using vertex %d at a distance of %d voxels" - % (stc.vertices[0][loc_idx], dist) + " Using vertex %d at a distance of %d voxels", (vertices[loc_idx], dist) ) return loc_idx @@ -2848,7 +2848,7 @@ def _update_timeslice(idx, params): plot_map_callback(params["img_idx"], title="", cut_coords=cut_coords) def _update_vertlabel(loc_idx): - vert_legend.get_texts()[0].set_text(f"{stc.vertices[0][loc_idx]}") + vert_legend.get_texts()[0].set_text(f"{vertices[loc_idx]}") @verbose_dec def _onclick(event, params, verbose=None): @@ -2932,7 +2932,7 @@ def _onclick(event, params, verbose=None): (stc.times[time_idx],) + tuple(cut_coords) + tuple(ijk) - + (stc.vertices[0][loc_idx],) + + (vertices[loc_idx],) ) ) del ijk @@ -3046,8 +3046,7 @@ def plot_and_correct(*args, **kwargs): plot_and_correct(stat_map_img=params["img_idx"], title="", cut_coords=cut_coords) - if show: - plt.show() + plt_show(show) fig.canvas.mpl_connect( "button_press_event", partial(_onclick, params=params, verbose=verbose) ) diff --git a/mne/viz/tests/test_3d_mpl.py b/mne/viz/tests/test_3d_mpl.py index 9fdd1d02f85..2060de1ebbe 100644 --- a/mne/viz/tests/test_3d_mpl.py +++ b/mne/viz/tests/test_3d_mpl.py @@ -13,13 +13,21 @@ import pytest from mne import ( + compute_covariance, + compute_source_morph, + make_fixed_length_epochs, + make_forward_solution, + read_bem_solution, read_forward_solution, - VolSourceEstimate, + read_trans, + setup_volume_source_space, SourceEstimate, + VolSourceEstimate, VolVectorSourceEstimate, - compute_source_morph, ) from mne.datasets import testing +from mne.io import read_raw_fif +from mne.minimum_norm import apply_inverse, make_inverse_operator from mne.utils import catch_logging, _record_warnings from mne.viz import plot_volume_source_estimates from mne.viz.utils import _fake_click, _fake_keypress @@ -148,3 +156,58 @@ def test_plot_volume_source_estimates_morph(): stc.plot( sample_src, "sample", subjects_dir, clim=dict(lims=[-1, 2, 3], kind="value") ) + + +@testing.requires_testing_data +def test_plot_volume_source_estimates_on_vol_labels(): + """Test plot of source estimate on srcs setup on 2 labels.""" + pytest.importorskip("nibabel") + pytest.importorskip("dipy") + pytest.importorskip("nilearn") + raw = read_raw_fif( + data_dir / "MEG" / "sample" / "sample_audvis_trunc_raw.fif", preload=False + ) + raw.pick("meg").crop(0, 10) + raw.pick(raw.ch_names[::2]).del_proj().load_data() + epochs = make_fixed_length_epochs(raw, preload=True).apply_baseline((None, None)) + evoked = epochs.average() + subject = "sample" + bem = read_bem_solution( + subjects_dir / f"{subject}" / "bem" / "sample-320-bem-sol.fif" + ) + pos = 25.0 # spacing in mm + volume_label = [ + "Right-Cerebral-Cortex", + "Left-Cerebral-Cortex", + ] + src = setup_volume_source_space( + subject, + subjects_dir=subjects_dir, + pos=pos, + mri=subjects_dir / subject / "mri" / "aseg.mgz", + bem=bem, + volume_label=volume_label, + add_interpolator=False, + ) + trans = read_trans(data_dir / "MEG" / "sample" / "sample_audvis_trunc-trans.fif") + fwd = make_forward_solution( + evoked.info, + trans, + src, + bem, + meg=True, + 
eeg=False, + mindist=0, + n_jobs=1, + ) + cov = compute_covariance( + epochs, + tmin=None, + tmax=None, + method="empirical", + ) + inverse_operator = make_inverse_operator(evoked.info, fwd, cov, loose=1, depth=0.8) + stc = apply_inverse( + evoked, inverse_operator, 1.0 / 3**2, method="sLORETA", pick_ori=None + ) + stc.plot(src, subject, subjects_dir, initial_time=0.03) From fd08b5273ed2e2c02cde409252e10b3b580f5f37 Mon Sep 17 00:00:00 2001 From: paulroujansky Date: Mon, 2 Oct 2023 20:56:00 +0200 Subject: [PATCH 10/37] Do not set annotation channel when missing from input data when reading EDF (#12044) Co-authored-by: Paul ROUJANSKY --- doc/changes/devel.rst | 2 +- mne/io/edf/edf.py | 33 ++++++++-------- mne/io/edf/tests/test_edf.py | 77 +++++++++++++++++++++++++++++++++++- 3 files changed, 93 insertions(+), 19 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index 729915a453b..a639a64428b 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -56,7 +56,7 @@ Bugs - Fix bug with axis clip box boundaries in :func:`mne.viz.plot_evoked_topo` and related functions (:gh:`11999` by `Eric Larson`_) - Fix bug with ``subject_info`` when loading data from and exporting to EDF file (:gh:`11952` by `Paul Roujansky`_) - Fix bug with delayed checking of :class:`info["bads"] ` (:gh:`12038` by `Eric Larson`_) -- Fix handling of channel information in annotations when loading data from and exporting to EDF file (:gh:`11960` :gh:`12017` by `Paul Roujansky`_) +- Fix handling of channel information in annotations when loading data from and exporting to EDF file (:gh:`11960` :gh:`12017` :gh:`12044` by `Paul Roujansky`_) - Add missing ``overwrite`` and ``verbose`` parameters to :meth:`Transform.save() ` (:gh:`12004` by `Marijn van Vliet`_) - Fix parsing of eye-link :class:`~mne.Annotations` when ``apply_offsets=False`` is provided to :func:`~mne.io.read_raw_eyelink` (:gh:`12003` by `Mathieu Scheltienne`_) - Correctly prune channel-specific :class:`~mne.Annotations` when creating :class:`~mne.Epochs` without the channel(s) included in the channel specific annotations (:gh:`12010` by `Mathieu Scheltienne`_) diff --git a/mne/io/edf/edf.py b/mne/io/edf/edf.py index dd8a80d3fef..a1dce08c049 100644 --- a/mne/io/edf/edf.py +++ b/mne/io/edf/edf.py @@ -206,6 +206,7 @@ def __init__( ) annotations = _read_annotations_edf( tal_data[0], + ch_names=info["ch_names"], encoding=encoding, ) self.set_annotations(annotations, on_missing="warn") @@ -1892,25 +1893,21 @@ def read_raw_gdf( @fill_doc -def _read_annotations_edf(annotations, encoding="utf8"): +def _read_annotations_edf(annotations, ch_names=None, encoding="utf8"): """Annotation File Reader. Parameters ---------- annotations : ndarray (n_chans, n_samples) | str Channel data in EDF+ TAL format or path to annotation file. + ch_names : list of string + List of channels' names. %(encoding_edf)s Returns ------- - onset : array of float, shape (n_annotations,) - The starting time of annotations in seconds after ``orig_time``. - duration : array of float, shape (n_annotations,) - Durations of the annotations in seconds. - description : array of str, shape (n_annotations,) - Array of strings containing description for each annotation. If a - string, all the annotations are given the same description. To reject - epochs, use description starting with keyword 'bad'. See example above. + annot : instance of Annotations + The annotations. 
""" pat = "([+-]\\d+\\.?\\d*)(\x15(\\d+\\.?\\d*))?(\x14.*?)\x14\x00" if isinstance(annotations, str): @@ -1949,7 +1946,11 @@ def _read_annotations_edf(annotations, encoding="utf8"): duration = float(ev[2]) if ev[2] else 0 for description in ev[3].split("\x14")[1:]: if description: - if "@@" in description: + if ( + "@@" in description + and ch_names is not None + and description.split("@@")[1] in ch_names + ): description, ch_name = description.split("@@") key = f"{onset}_{duration}_{description}" else: @@ -1979,22 +1980,20 @@ def _read_annotations_edf(annotations, encoding="utf8"): offset = -onset if events: - onset, duration, description, ch_names = zip(*events.values()) + onset, duration, description, annot_ch_names = zip(*events.values()) else: - onset, duration, description, ch_names = list(), list(), list(), list() + onset, duration, description, annot_ch_names = list(), list(), list(), list() - assert len(onset) == len(duration) == len(description) == len(ch_names) + assert len(onset) == len(duration) == len(description) == len(annot_ch_names) - annotations = Annotations( + return Annotations( onset=onset, duration=duration, description=description, orig_time=None, - ch_names=ch_names, + ch_names=annot_ch_names, ) - return annotations - def _get_annotations_gdf(edf_info, sfreq): onset, duration, desc = list(), list(), list() diff --git a/mne/io/edf/tests/test_edf.py b/mne/io/edf/tests/test_edf.py index 9fad34c064b..e1c176c7e4c 100644 --- a/mne/io/edf/tests/test_edf.py +++ b/mne/io/edf/tests/test_edf.py @@ -24,7 +24,7 @@ import pytest from mne import pick_types, Annotations -from mne.annotations import events_from_annotations, read_annotations +from mne.annotations import _ndarray_ch_names, events_from_annotations, read_annotations from mne.datasets import testing from mne.io import read_raw_edf, read_raw_bdf, read_raw_fif, edf, read_raw_gdf from mne.io.tests.test_raw import _test_raw_reader @@ -504,6 +504,81 @@ def test_read_utf8_annotations(): assert raw.annotations[1]["description"] == "仰卧" +def test_read_annotations_edf(tmp_path): + """Test reading annotations from EDF file.""" + annot = ( + b"+1.1\x14Event A@@CH1\x14\x00\x00" + b"+1.2\x14Event A\x14\x00\x00" + b"+1.3\x14Event B@@CH1\x14\x00\x00" + b"+1.3\x14Event B@@CH2\x14\x00\x00" + b"+1.4\x14Event A@@CH3\x14\x00\x00" + b"+1.5\x14Event B\x14\x00\x00" + ) + annot_file = tmp_path / "annotations.edf" + with open(annot_file, "wb") as f: + f.write(annot) + + # Test reading annotations from channel data + with open(annot_file, "rb") as f: + tal_channel = _read_ch( + f, + subtype="EDF", + dtype=" Date: Mon, 2 Oct 2023 16:14:19 -0400 Subject: [PATCH 11/37] BUG: Fix bug with plot_projs_topomap (#11792) --- mne/viz/tests/test_topomap.py | 26 ++++++++++-------- mne/viz/topomap.py | 50 ++++++++++++++++++++++------------- 2 files changed, 46 insertions(+), 30 deletions(-) diff --git a/mne/viz/tests/test_topomap.py b/mne/viz/tests/test_topomap.py index 86a73290e58..4f95f586d98 100644 --- a/mne/viz/tests/test_topomap.py +++ b/mne/viz/tests/test_topomap.py @@ -27,6 +27,7 @@ create_info, read_cov, EvokedArray, + compute_proj_raw, Projection, ) from mne._fiff.proj import make_eeg_average_ref_proj @@ -71,6 +72,8 @@ layout = read_layout("Vectorview-all") cov_fname = base_dir / "test-cov.fif" +fast_test = dict(res=8, contours=0, sensors=False) + @pytest.mark.parametrize("constrained_layout", (False, True)) def test_plot_topomap_interactive(constrained_layout): @@ -135,25 +138,18 @@ def test_plot_projs_topomap(): """Test plot_projs_topomap.""" 
projs = read_proj(ecg_fname) info = read_info(raw_fname) - fast_test = {"res": 8, "contours": 0, "sensors": False} plot_projs_topomap(projs, info=info, colorbar=True, **fast_test) - plt.close("all") - ax = plt.subplot(111) + _, ax = plt.subplots() projs[3].plot_topomap(info) plot_projs_topomap(projs[:1], info, axes=ax, **fast_test) # test axes - plt.close("all") triux_info = read_info(triux_fname) plot_projs_topomap(triux_info["projs"][-1:], triux_info, **fast_test) - plt.close("all") plot_projs_topomap(triux_info["projs"][:1], triux_info, **fast_test) - plt.close("all") eeg_avg = make_eeg_average_ref_proj(info) eeg_avg.plot_topomap(info, **fast_test) - plt.close("all") # test vlims for vlim in ("joint", (-1, 1), (None, 0.5), (0.5, None), (None, None)): plot_projs_topomap(projs[:-1], info, vlim=vlim, colorbar=True) - plt.close("all") eeg_proj = make_eeg_average_ref_proj(info) info_meg = pick_info(info, pick_types(info, meg=True, eeg=False)) @@ -161,6 +157,17 @@ def test_plot_projs_topomap(): plot_projs_topomap([eeg_proj], info_meg) +@pytest.mark.parametrize("vlim", ("joint", None)) +@pytest.mark.parametrize("meg", ("combined", "separate")) +def test_plot_projs_topomap_joint(meg, vlim, raw): + """Test that plot_projs_topomap works with joint vlim.""" + if vlim is None: + vlim = (None, None) + projs = compute_proj_raw(raw, meg=meg) + fig = plot_projs_topomap(projs, info=raw.info, vlim=vlim, **fast_test) + assert len(fig.axes) == 4 # 2 mag, 2 grad + + def test_plot_topomap_animation(capsys): """Test topomap plotting.""" # evoked @@ -322,7 +329,6 @@ def test_plot_topomap_basic(): """Test basics of topomap plotting.""" evoked = read_evokeds(evoked_fname, "Left Auditory", baseline=(None, 0)) res = 8 - fast_test = dict(res=res, contours=0, sensors=False, time_unit="s") fast_test_noscale = dict(res=res, contours=0, sensors=False) ev_bad = evoked.copy().pick(picks="eeg") ev_bad.pick(ev_bad.ch_names[:2]) @@ -649,8 +655,6 @@ def test_plot_arrowmap(evoked): @testing.requires_testing_data def test_plot_topomap_neuromag122(): """Test topomap plotting.""" - res = 8 - fast_test = dict(res=res, contours=0, sensors=False) evoked = read_evokeds(evoked_fname, "Left Auditory", baseline=(None, 0)) evoked.pick(picks="grad") evoked.pick(evoked.ch_names[:122]) diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py index bac42416a29..0802362a27f 100644 --- a/mne/viz/topomap.py +++ b/mne/viz/topomap.py @@ -474,34 +474,46 @@ def _plot_projs_topomap( projs = _check_type_projs(projs) _validate_type(info, "info", "info") - types, datas, poss, spheres, outliness, ch_typess = [], [], [], [], [], [] + # Preprocess projs to deal with joint MEG projectors. 
If we duplicate these and + # split into mag and grad, they should work as expected + info_names = _clean_names(info["ch_names"], remove_whitespace=True) + use_projs = list() + for proj in projs: + proj = _eliminate_zeros(proj) # gh 5641, makes a copy + proj["data"]["col_names"] = _clean_names( + proj["data"]["col_names"], + remove_whitespace=True, + ) + picks = pick_channels(info_names, proj["data"]["col_names"], ordered=True) + proj_types = info.get_channel_types(picks) + unique_types = sorted(set(proj_types)) + for type_ in unique_types: + proj_picks = np.where([proj_type == type_ for proj_type in proj_types])[0] + use_projs.append(copy.deepcopy(proj)) + use_projs[-1]["data"]["data"] = proj["data"]["data"][:, proj_picks] + use_projs[-1]["data"]["col_names"] = [ + proj["data"]["col_names"][pick] for pick in proj_picks + ] + projs = use_projs + + datas, poss, spheres, outliness, ch_typess = [], [], [], [], [] for proj in projs: # get ch_names, ch_types, data - proj = _eliminate_zeros(proj) # gh 5641 - ch_names = _clean_names(proj["data"]["col_names"], remove_whitespace=True) - if vlim == "joint": - ch_idxs = np.where(np.isin(info["ch_names"], proj["data"]["col_names"]))[0] - these_ch_types = info.get_channel_types(ch_idxs, unique=True) - # each projector should have only one channel type - assert len(these_ch_types) == 1 - types.append(list(these_ch_types)[0]) data = proj["data"]["data"].ravel() - info_names = _clean_names(info["ch_names"], remove_whitespace=True) - picks = pick_channels(info_names, ch_names, ordered=True) + picks = pick_channels(info_names, proj["data"]["col_names"], ordered=True) use_info = pick_info(info, picks) + these_ch_types = use_info.get_channel_types(unique=True) + assert len(these_ch_types) == 1 # should be guaranteed above + ch_type = these_ch_types[0] ( data_picks, pos, merge_channels, names, - ch_type, + _, this_sphere, clip_origin, - ) = _prepare_topomap_plot( - use_info, - _get_plot_ch_type(use_info, None), - sphere=sphere, - ) + ) = _prepare_topomap_plot(use_info, ch_type, sphere=sphere) these_outlines = _make_head_outlines(sphere, pos, outlines, clip_origin) data = data[data_picks] if merge_channels: @@ -530,8 +542,8 @@ def _plot_projs_topomap( # handle vmin/vmax vlims = [None for _ in range(len(datas))] if vlim == "joint": - for _ch_type in set(types): - idx = np.where(np.isin(types, _ch_type))[0] + for _ch_type in set(ch_typess): + idx = np.where(np.isin(ch_typess, _ch_type))[0] these_data = np.concatenate(np.array(datas, dtype=object)[idx]) norm = all(these_data >= 0) _vl = _setup_vmin_vmax(these_data, vmin=None, vmax=None, norm=norm) From 40da6d9123debed6eaf8879c96c604b80c2f9f6e Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 3 Oct 2023 03:08:50 -0400 Subject: [PATCH 12/37] BUG: Fix bug with camera resetting (#12052) --- mne/viz/evoked_field.py | 2 +- tutorials/visualization/20_ui_events.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/mne/viz/evoked_field.py b/mne/viz/evoked_field.py index 5d50bb42f73..9e314a917ed 100644 --- a/mne/viz/evoked_field.py +++ b/mne/viz/evoked_field.py @@ -195,7 +195,6 @@ def __init__( ) self._in_brain_figure = False - self._renderer.set_interaction(interaction) self.plotter = self._renderer.plotter self.interaction = interaction @@ -243,6 +242,7 @@ def current_time_func(): subscribe(self, "contours", self._on_contours) if not self._in_brain_figure: + self._renderer.set_interaction(interaction) self._renderer.set_camera(azimuth=10, elevation=60, distance="auto") self._renderer.show() diff --git 
a/tutorials/visualization/20_ui_events.py b/tutorials/visualization/20_ui_events.py index 290f38952ce..1c799a60956 100644 --- a/tutorials/visualization/20_ui_events.py +++ b/tutorials/visualization/20_ui_events.py @@ -76,7 +76,6 @@ link(fig_brain, fig_field) fig_brain.set_time(0.1) # updates both source estimate and field lines - ######################################################################################## # Hooking a custom plot into the event system # =========================================== From c6863e5470a4ea312f0d7762d1ba4e9139d63785 Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Tue, 3 Oct 2023 04:09:33 -0500 Subject: [PATCH 13/37] Doc folder reorg (#12043) --- .../images => _static}/flow_diagram.svg | 0 doc/{ => api}/connectivity.rst | 0 doc/{ => api}/covariance.rst | 0 doc/{ => api}/creating_from_arrays.rst | 0 doc/{ => api}/datasets.rst | 0 doc/{ => api}/decoding.rst | 0 doc/{ => api}/events.rst | 0 doc/{ => api}/export.rst | 0 doc/{ => api}/file_io.rst | 0 doc/{ => api}/forward.rst | 0 doc/{ => api}/inverse.rst | 0 doc/{ => api}/logging.rst | 0 doc/{ => api}/most_used_classes.rst | 0 doc/{ => api}/mri.rst | 0 doc/{ => api}/preprocessing.rst | 0 doc/{ => api}/python_reference.rst | 0 doc/{ => api}/reading_raw_data.rst | 0 doc/{ => api}/realtime.rst | 0 doc/{ => api}/report.rst | 0 doc/{ => api}/sensor_space.rst | 0 doc/{ => api}/simulation.rst | 0 doc/{ => api}/source_space.rst | 0 doc/{ => api}/statistics.rst | 0 doc/{ => api}/time_frequency.rst | 0 doc/{ => api}/visualization.rst | 0 doc/conf.py | 86 +- doc/{install => development}/contributing.rst | 0 doc/{overview => development}/governance.rst | 0 .../development.rst => development/index.rst} | 4 +- doc/{overview => development}/roadmap.rst | 0 doc/development/whats_new.rst | 42 + doc/{overview => documentation}/cite.rst | 0 doc/{ => documentation}/cited.rst | 0 doc/{overview => documentation}/cookbook.rst | 2 +- .../datasets.rst} | 0 .../design_philosophy.rst | 0 doc/{ => documentation}/glossary.rst | 0 .../implementation.rst | 0 doc/{overview => documentation}/index.rst | 16 +- doc/{overview => help}/faq.rst | 0 doc/{overview/get_help.rst => help/index.rst} | 0 doc/{overview => help}/learn_python.rst | 0 doc/{overview => help}/migrating.rst | 0 doc/index.rst | 8 +- doc/install/advanced.rst | 41 - doc/install/manual_install_python.rst | 154 +-- doc/install/mne_tools_suite.rst | 7 +- doc/links.inc | 3 +- doc/overview/images/Digitizer-example.png | Bin 7356 -> 0 bytes doc/overview/matlab.rst | 1218 ----------------- doc/whats_new.rst | 42 - tutorials/io/70_reading_eyetracking_data.py | 3 +- 52 files changed, 140 insertions(+), 1486 deletions(-) rename doc/{overview/images => _static}/flow_diagram.svg (100%) rename doc/{ => api}/connectivity.rst (100%) rename doc/{ => api}/covariance.rst (100%) rename doc/{ => api}/creating_from_arrays.rst (100%) rename doc/{ => api}/datasets.rst (100%) rename doc/{ => api}/decoding.rst (100%) rename doc/{ => api}/events.rst (100%) rename doc/{ => api}/export.rst (100%) rename doc/{ => api}/file_io.rst (100%) rename doc/{ => api}/forward.rst (100%) rename doc/{ => api}/inverse.rst (100%) rename doc/{ => api}/logging.rst (100%) rename doc/{ => api}/most_used_classes.rst (100%) rename doc/{ => api}/mri.rst (100%) rename doc/{ => api}/preprocessing.rst (100%) rename doc/{ => api}/python_reference.rst (100%) rename doc/{ => api}/reading_raw_data.rst (100%) rename doc/{ => api}/realtime.rst (100%) rename doc/{ => api}/report.rst (100%) rename doc/{ => api}/sensor_space.rst (100%) 
rename doc/{ => api}/simulation.rst (100%) rename doc/{ => api}/source_space.rst (100%) rename doc/{ => api}/statistics.rst (100%) rename doc/{ => api}/time_frequency.rst (100%) rename doc/{ => api}/visualization.rst (100%) rename doc/{install => development}/contributing.rst (100%) rename doc/{overview => development}/governance.rst (100%) rename doc/{overview/development.rst => development/index.rst} (97%) rename doc/{overview => development}/roadmap.rst (100%) create mode 100644 doc/development/whats_new.rst rename doc/{overview => documentation}/cite.rst (100%) rename doc/{ => documentation}/cited.rst (100%) rename doc/{overview => documentation}/cookbook.rst (99%) rename doc/{overview/datasets_index.rst => documentation/datasets.rst} (100%) rename doc/{overview => documentation}/design_philosophy.rst (100%) rename doc/{ => documentation}/glossary.rst (100%) rename doc/{overview => documentation}/implementation.rst (100%) rename doc/{overview => documentation}/index.rst (86%) rename doc/{overview => help}/faq.rst (100%) rename doc/{overview/get_help.rst => help/index.rst} (100%) rename doc/{overview => help}/learn_python.rst (100%) rename doc/{overview => help}/migrating.rst (100%) delete mode 100644 doc/overview/images/Digitizer-example.png delete mode 100644 doc/overview/matlab.rst delete mode 100644 doc/whats_new.rst diff --git a/doc/overview/images/flow_diagram.svg b/doc/_static/flow_diagram.svg similarity index 100% rename from doc/overview/images/flow_diagram.svg rename to doc/_static/flow_diagram.svg diff --git a/doc/connectivity.rst b/doc/api/connectivity.rst similarity index 100% rename from doc/connectivity.rst rename to doc/api/connectivity.rst diff --git a/doc/covariance.rst b/doc/api/covariance.rst similarity index 100% rename from doc/covariance.rst rename to doc/api/covariance.rst diff --git a/doc/creating_from_arrays.rst b/doc/api/creating_from_arrays.rst similarity index 100% rename from doc/creating_from_arrays.rst rename to doc/api/creating_from_arrays.rst diff --git a/doc/datasets.rst b/doc/api/datasets.rst similarity index 100% rename from doc/datasets.rst rename to doc/api/datasets.rst diff --git a/doc/decoding.rst b/doc/api/decoding.rst similarity index 100% rename from doc/decoding.rst rename to doc/api/decoding.rst diff --git a/doc/events.rst b/doc/api/events.rst similarity index 100% rename from doc/events.rst rename to doc/api/events.rst diff --git a/doc/export.rst b/doc/api/export.rst similarity index 100% rename from doc/export.rst rename to doc/api/export.rst diff --git a/doc/file_io.rst b/doc/api/file_io.rst similarity index 100% rename from doc/file_io.rst rename to doc/api/file_io.rst diff --git a/doc/forward.rst b/doc/api/forward.rst similarity index 100% rename from doc/forward.rst rename to doc/api/forward.rst diff --git a/doc/inverse.rst b/doc/api/inverse.rst similarity index 100% rename from doc/inverse.rst rename to doc/api/inverse.rst diff --git a/doc/logging.rst b/doc/api/logging.rst similarity index 100% rename from doc/logging.rst rename to doc/api/logging.rst diff --git a/doc/most_used_classes.rst b/doc/api/most_used_classes.rst similarity index 100% rename from doc/most_used_classes.rst rename to doc/api/most_used_classes.rst diff --git a/doc/mri.rst b/doc/api/mri.rst similarity index 100% rename from doc/mri.rst rename to doc/api/mri.rst diff --git a/doc/preprocessing.rst b/doc/api/preprocessing.rst similarity index 100% rename from doc/preprocessing.rst rename to doc/api/preprocessing.rst diff --git a/doc/python_reference.rst 
b/doc/api/python_reference.rst similarity index 100% rename from doc/python_reference.rst rename to doc/api/python_reference.rst diff --git a/doc/reading_raw_data.rst b/doc/api/reading_raw_data.rst similarity index 100% rename from doc/reading_raw_data.rst rename to doc/api/reading_raw_data.rst diff --git a/doc/realtime.rst b/doc/api/realtime.rst similarity index 100% rename from doc/realtime.rst rename to doc/api/realtime.rst diff --git a/doc/report.rst b/doc/api/report.rst similarity index 100% rename from doc/report.rst rename to doc/api/report.rst diff --git a/doc/sensor_space.rst b/doc/api/sensor_space.rst similarity index 100% rename from doc/sensor_space.rst rename to doc/api/sensor_space.rst diff --git a/doc/simulation.rst b/doc/api/simulation.rst similarity index 100% rename from doc/simulation.rst rename to doc/api/simulation.rst diff --git a/doc/source_space.rst b/doc/api/source_space.rst similarity index 100% rename from doc/source_space.rst rename to doc/api/source_space.rst diff --git a/doc/statistics.rst b/doc/api/statistics.rst similarity index 100% rename from doc/statistics.rst rename to doc/api/statistics.rst diff --git a/doc/time_frequency.rst b/doc/api/time_frequency.rst similarity index 100% rename from doc/time_frequency.rst rename to doc/api/time_frequency.rst diff --git a/doc/visualization.rst b/doc/api/visualization.rst similarity index 100% rename from doc/visualization.rst rename to doc/api/visualization.rst diff --git a/doc/conf.py b/doc/conf.py index 9c35173af44..68e63396bd4 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1591,6 +1591,32 @@ def reset_warnings(gallery_conf, fname): "xdawn_denoising.py", "xhemi.py", } +api_redirects = { + "connectivity", + "covariance", + "creating_from_arrays", + "datasets", + "decoding", + "events", + "export", + "file_io", + "forward", + "inverse", + "logging", + "most_used_classes", + "mri", + "preprocessing", + "python_reference", + "reading_raw_data", + "realtime", + "report", + "sensor_space", + "simulation", + "source_space", + "statistics", + "time_frequency", + "visualization", +} ex = "auto_examples" co = "connectivity" mne_conn = "https://mne.tools/mne-connectivity/stable" @@ -1609,36 +1635,39 @@ def reset_warnings(gallery_conf, fname): custom_redirects = { # Custom redirects (one HTML path to another, relative to outdir) # can be added here as fr->to key->value mappings + "install/contributing.html": "development/contributing.html", + "overview/roadmap.html": "development/roadmap.html", + "whats_new.html": "development/whats_new.html", f"{tu}/evoked/plot_eeg_erp.html": f"{tu}/evoked/30_eeg_erp.html", f"{tu}/evoked/plot_whitened.html": f"{tu}/evoked/40_whitened.html", - f"{tu}/misc/plot_modifying_data_inplace.html": f"{tu}/intro/15_inplace.html", # noqa E501 + f"{tu}/misc/plot_modifying_data_inplace.html": f"{tu}/intro/15_inplace.html", f"{tu}/misc/plot_report.html": f"{tu}/intro/70_report.html", f"{tu}/misc/plot_seeg.html": f"{tu}/clinical/20_seeg.html", f"{tu}/misc/plot_ecog.html": f"{tu}/clinical/30_ecog.html", f"{tu}/{ml}/plot_receptive_field.html": f"{tu}/{ml}/30_strf.html", f"{tu}/{ml}/plot_sensors_decoding.html": f"{tu}/{ml}/50_decoding.html", f"{tu}/{sm}/plot_background_freesurfer.html": f"{tu}/{fw}/10_background_freesurfer.html", # noqa E501 - f"{tu}/{sm}/plot_source_alignment.html": f"{tu}/{fw}/20_source_alignment.html", # noqa E501 + f"{tu}/{sm}/plot_source_alignment.html": f"{tu}/{fw}/20_source_alignment.html", f"{tu}/{sm}/plot_forward.html": f"{tu}/{fw}/30_forward.html", f"{tu}/{sm}/plot_eeg_no_mri.html": 
f"{tu}/{fw}/35_eeg_no_mri.html", f"{tu}/{sm}/plot_background_freesurfer_mne.html": f"{tu}/{fw}/50_background_freesurfer_mne.html", # noqa E501 - f"{tu}/{sm}/plot_fix_bem_in_blender.html": f"{tu}/{fw}/80_fix_bem_in_blender.html", # noqa E501 - f"{tu}/{sm}/plot_compute_covariance.html": f"{tu}/{fw}/90_compute_covariance.html", # noqa E501 - f"{tu}/{sm}/plot_object_source_estimate.html": f"{tu}/{nv}/10_stc_class.html", # noqa E501 + f"{tu}/{sm}/plot_fix_bem_in_blender.html": f"{tu}/{fw}/80_fix_bem_in_blender.html", + f"{tu}/{sm}/plot_compute_covariance.html": f"{tu}/{fw}/90_compute_covariance.html", + f"{tu}/{sm}/plot_object_source_estimate.html": f"{tu}/{nv}/10_stc_class.html", f"{tu}/{sm}/plot_dipole_fit.html": f"{tu}/{nv}/20_dipole_fit.html", f"{tu}/{sm}/plot_mne_dspm_source_localization.html": f"{tu}/{nv}/30_mne_dspm_loreta.html", # noqa E501 f"{tu}/{sm}/plot_dipole_orientations.html": f"{tu}/{nv}/35_dipole_orientations.html", # noqa E501 f"{tu}/{sm}/plot_mne_solutions.html": f"{tu}/{nv}/40_mne_fixed_free.html", - f"{tu}/{sm}/plot_beamformer_lcmv.html": f"{tu}/{nv}/50_beamformer_lcmv.html", # noqa E501 + f"{tu}/{sm}/plot_beamformer_lcmv.html": f"{tu}/{nv}/50_beamformer_lcmv.html", f"{tu}/{sm}/plot_visualize_stc.html": f"{tu}/{nv}/60_visualize_stc.html", f"{tu}/{sm}/plot_eeg_mri_coords.html": f"{tu}/{nv}/70_eeg_mri_coords.html", f"{tu}/{sd}/plot_brainstorm_phantom_elekta.html": f"{tu}/{nv}/80_brainstorm_phantom_elekta.html", # noqa E501 f"{tu}/{sd}/plot_brainstorm_phantom_ctf.html": f"{tu}/{nv}/85_brainstorm_phantom_ctf.html", # noqa E501 f"{tu}/{sd}/plot_phantom_4DBTi.html": f"{tu}/{nv}/90_phantom_4DBTi.html", - f"{tu}/{sd}/plot_brainstorm_auditory.html": f"{tu}/io/60_ctf_bst_auditory.html", # noqa E501 + f"{tu}/{sd}/plot_brainstorm_auditory.html": f"{tu}/io/60_ctf_bst_auditory.html", f"{tu}/{sd}/plot_sleep.html": f"{tu}/clinical/60_sleep.html", f"{tu}/{di}/plot_background_filtering.html": f"{tu}/preprocessing/25_background_filtering.html", # noqa E501 - f"{tu}/{di}/plot_background_statistics.html": f"{tu}/{sn}/10_background_stats.html", # noqa E501 + f"{tu}/{di}/plot_background_statistics.html": f"{tu}/{sn}/10_background_stats.html", f"{tu}/{sn}/plot_stats_cluster_erp.html": f"{tu}/{sn}/20_erp_stats.html", f"{tu}/{sn}/plot_stats_cluster_1samp_test_time_frequency.html": f"{tu}/{sn}/40_cluster_1samp_time_freq.html", # noqa E501 f"{tu}/{sn}/plot_stats_cluster_time_frequency.html": f"{tu}/{sn}/50_cluster_between_time_freq.html", # noqa E501 @@ -1649,10 +1678,10 @@ def reset_warnings(gallery_conf, fname): f"{tu}/{sr}/plot_stats_cluster_time_frequency_repeated_measures_anova.html": f"{tu}/{sn}/70_cluster_rmANOVA_time_freq.html", # noqa E501 f"{tu}/{tf}/plot_sensors_time_frequency.html": f"{tu}/{tf}/20_sensors_time_frequency.html", # noqa E501 f"{tu}/{tf}/plot_ssvep.html": f"{tu}/{tf}/50_ssvep.html", - f"{tu}/{si}/plot_creating_data_structures.html": f"{tu}/{si}/10_array_objs.html", # noqa E501 + f"{tu}/{si}/plot_creating_data_structures.html": f"{tu}/{si}/10_array_objs.html", f"{tu}/{si}/plot_point_spread.html": f"{tu}/{si}/70_point_spread.html", f"{tu}/{si}/plot_dics.html": f"{tu}/{si}/80_dics.html", - f"{tu}/{tf}/plot_eyetracking.html": f"{tu}/preprocessing/90_eyetracking_data.html", # noqa E501 + f"{tu}/{tf}/plot_eyetracking.html": f"{tu}/preprocessing/90_eyetracking_data.html", f"{ex}/{co}/mne_inverse_label_connectivity.html": f"{mne_conn}/{ex}/mne_inverse_label_connectivity.html", # noqa E501 f"{ex}/{co}/cwt_sensor_connectivity.html": f"{mne_conn}/{ex}/cwt_sensor_connectivity.html", # 
noqa E501 f"{ex}/{co}/mixed_source_space_connectivity.html": f"{mne_conn}/{ex}/mixed_source_space_connectivity.html", # noqa E501 @@ -1661,11 +1690,21 @@ def reset_warnings(gallery_conf, fname): f"{ex}/{co}/mne_inverse_envelope_correlation_volume.html": f"{mne_conn}/{ex}/mne_inverse_envelope_correlation_volume.html", # noqa E501 f"{ex}/{co}/mne_inverse_envelope_correlation.html": f"{mne_conn}/{ex}/mne_inverse_envelope_correlation.html", # noqa E501 f"{ex}/{co}/mne_inverse_psi_visual.html": f"{mne_conn}/{ex}/mne_inverse_psi_visual.html", # noqa E501 - f"{ex}/{co}/sensor_connectivity.html": f"{mne_conn}/{ex}/sensor_connectivity.html", # noqa E501 - f"{ex}/{vi}/publication_figure.html": f"{tu}/{vi}/10_publication_figure.html", # noqa E501 + f"{ex}/{co}/sensor_connectivity.html": f"{mne_conn}/{ex}/sensor_connectivity.html", + f"{ex}/{vi}/publication_figure.html": f"{tu}/{vi}/10_publication_figure.html", } +def check_existing_redirect(path): + """Make sure existing HTML files are redirects, before overwriting.""" + if os.path.isfile(path): + with open(path, "r") as fid: + for _ in range(8): + next(fid) + line = fid.readline() + assert "Page Redirection" in line, line + + def make_redirects(app, exception): """Make HTML redirects.""" # https://www.sphinx-doc.org/en/master/extdev/appapi.html @@ -1675,7 +1714,6 @@ def make_redirects(app, exception): and exception is None ): return - logger = sphinx.util.logging.getLogger("mne") TEMPLATE = """\ @@ -1715,25 +1753,30 @@ def make_redirects(app, exception): sphinx_logger.info( f"Added {len(fnames):3d} HTML plot_* redirects for {out_dir}" ) + # API redirects + for page in api_redirects: + fname = f"{page}.html" + fr_path = os.path.join(app.outdir, fname) + to_path = os.path.join(app.outdir, "api", fname) + # allow overwrite if existing file is just a redirect + check_existing_redirect(fr_path) + with open(fr_path, "w") as fid: + fid.write(TEMPLATE.format(to=to_path)) + sphinx_logger.info(f"Added {len(api_redirects):3d} HTML API redirects") # custom redirects for fr, to in custom_redirects.items(): if not to.startswith("http"): assert os.path.isfile(os.path.join(app.outdir, to)), to # handle links to sibling folders path_parts = to.split("/") - assert tu in path_parts, path_parts # need to refactor otherwise - path_parts = [".."] + path_parts[(path_parts.index(tu) + 1) :] + if tu in path_parts: + path_parts = [".."] + path_parts[(path_parts.index(tu) + 1) :] to = os.path.join(*path_parts) assert to.endswith("html"), to fr_path = os.path.join(app.outdir, fr) assert fr_path.endswith("html"), fr_path # allow overwrite if existing file is just a redirect - if os.path.isfile(fr_path): - with open(fr_path, "r") as fid: - for _ in range(8): - next(fid) - line = fid.readline() - assert "Page Redirection" in line, line + check_existing_redirect(fr_path) # handle folders that no longer exist if fr_path.split("/")[-2] in ( "misc", @@ -1755,7 +1798,6 @@ def make_version(app, exception): and exception is None ): return - logger = sphinx.util.logging.getLogger("mne") try: stdout, _ = run_subprocess(["git", "rev-parse", "HEAD"], verbose=False) except Exception as exc: diff --git a/doc/install/contributing.rst b/doc/development/contributing.rst similarity index 100% rename from doc/install/contributing.rst rename to doc/development/contributing.rst diff --git a/doc/overview/governance.rst b/doc/development/governance.rst similarity index 100% rename from doc/overview/governance.rst rename to doc/development/governance.rst diff --git a/doc/overview/development.rst 
b/doc/development/index.rst similarity index 97% rename from doc/overview/development.rst rename to doc/development/index.rst index 7840b245a7a..1bdc5322f36 100644 --- a/doc/overview/development.rst +++ b/doc/development/index.rst @@ -29,7 +29,7 @@ experience. .. toctree:: :hidden: - ../install/contributing - ../whats_new + contributing + whats_new roadmap governance diff --git a/doc/overview/roadmap.rst b/doc/development/roadmap.rst similarity index 100% rename from doc/overview/roadmap.rst rename to doc/development/roadmap.rst diff --git a/doc/development/whats_new.rst b/doc/development/whats_new.rst new file mode 100644 index 00000000000..f9cb6e1c4b0 --- /dev/null +++ b/doc/development/whats_new.rst @@ -0,0 +1,42 @@ +.. _whats_new: + +What's new +========== + +Changes for each version of MNE-Python are listed below. + +.. toctree:: + :maxdepth: 1 + + ../changes/devel.rst + ../changes/v1.5.rst + ../changes/v1.4.rst + ../changes/v1.3.rst + ../changes/v1.2.rst + ../changes/v1.1.rst + ../changes/v1.0.rst + ../changes/v0.24.rst + ../changes/v0.23.rst + ../changes/v0.22.rst + ../changes/v0.21.rst + ../changes/v0.20.rst + ../changes/v0.19.rst + ../changes/v0.18.rst + ../changes/v0.17.rst + ../changes/v0.16.rst + ../changes/v0.15.rst + ../changes/v0.14.rst + ../changes/v0.13.rst + ../changes/v0.12.rst + ../changes/v0.11.rst + ../changes/v0.10.rst + ../changes/v0.9.rst + ../changes/v0.8.rst + ../changes/v0.7.rst + ../changes/v0.6.rst + ../changes/v0.5.rst + ../changes/v0.4.rst + ../changes/v0.3.rst + ../changes/v0.2.rst + ../changes/v0.1.rst + ../old_versions/index.rst diff --git a/doc/overview/cite.rst b/doc/documentation/cite.rst similarity index 100% rename from doc/overview/cite.rst rename to doc/documentation/cite.rst diff --git a/doc/cited.rst b/doc/documentation/cited.rst similarity index 100% rename from doc/cited.rst rename to doc/documentation/cited.rst diff --git a/doc/overview/cookbook.rst b/doc/documentation/cookbook.rst similarity index 99% rename from doc/overview/cookbook.rst rename to doc/documentation/cookbook.rst index 5aab1045e2e..6a735ccd703 100644 --- a/doc/overview/cookbook.rst +++ b/doc/documentation/cookbook.rst @@ -13,7 +13,7 @@ References below refer to Python functions and objects. .. _flow_diagram: -.. figure:: images/flow_diagram.svg +.. figure:: ../_static/flow_diagram.svg :alt: MNE Workflow Flowchart :align: center diff --git a/doc/overview/datasets_index.rst b/doc/documentation/datasets.rst similarity index 100% rename from doc/overview/datasets_index.rst rename to doc/documentation/datasets.rst diff --git a/doc/overview/design_philosophy.rst b/doc/documentation/design_philosophy.rst similarity index 100% rename from doc/overview/design_philosophy.rst rename to doc/documentation/design_philosophy.rst diff --git a/doc/glossary.rst b/doc/documentation/glossary.rst similarity index 100% rename from doc/glossary.rst rename to doc/documentation/glossary.rst diff --git a/doc/overview/implementation.rst b/doc/documentation/implementation.rst similarity index 100% rename from doc/overview/implementation.rst rename to doc/documentation/implementation.rst diff --git a/doc/overview/index.rst b/doc/documentation/index.rst similarity index 86% rename from doc/overview/index.rst rename to doc/documentation/index.rst index 0f20136e990..6830edff012 100644 --- a/doc/overview/index.rst +++ b/doc/documentation/index.rst @@ -7,7 +7,7 @@ Documentation overview If you haven't already installed MNE-Python, please take a look at our :ref:`installation guides`. 
Please also kindly find some - resources for :doc:`learn_python` if you need to. + resources for :doc:`../help/learn_python` if you need to. The documentation for MNE-Python is divided into four main sections: @@ -24,12 +24,12 @@ The documentation for MNE-Python is divided into four main sections: how a particular technique you've read about can be applied using MNE-Python. -3. The :doc:`../glossary` provides short definitions of MNE-Python-specific +3. The :doc:`glossary` provides short definitions of MNE-Python-specific vocabulary and general neuroimaging concepts. The glossary is often a good place to look if you don't understand a term or acronym used somewhere else in the documentation. -4. The :doc:`API reference <../python_reference>` provides documentation for +4. The :doc:`API reference <../api/python_reference>` provides documentation for the classes, functions and methods in the MNE-Python codebase. This is the same information that is rendered when running :samp:`help(mne.{})` in an interactive Python session, or @@ -42,7 +42,7 @@ categories above) are shown in the navigation menu, including the :ref:`implementation details`, and more. Documentation for the related C and MATLAB tools are available here: -- :ref:`MNE-MATLAB ` (HTML) +- `MNE-MATLAB`_ (repository) - `MNE-C `_ (PDF) .. toctree:: @@ -50,12 +50,12 @@ Documentation for the related C and MATLAB tools are available here: Tutorials<../auto_tutorials/index> Examples<../auto_examples/index> - ../glossary + glossary Implementation details design_philosophy - Example datasets + Example datasets Command-line tools<../generated/commands> - migrating + ../help/migrating cookbook cite - ../cited + cited diff --git a/doc/overview/faq.rst b/doc/help/faq.rst similarity index 100% rename from doc/overview/faq.rst rename to doc/help/faq.rst diff --git a/doc/overview/get_help.rst b/doc/help/index.rst similarity index 100% rename from doc/overview/get_help.rst rename to doc/help/index.rst diff --git a/doc/overview/learn_python.rst b/doc/help/learn_python.rst similarity index 100% rename from doc/overview/learn_python.rst rename to doc/help/learn_python.rst diff --git a/doc/overview/migrating.rst b/doc/help/migrating.rst similarity index 100% rename from doc/overview/migrating.rst rename to doc/help/migrating.rst diff --git a/doc/index.rst b/doc/index.rst index 0d17b9bf395..d97ce27d9bc 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -35,7 +35,7 @@ MNE-Python Homepage :hidden: Install - Documentation - API Reference - Get help - Development + Documentation + API Reference + Get help + Development diff --git a/doc/install/advanced.rst b/doc/install/advanced.rst index 065b9c1f9e7..d22d9f7770f 100644 --- a/doc/install/advanced.rst +++ b/doc/install/advanced.rst @@ -102,47 +102,6 @@ just prefer to use git rather than pip to make frequent updates, there are instructions for installing from a ``git clone`` in the :ref:`contributing`. -.. _other-py-distros: - -Other Python distributions -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -While the `Anaconda`_ Python distribution provides many conveniences, other -distributions of Python should also work with MNE-Python. In particular, -`Miniconda`_ is a lightweight alternative to Anaconda that is fully compatible; -like Anaconda, Miniconda includes the ``conda`` command line tool for -installing new packages and managing environments; unlike Anaconda, Miniconda -starts off with a minimal set of around 30 packages instead of Anaconda's -hundreds. See the `installation instructions for Miniconda`_ for more info. 
-A similar alternative is `MiniForge`_, which uses the ``conda-forge`` channel -as the default source for package installation (saving you the trouble of -typing ``--channel=conda-forge`` with each ``conda install`` command). - -.. warning:: - - If you have the ``PYTHONPATH`` or ``PYTHONHOME`` environment variables set, - you may run into difficulty using Anaconda. See the - `Anaconda troubleshooting guide`_ for more information. Note that it is - easy to switch between ``conda``-managed Python installations and the - system Python installation using the ``conda activate`` and ``conda - deactivate`` commands, so you may find that after adopting Anaconda it is - possible (indeed, preferable) to leave ``PYTHONPATH`` and ``PYTHONHOME`` - permanently unset. - - -It is also possible to use a system-level installation of Python (version -|min_python_version| or higher) and use ``pip`` to install MNE-Python and its -dependencies, using the provided `requirements file`_: - -.. code-block:: console - - $ curl --remote-name https://raw.githubusercontent.com/mne-tools/mne-python/main/requirements.txt - $ pip install --user -r requirements.txt - -Other configurations will probably also work, but we may be unable to offer -support if you encounter difficulties related to your particular Python -installation choices. - .. _CUDA: GPU acceleration with CUDA diff --git a/doc/install/manual_install_python.rst b/doc/install/manual_install_python.rst index 182ee258a01..1cb071363bc 100644 --- a/doc/install/manual_install_python.rst +++ b/doc/install/manual_install_python.rst @@ -3,151 +3,23 @@ .. _install-python: Installing Python -^^^^^^^^^^^^^^^^^ +================= MNE-Python requires Python and several Python packages. MNE-Python -version |version| requires Python version |min_python_version| or higher. We -recommend the `Anaconda`_ distribution of Python, which comes with more than -250 scientific packages pre-bundled and includes the ``conda`` command line -tool for installing new packages and managing different package sets -("environments") for different projects. +version |version| requires Python version |min_python_version| or higher. -To get started, follow the `installation instructions for Anaconda`_. -When you are done, if you type the following commands in a command shell, -you should see outputs similar to the following (assuming you installed -conda to ``/home/user/anaconda3``): +We recommend using a conda-based Python installation, such as `Anaconda`_, `Miniconda`_, +`MiniForge`_, or `Mambaforge`_. For new users we recommend our pre-built :ref:`installers`, +which use conda environments under the hood. -.. tab-set:: - :class: platform-selector-tabset +.. _other-py-distros: - .. tab-item:: Linux - :name: linux-manual-install +Other Python distributions +^^^^^^^^^^^^^^^^^^^^^^^^^^ - .. code-block:: console - - $ conda --version && python --version - conda 4.9.2 - Python 3.8.13 :: Anaconda, Inc. - $ which python - /home/user/anaconda3/bin/python - $ which pip - /home/user/anaconda3/bin/pip - - - .. tab-item:: macOS - :name: macos-manual-install - - .. code-block:: console - - $ conda --version && python --version - conda 4.9.2 - Python 3.8.13 - $ which python - /Users/user/opt/anaconda3/bin/python - $ which pip - /Users/user/opt/anaconda3/bin/pip - - - .. tab-item:: Windows - :name: windows-manual-install - - Most of our instructions start with ``$``, which indicates - that the commands are designed to be run from a ``bash`` command shell. 
- - Windows command prompts do not expose the same command-line tools as - ``bash`` shells, so commands like ``which`` will not work. You can test - your installation in Windows ``cmd.exe`` shells with ``where`` instead: - - .. code-block:: doscon - - > where python - C:\Users\user\anaconda3\python.exe - > where pip - C:\Users\user\anaconda3\Scripts\pip.exe - - -.. raw:: html - -
- - -.. dropdown:: If you get an error... - :color: danger - :icon: alert-fill - - .. rubric:: If you see something like: - - :: - - conda: command not found - - It means that your ``PATH`` variable (what the system uses to find - programs) is not set properly. In a correct installation, doing:: - - $ echo $PATH - ...:/home/user/anaconda3/bin:... - - Will show the Anaconda binary path (above) somewhere in the output - (probably at or near the beginning), but the ``command not found`` error - suggests that it is missing. - - On Linux or macOS, the installer should have put something - like the following in your ``~/.bashrc`` or ``~/.bash_profile`` (or your - ``.zprofile`` if you're using macOS Catalina or later, where the default - shell is ``zsh``): - - .. code-block:: console - - # >>> conda initialize >>> - # !! Contents within this block are managed by 'conda init' !! - __conda_setup= ... - ... - # <<< conda initialize <<< - - If this is missing, it is possible that you are not on the same shell that - was used during the installation. You can verify which shell you are on by - using the command:: - - $ echo $SHELL - - If you do not find this line in the configuration file for the shell you - are using (bash, zsh, tcsh, etc.), try running:: - - conda init - - in your command shell. If your shell is not ``cmd.exe`` (Windows) or - ``bash`` (Linux, macOS) you will need to pass the name of the shell to the - ``conda init`` command. See ``conda init --help`` for more info and - supported shells. - - You can also consult the Anaconda documentation and search for - Anaconda install tips (`Stack Overflow`_ results are often helpful) - to fix these or other problems when ``conda`` does not work. - - -.. raw:: html - - +While conda-based CPython distributions provide many conveniences, other types of +installation (``pip`` / ``poetry``, ``venv`` / system-level) and/or other Python +distributions (PyPy) *should* also work with MNE-Python. Generally speaking, if you can +install SciPy, getting MNE-Python to work should be unproblematic. Note however that we +do not offer installation support for anything other than conda-based installations. diff --git a/doc/install/mne_tools_suite.rst b/doc/install/mne_tools_suite.rst index 1f16621837b..579e3c77c08 100644 --- a/doc/install/mne_tools_suite.rst +++ b/doc/install/mne_tools_suite.rst @@ -21,11 +21,10 @@ Related software types like functional near-infrared spectroscopy (fNIRS). MNE-Python is collaboratively developed and has more than 200 contributors. -- :ref:`MNE MATLAB ` provides a MATLAB interface to the .fif file - format and other MNE data structures, and provides example MATLAB +- `MNE-MATLAB`_ provides a MATLAB interface to the .fif + file format and other MNE data structures, and provides example MATLAB implementations of some of the core analysis functionality of MNE-C. It is - distributed alongside MNE-C, and can also be downloaded from the `MNE-MATLAB - git repository`_. + distributed alongside MNE-C, and can also be downloaded from the `MNE-MATLAB`_ GitHub repository. - :ref:`MNE-CPP ` provides core MNE functionality implemented in C++ and is primarily intended for embedded and real-time applications. diff --git a/doc/links.inc b/doc/links.inc index f60bd54c852..388144d3ddf 100644 --- a/doc/links.inc +++ b/doc/links.inc @@ -19,7 +19,7 @@ .. _`MNE-BIDS-Pipeline`: https://mne.tools/mne-bids-pipeline .. _`MNE-HCP`: http://mne.tools/mne-hcp .. _`MNE-Realtime`: https://mne.tools/mne-realtime -.. 
_`MNE-MATLAB git repository`: https://github.com/mne-tools/mne-matlab +.. _`MNE-MATLAB`: https://github.com/mne-tools/mne-matlab .. _`MNE-Docker`: https://github.com/mne-tools/mne-docker .. _`MNE-ICAlabel`: https://github.com/mne-tools/mne-icalabel .. _`MNE-Connectivity`: https://github.com/mne-tools/mne-connectivity @@ -104,6 +104,7 @@ .. _anaconda: https://www.anaconda.com/products/individual .. _miniconda: https://conda.io/en/latest/miniconda.html .. _miniforge: https://github.com/conda-forge/miniforge +.. _mambaforge: https://mamba.readthedocs.io/en/latest/mamba-installation.html#mamba-install .. _installation instructions for Anaconda: http://docs.continuum.io/anaconda/install .. _installation instructions for Miniconda: https://conda.io/projects/conda/en/latest/user-guide/install/index.html .. _Anaconda troubleshooting guide: http://conda.pydata.org/docs/troubleshooting.html diff --git a/doc/overview/images/Digitizer-example.png b/doc/overview/images/Digitizer-example.png deleted file mode 100644 index 35ca1aa50c7aa2ff653911fdd46e6926825609ad..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 7356 zcmc&&cT`hbm%pGQ3W&f{FjS>W4bml`pme25Z-R6}kR}ii5CKt|fFMYD2%!ie(h^$0 zCjkVcm(Wo{4@F7{5X!*&zBlWec{B6JtXXT;UFWX7e)sHq_Bs1^es|v!qyJEY;UefF z000bHn)eI=fXe2iU7@8pL5iy4ssVtu%2iEG-_^kZ00PF|;o(ZgOWfyw^?>=mCceC< zetjsBcCz`^L;?7Enznkja#wEGplO0K;N4hC3das;wk%=a(;|!23gY=*AY$ zTeEHLdKa@t6QXDT`L+U1l&mwSeOGA-_9Pe0^9eVS5fn<0n&%^;X;=d2QC)rU7?)| zzVqh79iGH32P&jT*e)Bp2os<<3V7@qrc?I{pSNRY2DS3on%kYP8KJKF!y13J0BE~K z<#Sl(&g%gXgnsN!TjOoK?gBjYC9?+cmjFO@oO96T1*+c*4^qlZJ?oBES zv0r)bb9i)&=k8xN4MQCY^pt|6sd;`cD@NY7`@zHOz+!aEC#uCur-VtCS0m18Y%H#v zoA_f8Us5HKXab$A|vNb#o*b+ zK8H5FOrfK){Mje2=7INHiq}Q-aul|j6P$YveUH9I=13fQFX5-fZEL-&m&ZuU>~ij& zlZ{)HlNY)Xmx;$NHKE6UG;4$^d%Ah<5!(kApb|EI4UayeqEh46!onlyeQMW)m`NloTxB^hYT>+N?XQc_Y))>x;mF+X!6-WfJh^Y)@${mrmU zz4Q832iEQF{S_6PuZ^ZnO|nh0jvJN_nyefDoB;da-uh$-vg9}~x16$(Q(fH@d^nF} zUd|Zv#ZsMNppT1v5cNKTLB`pPp54>c(Gd#y>NNY!AW!|)YHRFiCQlNXUiz{&!=x*Iak`$791kFuUVF?i}Rr8?A{4N<6tZ*6Xo zS5>@z+1>q_`>|OgUjK1QYLdCWfv?Hep-=8Za&F%^jD?kgw`*zU+5c%!yuZA_ zvX{JUu^iVY!u_8=6-}(l$N@>I-+&`{JKo~<9?@Q=zeRdolDK$IXc!i zJCECd+A>>=<$H&+kwAVh8z;od+rA`6%#Yiw>6ADKYb;Si=*FQBe z(0BMO^~8luB6u$IW9`ruGGOA{Z+F#Fo>!$6J^6#B{DD<=KKS{2-J*ch#fIJfq`NwM z?JdH0JL!zxiM%j%b)vo1gVeI7@N~LI9JL}oaCy#q6i+nz6z-IzOiK7%osFDhUuBfn zfAmGX!QwYB=@ql)3kFZ&?G=W|iq$K;B8Pf%N1qo(4_79)JXx2;#j(=6c%!stlu*fL z$++iQgUq4-IK&@WvG(vVh7!ST1y9)p+;^WbT5#s zjJfI~$)zTQ_qa>Hit}Ah2OG2J40EBoPS1{Czb$;|x0iE!WZ<}tap@S|eLCx-dlA2z zo?}xc?)sh!{`j(CBSkrsuxWje4VOZRy^x)Q_z$lMa{zXGGn)dZ@Fb zB>O<7)?#;uo@wSS zz;yPqYFV8Ml|AiS;F{7O5%hhRznWC5t%tnWxGeRm@w4h#Dn6!<(&ipdjb~)F`f)$l zC^slh6tZ2B3B`X+=$_R=^0!Ie;%QUMPK(>?c7DoPeYN}7pS(K4!$)phRQi@kdwtgI zmCAKBkC#G7qH^Lf1h`|(I5Kw@D6|3v4aKO|m{9?uG16KBuj36QC-M!;6aPrUX|w#M zdM)`UXB);rieqIp3;4l(ziuacVuk&*u%WTn(YOa3pI*OX3zJWf3tpP$J^(<+eA=k` zC!Zgjg8p82o&TzTfJzgiYXHDZ(7JcmIACmbvhyjSjyde{-mnKY3;6B)ne=KFudDI* z!zC}B`@t8?DxRII5FpJp8E|O(j+n_L%U^h%)2&u4w ze<{-|AXF2MdmxT@_qMw7!%Tc_qr@;fBeco4HvggUQ|-|v)KU4q6{aLG)^ItoQbH?T zXC@H3!2dLHd(7MZedWZ>^jdqi*QT(jh8q8~!$Z4Pg3;?ZI_$!}jp;SjaS3bj;^bK) z73br!eVq>YuNKrixK6yjb=E3sBE;JEcBl-tRYfLS7%F@<`u&7|Vx^McXf-Grrf_iU z$&BAQksXWf%@psNhoAZn(^3_x)~Xwl_VKZ4RrLIhx|6B1)x8*W(ryYaH=k1z4SmX$ zr7~hTwLl5;uC+0xQZi9$-`VDTwI*OOFybsD8Hfqu~I-G}%KxfDpHdl4~%P*+@KzA``2 zo$-TND$@TpZB`y=!QJ&MbkwKIF(UvKu~?>8yCtuuC5akWXc7ns6{^LdZR@Nmdiqz8;gkn_)-E$mE}tfC(O_c1pv=xu#yIAN zm{A;Bs&0vS9S!Yf8TR{OPi#%4pv~PW!hU`6k*LO8$6IYP`$5{)4(q7e5D{U=n^2Kk zc15`#M?o4fpz=jXD#9-otKvw(wteEeRKCxt1s_L;GVhYkh&ft67$5~WOKr9rvzISg 
z61kR1<9)Td_=^0}l1Q9o%skcYP-e`LV&GL!bElYdX&I#DqaO^DW|o2zsPfc4EbBCi zYO%n1F5%cgmdT>{spCMbTFFoPB{LT-Yc)u6wms80thB5;$@7hwrg+$}kVeVAxw+!T zl;@o3a01?&J@N^K_d?6Z9qWro;* z;D)PPNfs?P9Bp%jncY36Z*rCS`Gg+mOD%Xxka5~$J}!sp&qvvASE4M z$&>kuknGeL?<$4BLgL2opidsUfb-3i{iJ1}lhNjbUgvy;K&=Mqq`m{#%Xy68+|v`a=g%>O^v{3yM)*~vb~9MHug!pbJ`a1a=h`GW^$%s! z)!pmbE%-hwJ8*vJa}1{NjAUoSM}*HAvJ~bssb+>$osw;{c!BdN3VZ~Ed zu?I;ACgqDFIyGKL<}0;DBDbbD>h7%^Vq5ZL+pPv~S&DB+ok!Y;s%t29XtlYU%B|=} zF#7bOW9}2p%-hr~iHDZ7UDLPWhv^9?QZ4r1Ex6xpb3LCPi!P{+Xlu;N^hva}&SLXu{;PzR8Gn%vp|uzVH=hM2WgSR-}n{R9f6wO`c93c7$S&nPQ~`C1T`wWLb;mm9Tk(kM;+; z`_1Ani)e|u9f;QXiMk2!Yc&{#2662_TdS*PNDDxUi@#LnTREUf0M4#fN0{jj2j=YD*djbBN`MNNwbu|r(S9)MlS;tEgx*z)ck!foFXIrF=@4T zk8fNJWuk-NcE+~{RHHn<_(M87F>AD)G3Uy^#YA3R+S)yNRq+yw1hDP2bP|>?XwG*ip?D5g-}S#W9)=WTO!|7A*}ZH3KR zqnrE={0QcfrM3vv2Z4(D7`|O;1`&t(;yA%&Z_rk4DXe%4K8rN{{;tWX>TC#iG!v79 z%9uA~V6qK<^xEryv@*e~2MtCBZDA9KbJ)_7cf?~VzkiM~^_F<*zK|Qm^?jmb5#fnV zaoABLnHBuz6hjU|eBRld@RK3JrDM%Dkr2M}q(K`A@WbSBAO7yg0x@#_(RY_Ox7_4MAK_m=HZs+a)qWTosJEV{kjz22f zmnN`{^K6)G?(GFqPO#Ojtx7+Gg8Q)~CI|Kh0_|E^Dl&7fi0G?w(F0~DSP0H+`Zvhb zR)mK;%=-bbfZta;>fjy3`2k)D$ta~S3dg?0K;r#^AJIuNwWDPW^9d-_L*DZ{@9w@ju>u3 zW(9hKAHso5i8N$RWjr(g{ z-b*Pwi!Yt1>@Z+c zO*rkhQxVj_&CQMMoSdLg;D4deh7}my^7oQ; zN;_Ty@$1`fkW(t5@6I4+10G&U(Ct(ElY}>8o{h_{Kfon@cw@9Pzyd1yle5IUHcKYk zp6x#wX}iNHWWN*A1RXEfuf$C~-waq2wqi%2Sbw_DZYxfB+Xvg=t1q?jpr#tH$7B+c z>el;;`KQJoqy{@QaWjfFd0JMi1!j%KP+VS>(Od(`St81Z5?Kgaa4W;4jf?gU4Y?z; z9BC;bJs+3!o8O)#ha5{>$YvqD73~QvFL#(mmyP>~NmQ#wxQfZNDhO`g6l~HfUCYVI z(P=BBA>r7yQy2IG4lFAm?Na!lVv6>aeZ@D5CTlSk!Z914AS1?~)EJm^Z3;zP$+Bhe zc{kri$3~Jpu}qv_cH4hz@KdO0YT3`6NA|F!fDi}l!q0G``vN&T&DHj^$Wy2bp6+g$ zIzK1nD~4A#7d*Zdq`C0Ar1ktrheJabyg^t?TH2#8;tELDy^#?;zB>K#r#4S63;bQO za)C4r4&1#mlOf@1BT=xIs`V1>czt0ZHP*b~ZR_shm7L>vJd;VkfL5KZ3HP&dl6TGVfqFZX2>D_!p=(b|G zvLT~=qAD7(q_+VgF^8g8=3bR6+j~UdEH9XL{i#ADG=D27%yvQ=658_oMnAwqLZH?! zmrl7cVq=m(J{8T{5Ae`X*gw&lxYNIM3s!2t3niwFWkZsh$YqWj&G*b2Cwt9-En!p;_^(!tjx3-jL~R9EBc~g zYJKA;WUyhW)iaTDwze?`w;5#^>>Xjeiwz}_i)NGh_HN(@UtA{FW>1|h5=1AK%hsmI zidgt^6I%ZPM|5ksCZ?=Xf@aVQpo9(2)6w}at$35?j0j2O5iky8Zs$E~LNRZu9(Aio@&UvbtCUH`+j>j;|x?mZ}ex1_Oh=b&^;-ueds3e zpgxmr-aj;mI|h?9(!*;I|4$u$=S4F4lfoxy{?DgBj?-y!?w zj-&t;Wo3)B`dTuX{1-YN@Q*SAu;Y*Yi=_X~*8jm^(MxL1mj`&r3q9QHQX5u(3OEF? zW9wNMs9!nOQ8h(RWpW#xoPqKC-Z%4g@OFIc>nQf**9H6i#u1k`VSqvVb6TTyd1s7e0;rKy#SdYTc-Sz4uIDEhxf`< H?OyyF|AST{ diff --git a/doc/overview/matlab.rst b/doc/overview/matlab.rst deleted file mode 100644 index 73dee9b0930..00000000000 --- a/doc/overview/matlab.rst +++ /dev/null @@ -1,1218 +0,0 @@ -:orphan: - -.. _mne_matlab: - -======================== -MNE-MATLAB documentation -======================== - -.. note:: The MNE MATLAB Toolbox is compatible with Matlab versions 7.0 or later. - -Overview -######## - -The MNE software contains a collection Matlab ``.m``-files to -facilitate interfacing with binary file formats of the MNE software. -The toolbox is located at ``$MNE_ROOT/share/matlab`` . The -names of the MNE Matlab toolbox functions begin either with ``mne_`` or -with ``fiff_`` . When you source the ``mne_setup`` script -as described in :ref:`user_environment`, one of the following actions -takes place: - -- If you do not have the Matlab startup.m - file, it will be created and lines allowing access to the MNE Matlab - toolbox are added. - -- If you have startup.m and it does not have the standard MNE - Matlab toolbox setup lines, you will be instructed to add them manually. - -- If you have startup.m and the standard MNE Matlab toolbox - setup lines are there, nothing happens. 
- -A summary of the available routines is provided in the `MNE-C manual`_. The -toolbox also contains a set of examples which may be useful starting points -for your own development. The names of these functions start with ``mne_ex``. - -.. note:: - - The MATLAB function ``fiff_setup_read_raw`` has a significant change. The - sample numbers now take into account possible initial skip in the file, - *i.e.*, the time between the start of the data acquisition and the start of - saving the data to disk. The ``first_samp`` member of the returned structure - indicates the initial skip in samples. If you want your own routines, which - assume that initial skip has been removed, perform identically with the - previous version, subtract ``first_samp`` from the sample numbers you - specify to ``fiff_read_raw_segment``. Furthermore, ``fiff_setup_read_raw`` - has an optional argument to allow reading of unprocessed MaxShield data - acquired with the Elekta MEG systems. - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. _BGBCGHAG: -.. table:: High-level reading routines. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_find_evoked | Find all evoked data sets from a file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_bad_channels | Read the bad channel list. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_ctf_comp | Read CTF software gradient compensation data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_evoked | Read evoked-response data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_evoked_all | Read all evoked-response data from a file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_meas_info | Read measurement information. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_mri | Read an MRI description file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_proj | Read signal-space projection data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_raw_segment | Read a segment of raw data with time limits are specified | - | | in samples. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_raw_segment_times | Read a segment of raw data with time limits specified | - | | in seconds. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_setup_read_raw | Set up data structures before using fiff_read_raw_segment | - | | or fiff_read_raw_segment_times. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Channel selection utilities. 
- - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_pick_channels | Create a selector to pick desired channels from data | - | | according to include and exclude lists. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_pick_channels_evoked | Pick desired channels from evoked-response data according | - | | to include and exclude lists. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_pick_info | Modify measurement info to include only selected channels. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_pick_types | Create a selector to pick desired channels from data | - | | according to channel types (MEG, EEG, STIM) in combination | - | | with include and exclude lists. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_pick_types_evoked | Pick desired channels from evoked-response data according | - | | to channel types (MEG, EEG, STIM) in combination with | - | | include and exclude lists. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Coordinate transformation utilities. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_invert_transform | Invert a coordinate transformation structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_reset_ch_pos | Reset channel position transformation to the default values | - | | present in the file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_transform_eeg_chs | Transform electrode positions to another coordinate frame. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_transform_meg_chs | Apply a coordinate transformation to the sensor location | - | | data to bring the integration points to another coordinate | - | | frame. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Basic reading routines. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_define_constants | Define a structure which contains the constant relevant | - | | to fif files. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_dir_tree_find | Find nodes of a given type in a directory tree structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_list_dir_tree | List a directory tree structure. 
| - +--------------------------------+--------------------------------------------------------------+ - | fiff_make_dir_tree | Create a directory tree structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_open | Open a fif file and create the directory tree structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_named_matrix | Read a named matrix from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_tag | Read one tag from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_read_tag_info | Read the info of one tag from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_split_name_list | Split a colon-separated list of names into a cell array | - | | of strings. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Writing routines. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_end_block | Write a FIFF_END_BLOCK tag. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_end_file | Write the standard closing. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_start_block | Write a FIFF_START_BLOCK tag. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_start_file | Write the appropriate beginning of a file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_ch_info | Write a channel information structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_coord_trans | Write a coordinate transformation structure. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_ctf_comp | Write CTF compensation data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_dig_point | Write one digitizer data point. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_complex | Write single-precision complex numbers. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_complex_matrix | Write a single-precision complex matrix. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_double | Write double-precision floats. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_double_complex | Write double-precision complex numbers. | - +--------------------------------+--------------------------------------------------------------+ - |fiff_write_double_complex_matrix| Write a double-precision complex matrix. 
| - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_double_matrix | Write a double-precision matrix. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_evoked | Write an evoked-reponse data file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_float | Write single-precision floats. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_float_matrix | Write a single-precision matrix. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_id | Write an id tag. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_int | Write 32-bit integers. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_int_matrix | Write a matrix of 32-bit integers. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_name_list | Write a name list. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_named_matrix | Write a named matrix. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_proj | Write SSP data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_short | Write 16-bit integers. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_string | Write a string. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: High-level data writing routines. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | fiff_write_evoked | Write an evoked-response data file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_finish_writing_raw | Write the closing tags to a raw data file. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_start_writing_raw | Start writing raw data file, *i.e.*, write the measurement | - | | information. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_dig_file | Write a fif file containing digitization data. | - +--------------------------------+--------------------------------------------------------------+ - | fiff_write_raw_buffer | Write one raw data buffer. This is used after a call to | - | | fiff_start_writing_raw. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Coil definition utilities. 
- - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_add_coil_defs | Add coil definitions to an array of channel information | - | | structures. | - +--------------------------------+--------------------------------------------------------------+ - | mne_load_coil_def | Load a coil definition file. | - +--------------------------------+--------------------------------------------------------------+ - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: Routines for software gradient compensation and signal-space projection. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_compensate_to | Apply or remove CTF software gradient compensation from | - | | evoked-response data. | - +--------------------------------+--------------------------------------------------------------+ - | mne_get_current_comp | Get the state of software gradient compensation from | - | | measurement info. | - +--------------------------------+--------------------------------------------------------------+ - | mne_make_compensator | Make a compensation matrix which switches the status of | - | | CTF software gradient compensation from one state to another.| - +--------------------------------+--------------------------------------------------------------+ - | mne_make_projector_info | Create a signal-space projection operator with the | - | | projection item definitions and cell arrays of channel names | - | | and bad channel names as input. | - +--------------------------------+--------------------------------------------------------------+ - | mne_make_projector_info | Like mne_make_projector but uses the measurement info | - | | structure as input. | - +--------------------------------+--------------------------------------------------------------+ - | mne_set_current_comp | Change the information about the compensation status in | - | | measurement info. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: High-level routines for reading MNE data files. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_pick_channels_cov | Pick desired channels from a sensor covariance matrix. | - +--------------------------------+--------------------------------------------------------------+ - | mne_pick_channels_forward | Pick desired channels (rows) from a forward solution. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_bem_surfaces | Read triangular tessellations of surfaces for | - | | boundary-element models. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_cov | Read a covariance matrix. 
| - +--------------------------------+--------------------------------------------------------------+ - | mne_read_epoch | Read an epoch of data from the output file of mne_epochs2mat.| - +--------------------------------+--------------------------------------------------------------+ - | mne_read_events | Read an event list from a fif file produced by | - | | mne_browse_raw or mne_process_raw. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_forward_solution | Read a forward solution from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_inverse_operator | Read an inverse operator from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_morph_map | Read an morphing map produced with mne_make_morph_maps. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_noise_cov | Read a noise-covariance matrix from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_source_spaces | Read source space information from a fif file. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. table:: High-level routines for writing MNE data files. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_write_cov | Write a covariance matrix to an open file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_cov_file | Write a complete file containing just a covariance matrix. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_events | Write a fif format event file compatible with mne_browse_raw | - | | and mne_process_raw. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_inverse_sol_stc | Write stc files containing an inverse solution or other | - | | dynamic data on the cortical surface. | - +--------------------------------+--------------------------------------------------------------+ - | mne_write_inverse_sol_w | Write w files containing an inverse solution or other static | - | | data on the cortical surface. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. _BABBDDAI: -.. table:: Routines related to stc, w, and label files. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_read_stc_file | Read data from one stc file. The vertex numbering in the | - | | returned structure will start from 0. | - +--------------------------------+--------------------------------------------------------------+ - | mne_read_stc_file1 | Read data from one stc file. The vertex numbering in the | - | | returned structure will start from 1. 
|
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_read_w_file                | Read data from one w file. The vertex numbering in the       |
-    |                                | returned structure will start from 0.                        |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_read_w_file1               | Read data from one w file. The vertex numbering in the       |
-    |                                | returned structure will start from 1.                        |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_write_stc_file             | Write a new stc file. It is assumed that the vertex numbering|
-    |                                | in the input data structure containing the stc information   |
-    |                                | starts from 0.                                               |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_write_stc_file1            | Write a new stc file. It is assumed that the vertex numbering|
-    |                                | in the input data structure containing the stc information   |
-    |                                | starts from 1.                                               |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_write_w_file               | Write a new w file. It is assumed that the vertex numbering  |
-    |                                | in the input data structure containing the w file            |
-    |                                | information starts from 0.                                   |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_write_w_file1              | Write a new w file. It is assumed that the vertex numbering  |
-    |                                | in the input data structure containing the w file            |
-    |                                | information starts from 1.                                   |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_read_label_file            | Read a label file (ROI).                                     |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_write_label_file           | Write a label file (ROI).                                    |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_label_time_courses         | Extract time courses corresponding to a label from an        |
-    |                                | stc file.                                                    |
-    +--------------------------------+--------------------------------------------------------------+
-
-
-.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}|
-.. table:: Routines for reading FreeSurfer surfaces.
-
-    +--------------------------------+--------------------------------------------------------------+
-    | Function                       | Purpose                                                      |
-    +================================+==============================================================+
-    | mne_read_curvature             | Read a curvature file.                                       |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_read_surface               | Read one surface, return the vertex locations and            |
-    |                                | triangulation info.                                          |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_read_surfaces              | Read surfaces corresponding to one or both hemispheres.      |
-    |                                | Optionally read curvature information and add derived        |
-    |                                | surface data.                                                |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_reduce_surface             | Reduce the number of triangles on a surface using the        |
-    |                                | reducepatch Matlab function.                                 |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_write_surface              | Write a FreeSurfer surface file.                             |
-    +--------------------------------+--------------------------------------------------------------+
-
-
-.. 
tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. _BGBEGFBD: -.. table:: Utility functions. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_block_diag | Create a sparse block-diagonal matrix out of a vector. | - +--------------------------------+--------------------------------------------------------------+ - | mne_combine_xyz | Calculate the square sum of the three Cartesian components | - | | of several vectors listed in one row or column vector. | - +--------------------------------+--------------------------------------------------------------+ - | mne_file_name | Compose a file name relative to $MNE_ROOT. | - +--------------------------------+--------------------------------------------------------------+ - | mne_find_channel | Find a channel by name from measurement info. | - +--------------------------------+--------------------------------------------------------------+ - | mne_find_source_space_hemi | Determine whether a given source space belongs to the left | - | | or right hemisphere. | - +--------------------------------+--------------------------------------------------------------+ - | mne_fread3 | Read a three-byte integer. | - +--------------------------------+--------------------------------------------------------------+ - | mne_fwrite3 | Write a three-byte integer. | - +--------------------------------+--------------------------------------------------------------+ - | mne_make_combined_event_file | Combine data from several trigger channels into one event | - | | file. | - +--------------------------------+--------------------------------------------------------------+ - | mne_omit_first_line | Omit first line from a multi-line message. This routine is | - | | useful for formatting error messages. | - +--------------------------------+--------------------------------------------------------------+ - | mne_prepare_inverse_operator | Prepare inverse operator data for calculating L2 | - | | minimum-norm solutions and dSPM. | - +--------------------------------+--------------------------------------------------------------+ - | mne_setup_toolbox | Set up the MNE Matlab toolbox. | - +--------------------------------+--------------------------------------------------------------+ - | mne_transform_coordinates | Transform locations between different coordinate systems. | - | | This function uses the output file from | - | | ``mne_collect_transforms``. | - +--------------------------------+--------------------------------------------------------------+ - | mne_transpose_named_matrix | Create a transpose of a named matrix. | - +--------------------------------+--------------------------------------------------------------+ - | mne_transform_source_space_to | Transform source space data to another coordinate frame. | - +--------------------------------+--------------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.3\linewidth}|p{0.6\linewidth}| -.. _BGBEFADJ: -.. table:: Examples demonstrating the use of the toolbox. - - +--------------------------------+--------------------------------------------------------------+ - | Function | Purpose | - +================================+==============================================================+ - | mne_ex_average_epochs | Example of averaging epoch data produced by mne_epochs2mat. 
|
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_ex_cancel_noise            | Example of noise cancellation procedures.                    |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_ex_compute_inverse         | Example of computing an L2 minimum-norm estimate or a dSPM   |
-    |                                | solution.                                                    |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_ex_data_sets               | Example of listing evoked-response data sets.                |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_ex_evoked_grad_amp         | Compute tangential gradient amplitudes from planar           |
-    |                                | gradiometer data.                                            |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_ex_read_epochs             | Read epoch data from a raw data file.                        |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_ex_read_evoked             | Example of reading evoked-response data.                     |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_ex_read_raw                | Example of reading raw data.                                 |
-    +--------------------------------+--------------------------------------------------------------+
-    | mne_ex_read_write_raw          | Example of processing raw data (read and write).             |
-    +--------------------------------+--------------------------------------------------------------+
-
-.. note:: In order for the inverse operator calculation to work correctly with data processed with the Elekta-Neuromag Maxfilter (TM) software, the so-called *processing history* block must be included in data files. Previous versions of the MNE Matlab functions did not copy the processing history to saved files. As of March 30, 2009, the Matlab toolbox routines fiff_start_writing_raw and fiff_write_evoked have been enhanced to include these data in the output file as appropriate. If you have older raw data files created in Matlab from input which has been processed with Maxfilter, it is necessary to copy the *processing history* block from the original to the modified raw data file using the ``mne_copy_processing_history`` utility. The raw data processing programs mne_browse_raw and mne_process_raw have handled copying of the processing history since revision 2.5 of the MNE software.
-
-Some data structures
-####################
-
-The MNE Matlab toolbox relies heavily on structures to organize
-the data. This section gives detailed information about fields in
-the essential data structures employed in the MNE Matlab toolbox.
-In the structure definitions, data types referring to other MNE
-Matlab toolbox structures are shown in italics. In addition, :ref:`matlab_fif_constants`
-lists the values of various FIFF constants defined by fiff_define_constants.m.
-The documented structures are:
-
-**tag**
-
-    Contains one tag from the fif file, see :ref:`BGBGIIGD`.
-
-**taginfo**
-
-    Contains the information about one tag, see :ref:`BGBBJBJJ`.
-
-**directory**
-
-    Contains the tag directory as a tree structure, see :ref:`BGBEDHBG`.
-
-**id**
-
-    A fif ID, see :ref:`BGBDAHHJ`.
-
-**named matrix**
-
-    Contains a matrix with names for rows and/or columns, see :ref:`BGBBEDID`.
-    A named matrix is used to store, *e.g.*, SSP vectors and forward solutions.
-
-**trans**
-
-    A 4 x 4 coordinate-transformation matrix operating on augmented column
-    vectors. 
Indication of the coordinate frames to which this transformation
-    relates is included, see :ref:`BGBDHBIF`.
-
-**dig**
-
-    A Polhemus digitizer data point, see :ref:`BGBHDEDG`.
-
-**coildef**
-
-    The coil definition structure useful for forward calculations and array
-    visualization, see :ref:`BGBGBEBH`. For more detailed information on
-    coil definitions, see :ref:`coil_geometry_information`.
-
-**ch**
-
-    Channel information structure, see :ref:`BGBIABGD`.
-
-**proj**
-
-    Signal-space projection data, see :ref:`BGBCJHJB`.
-
-**comp**
-
-    Software gradiometer compensation data, see :ref:`BGBJDIFD`.
-
-**measurement info**
-
-    Translation of the FIFFB_MEAS_INFO entity, see :ref:`BGBFHDIJ` and
-    :class:`mne.Info`. This data structure is returned by fiff_read_meas_info
-    but will not be as complete as :class:`mne.Info`.
-
-**surf**
-
-    Used to represent triangulated surfaces and cortical source spaces, see :ref:`BGBEFJCB`.
-
-**cov**
-
-    Used for storing covariance matrices, see :ref:`BGBJJIED`.
-
-**fwd**
-
-    Forward solution data returned by mne_read_forward_solution,
-    see :ref:`BGBFJIBJ`.
-
-**inv**
-
-    Inverse operator decomposition data returned by mne_read_inverse_operator.
-    For more information on inverse operator
-    decomposition, see :ref:`minimum_norm_estimates`. For an example of how to
-    compute an inverse solution using this data, see the sample routine mne_ex_compute_inverse.
-
-.. note:: The MNE Matlab toolbox tries its best to employ vertex numbering starting from 1 as opposed to 0 as recorded in the data files. There are, however, two exceptions where explicit attention to the vertex numbering convention is needed. First, the standard stc and w file reading and writing routines return and assume zero-based vertex numbering. There are now versions with names ending with '1', which return and assume one-based vertex numbering, see :ref:`BABBDDAI`. Second, the logno field of the channel information in the data files produced by mne_compute_raw_inverse is the zero-based number of the vertex whose source space signal is contained on this channel.
-
-
-.. tabularcolumns:: |p{0.38\linewidth}|p{0.06\linewidth}|p{0.46\linewidth}|
-.. _matlab_fif_constants:
-.. table:: FIFF constants.
-
-    +-------------------------------+-------+----------------------------------------------------------+
-    | Name                          | Value | Purpose                                                  |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MEG_CH                  | 1     | This is a MEG channel.                                   |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_REF_MEG_CH              | 301   | This is a reference MEG channel, located far away from  |
-    |                               |       | the head.                                                |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_EEG_CH                  | 2     | This is an EEG channel.                                  |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MCG_CH                  | 201   | This is an MCG channel.                                  |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_STIM_CH                 | 3     | This is a digital trigger channel.                       |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_EOG_CH                  | 202   | This is an EOG channel.                                  |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_EMG_CH                  | 302   | This is an EMG channel.
| - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ECG_CH | 402 | This is an ECG channel. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MISC_CH | 502 | This is a miscellaneous analog channel. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_RESP_CH | 602 | This channel contains respiration monitor output. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_UNKNOWN | 0 | Unknown coordinate frame. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_DEVICE | 1 | The MEG device coordinate frame. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_ISOTRAK | 2 | The Polhemus digitizer coordinate frame (does not appear | - | | | in data files). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_HPI | 3 | HPI coil coordinate frame (does not appear in data | - | | | files). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_HEAD | 4 | The MEG head coordinate frame (Neuromag convention). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_MRI | 5 | The MRI coordinate frame. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_MRI_SLICE | 6 | The coordinate frame of a single MRI slice. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_MRI_DISPLAY | 7 | The preferred coordinate frame for displaying the MRIs | - | | | (used by MRIlab). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_DICOM_DEVICE | 8 | The DICOM coordinate frame (does not appear in files). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_COORD_IMAGING_DEVICE | 9 | A generic imaging device coordinate frame (does not | - | | | appear in files). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_COORD_TUFTS_EEG | 300 | The Tufts EEG data coordinate frame. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_COORD_CTF_DEVICE | 1001 | The CTF device coordinate frame (does not appear in | - | | | files). | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_COORD_CTF_HEAD | 1004 | The CTF/4D head coordinate frame. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_AVERAGE | 100 | Data aspect: average. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_STD_ERR | 101 | Data aspect: standard error of mean. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_SINGLE | 102 | Single epoch. 
| - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_SUBAVERAGE | 103 | One subaverage. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_ALTAVERAGE | 104 | One alternating (plus-minus) subaverage. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_SAMPLE | 105 | A sample cut from raw data. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_POWER_DENSITY | 106 | Power density spectrum. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_ASPECT_DIPOLE_WAVE | 200 | The time course of an equivalent current dipole. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_BEM_SURF_ID_UNKNOWN | -1 | Unknown BEM surface. | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_BEM_SURF_ID_BRAIN | 1 | The inner skull surface | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_BEM_SURF_ID_SKULL | 3 | The outer skull surface | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_BEM_SURF_ID_HEAD | 4 | The scalp surface | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_SURF_LEFT_HEMI | 101 | Left hemisphere cortical surface | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_SURF_RIGHT_HEMI | 102 | Right hemisphere cortical surface | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_CARDINAL | 1 | Digitization point which is a cardinal landmark a.k.a. | - | | | fiducial point | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_HPI | 2 | Digitized HPI coil location | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_EEG | 3 | Digitized EEG electrode location | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_ECG | 3 | Digitized ECG electrode location | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_EXTRA | 4 | Additional head surface point | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_LPA | 1 | Identifier for left auricular landmark | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_NASION | 2 | Identifier for nasion | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_POINT_RPA | 3 | Identifier for right auricular landmark | - +-------------------------------+-------+----------------------------------------------------------+ - | FIFFV_MNE_FIXED_ORI | 1 | Fixed orientation constraint used in the computation of | - | | | a forward solution. 
|
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_FREE_ORI            | 2     | No orientation constraint used in the computation of     |
-    |                               |       | a forward solution                                       |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_MEG                 | 1     | Indicates an inverse operator based on MEG only          |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_EEG                 | 2     | Indicates an inverse operator based on EEG only.         |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_MEG_EEG             | 3     | Indicates an inverse operator based on both MEG and EEG. |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_UNKNOWN_COV         | 0     | An unknown covariance matrix                             |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_NOISE_COV           | 1     | Indicates a noise covariance matrix.                     |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_SENSOR_COV          | 1     | Synonym for FIFFV_MNE_NOISE_COV                          |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_SOURCE_COV          | 2     | Indicates a source covariance matrix                     |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_FMRI_PRIOR_COV      | 3     | Indicates a covariance matrix associated with fMRI priors|
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_SIGNAL_COV          | 4     | Indicates the data (signal + noise) covariance matrix    |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_DEPTH_PRIOR_COV     | 5     | Indicates the depth prior (depth weighting) covariance   |
-    |                               |       | matrix                                                   |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_MNE_ORIENT_PRIOR_COV    | 6     | Indicates the orientation (loose orientation constraint) |
-    |                               |       | prior covariance matrix                                  |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_PROJ_ITEM_NONE          | 0     | The nature of this projection item is unknown            |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_PROJ_ITEM_FIELD         | 1     | This projection item is a generic field pattern or       |
-    |                               |       | field patterns.                                          |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_PROJ_ITEM_DIP_FIX       | 2     | This projection item is the field of one dipole          |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_PROJ_ITEM_DIP_ROT       | 3     | This projection item corresponds to the fields of three  |
-    |                               |       | or two orthogonal dipoles at some location.              |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_PROJ_ITEM_HOMOG_GRAD    | 4     | This projection item contains the homogeneous gradient   |
-    |                               |       | fields as seen by the sensor array.                      
|
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_PROJ_ITEM_HOMOG_FIELD   | 5     | This projection item contains the three homogeneous field|
-    |                               |       | components as seen by the sensor array.                  |
-    +-------------------------------+-------+----------------------------------------------------------+
-    | FIFFV_PROJ_ITEM_EEG_AVREF     | 10    | This projection item corresponds to the average EEG      |
-    |                               |       | reference.                                               |
-    +-------------------------------+-------+----------------------------------------------------------+
-
-.. _BGBGIIGD:
-
-.. table:: The tag structure.
-
-    ======= =========== ============================================
-    Field   Data type   Description
-    ======= =========== ============================================
-    kind    int32       The kind of the data item.
-    type    uint32      The data type used to represent the data.
-    size    int32       Size of the data in bytes.
-    next    int32       Byte offset of the next tag in the file.
-    data    various     The data itself.
-    ======= =========== ============================================
-
-.. _BGBBJBJJ:
-
-.. table:: The taginfo structure.
-
-    ======= =========== ============================================
-    Field   Data type   Description
-    ======= =========== ============================================
-    kind    double      The kind of the data item.
-    type    double      The data type used to represent the data.
-    size    double      Size of the data in bytes.
-    pos     double      Byte offset to this tag in the file.
-    ======= =========== ============================================
-
-.. _BGBEDHBG:
-
-.. table:: The directory structure.
-
-    ============ ============ ================================================================
-    Field        Data type    Description
-    ============ ============ ================================================================
-    block        double       The block id of this directory node.
-    id           id           The unique identifier of this node.
-    parent_id    id           The unique identifier of the node this node was derived from.
-    nent         double       Number of entries in this node.
-    nchild       double       Number of children to this node.
-    dir          taginfo      Information about tags in this node.
-    children     directory    The children of this node.
-    ============ ============ ================================================================
-
-.. _BGBDAHHJ:
-
-.. table:: The id structure.
-
-    ========== =========== ============================================================
-    Field      Data type   Description
-    ========== =========== ============================================================
-    version    int32       The fif file version (major << 16 | minor).
-    machid     int32(2)    Unique identifier of the computer this id was created on.
-    secs       int32       Time since January 1, 1970 (seconds).
-    usecs      int32       Time since January 1, 1970 (microseconds past secs).
-    ========== =========== ============================================================
-
-.. _BGBBEDID:
-
-.. table:: The named matrix structure.
-
-    ============ =========== ======================================================================
-    Field        Data type   Description
-    ============ =========== ======================================================================
-    nrow         int32       Number of rows.
-    ncol         int32       Number of columns.
-    row_names    cell(*)     The names associated with the rows. This member may be empty.
-    col_names    cell(*)     The names associated with the columns. This member may be empty.
-    data         various     The matrix data, usually of type single or double.
- ============ =========== ====================================================================== - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBDHBIF: -.. table:: The trans structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | from | int32 | The source coordinate frame, see | - | | | :ref:`matlab_fif_constants`. Look | - | | | for entries starting with FIFFV_COORD or FIFFV_MNE_COORD.| - +---------------------------+-----------+----------------------------------------------------------+ - | to | int32 | The destination coordinate frame. | - +---------------------------+-----------+----------------------------------------------------------+ - | trans |double(4,4)| The 4-by-4 coordinate transformation matrix. This | - | | | operates from augmented position column vectors given in | - | | | *from* coordinates to give results in *to* coordinates. | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBHDEDG: -.. table:: The dig structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | kind | int32 | The type of digitizing point. Possible values are listed | - | | | in :ref:`matlab_fif_constants`. Look for entries | - | | | starting with FIFF_POINT. | - +---------------------------+-----------+----------------------------------------------------------+ - | ident | int32 | Identifier for this point. | - +---------------------------+-----------+----------------------------------------------------------+ - | r | single(3) | The location of this point. | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBGBEBH: -.. table:: The coildef structure. For more detailed information, see :ref:`coil_geometry_information`. - - +-------------------+-------------------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===================+===================+==========================================================+ - | class | double | The coil (or electrode) class. | - +-------------------+-------------------+----------------------------------------------------------+ - | id | double | The coil (or electrode) id. | - +-------------------+-------------------+----------------------------------------------------------+ - | accuracy | double | Representation accuracy. | - +-------------------+-------------------+----------------------------------------------------------+ - | num_points | double | Number of integration points. | - +-------------------+-------------------+----------------------------------------------------------+ - | size | double | Coil size. | - +-------------------+-------------------+----------------------------------------------------------+ - | baseline | double | Coil baseline. | - +-------------------+-------------------+----------------------------------------------------------+ - | description | char(*) | Coil description. 
| - +-------------------+-------------------+----------------------------------------------------------+ - | coildefs | double | Each row contains the integration point weight, followed | - | | (num_points,7) | by location [m] and normal. | - +-------------------+-------------------+----------------------------------------------------------+ - | FV | struct | Contains the faces and vertices which can be used to | - | | | draw the coil for visualization. | - +-------------------+-------------------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBIABGD: -.. table:: The ch structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | scanno | int32 | Scanning order number, starting from 1. | - +---------------------------+-----------+----------------------------------------------------------+ - | logno | int32 | Logical channel number, conventions in the usage of this | - | | | number vary. | - +---------------------------+-----------+----------------------------------------------------------+ - | kind | int32 | The channel type (FIFFV_MEG_CH, FIFF_EEG_CH, etc., see | - | | | :ref:`matlab_fif_constants` ). | - +---------------------------+-----------+----------------------------------------------------------+ - | range | double | The hardware-oriented part of the calibration factor. | - | | | This should be only applied to the continuous raw data. | - +---------------------------+-----------+----------------------------------------------------------+ - | cal | double | The calibration factor to bring the channels to physical | - | | | units. | - +---------------------------+-----------+----------------------------------------------------------+ - | loc | double(12)| The channel location. The first three numbers indicate | - | | | the location [m], followed by the three unit vectors of | - | | | the channel-specific coordinate frame. These data contain| - | | | the values saved in the fif file and should not be | - | | | changed. The values are specified in device coordinates | - | | | for MEG and in head coordinates for EEG channels, | - | | | respectively. | - +---------------------------+-----------+----------------------------------------------------------+ - | coil_trans |double(4,4)| Initially, transformation from the channel coordinates | - | | | to device coordinates. This transformation is updated by | - | | | calls to fiff_transform_meg_chs and | - | | | fiff_transform_eeg_chs. | - +---------------------------+-----------+----------------------------------------------------------+ - | eeg_loc | double(6) | The location of the EEG electrode in coord_frame | - | | | coordinates. The first three values contain the location | - | | | of the electrode [m]. If six values are present, the | - | | | remaining ones indicate the location of the reference | - | | | electrode for this channel. | - +---------------------------+-----------+----------------------------------------------------------+ - | coord_frame | int32 | Initially, the coordinate frame is FIFFV_COORD_DEVICE | - | | | for MEG channels and FIFFV_COORD_HEAD for EEG channels. | - +---------------------------+-----------+----------------------------------------------------------+ - | unit | int32 | Unit of measurement. 
Relevant values are: 201 = T/m,     |
-    |                           |           | 112 = T, 107 = V, and 202 = Am.                          |
-    +---------------------------+-----------+----------------------------------------------------------+
-    | unit_mul                  | int32     | The data are given in units multiplied by 10^unit_mul.   |
-    |                           |           | Presently, unit_mul is always zero.                      |
-    +---------------------------+-----------+----------------------------------------------------------+
-    | ch_name                   | char(*)   | Name of the channel.                                     |
-    +---------------------------+-----------+----------------------------------------------------------+
-    | coil_def                  | coildef   | The coil definition structure. This is present only if   |
-    |                           |           | mne_add_coil_defs has been successfully called.          |
-    +---------------------------+-----------+----------------------------------------------------------+
-
-
-.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
-.. _BGBCJHJB:
-.. table:: The proj structure.
-
-    +---------------------------+-----------+----------------------------------------------------------+
-    | Field                     | Data Type | Description                                              |
-    +===========================+===========+==========================================================+
-    | kind                      | int32     | The type of the projection item. Possible values are     |
-    |                           |           | listed in :ref:`matlab_fif_constants`. Look for entries  |
-    |                           |           | starting with FIFFV_PROJ_ITEM or FIFFV_MNE_PROJ_ITEM.    |
-    +---------------------------+-----------+----------------------------------------------------------+
-    | active                    | int32     | Is this item active, i.e., applied or about to be        |
-    |                           |           | applied to the data.                                     |
-    +---------------------------+-----------+----------------------------------------------------------+
-    | data                      | named     | The projection vectors. The column names indicate the    |
-    |                           | matrix    | names of the channels associated to the elements of the  |
-    |                           |           | vectors.                                                 |
-    +---------------------------+-----------+----------------------------------------------------------+
-
-
-
-.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}|
-.. _BGBJDIFD:
-.. table:: The comp structure.
-
-    +---------------------------+-----------+----------------------------------------------------------+
-    | Field                     | Data Type | Description                                              |
-    +===========================+===========+==========================================================+
-    | ctfkind                   | int32     | The kind of the compensation as stored in the file.      |
-    +---------------------------+-----------+----------------------------------------------------------+
-    | kind                      | int32     | ctfkind mapped into small integer numbers.               |
-    +---------------------------+-----------+----------------------------------------------------------+
-    | save_calibrated           | logical   | Were the compensation data saved in calibrated form. If  |
-    |                           |           | this field is false, the matrix will be decalibrated     |
-    |                           |           | using the fields row_cals and col_cals when the          |
-    |                           |           | compensation data are saved by the toolbox.              |
-    +---------------------------+-----------+----------------------------------------------------------+
-    | row_cals                  | double(*) | Calibration factors applied to the rows of the           |
-    |                           |           | compensation data matrix when the data were read.        |
-    +---------------------------+-----------+----------------------------------------------------------+
-    | col_cals                  | double(*) | Calibration factors applied to the columns of the        |
-    |                           |           | compensation data matrix when the data were read.
| - +---------------------------+-----------+----------------------------------------------------------+ - | data | named | The compensation data matrix. The row_names list the | - | | matrix | names of the channels to which this compensation applies | - | | | and the col_names the compensation channels. | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBFHDIJ: -.. table:: The meas info structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | file_id | id | The fif ID of the measurement file. | - +---------------------------+-----------+----------------------------------------------------------+ - | meas_id | id | The ID assigned to this measurement by the acquisition | - | | | system or during file conversion. | - +---------------------------+-----------+----------------------------------------------------------+ - | nchan | int32 | Number of channels. | - +---------------------------+-----------+----------------------------------------------------------+ - | sfreq | double | Sampling frequency. | - +---------------------------+-----------+----------------------------------------------------------+ - | highpass | double | Highpass corner frequency [Hz]. Zero indicates a DC | - | | | recording. | - +---------------------------+-----------+----------------------------------------------------------+ - | lowpass | double | Lowpass corner frequency [Hz]. | - +---------------------------+-----------+----------------------------------------------------------+ - | chs | ch(nchan) | An array of channel information structures. | - +---------------------------+-----------+----------------------------------------------------------+ - | ch_names |cell(nchan)| Cell array of channel names. | - +---------------------------+-----------+----------------------------------------------------------+ - | dev_head_t | trans | The device to head transformation. | - +---------------------------+-----------+----------------------------------------------------------+ - | ctf_head_t | trans | The transformation from 4D/CTF head coordinates to | - | | | Neuromag head coordinates. This is only present in | - | | | 4D/CTF data. | - +---------------------------+-----------+----------------------------------------------------------+ - | dev_ctf_t | trans | The transformation from device coordinates to 4D/CTF | - | | | head coordinates. This is only present in 4D/CTF data. | - +---------------------------+-----------+----------------------------------------------------------+ - | dig | dig(*) | The Polhemus digitization data in head coordinates. | - +---------------------------+-----------+----------------------------------------------------------+ - | bads | cell(*) | Bad channel list. | - +---------------------------+-----------+----------------------------------------------------------+ - | projs | proj(*) | SSP operator data. | - +---------------------------+-----------+----------------------------------------------------------+ - | comps | comp(*) | Software gradient compensation data. | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. 
_BGBEFJCB: - -.. table:: The surf structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | id | int32 | The surface ID. | - +---------------------------+-----------+----------------------------------------------------------+ - | sigma | double | The electrical conductivity of the compartment bounded by| - | | | this surface. This field is present in BEM surfaces only.| - +---------------------------+-----------+----------------------------------------------------------+ - | np | int32 | Number of vertices on the surface. | - +---------------------------+-----------+----------------------------------------------------------+ - | ntri | int32 | Number of triangles on the surface. | - +---------------------------+-----------+----------------------------------------------------------+ - | coord_frame | int32 | Coordinate frame in which the locations and orientations | - | | | are expressed. | - +---------------------------+-----------+----------------------------------------------------------+ - | rr | double | The vertex locations. | - | | (np,3) | | - +---------------------------+-----------+----------------------------------------------------------+ - | nn | double | The vertex normals. If derived surface data was not | - | | (np,3) | requested, this is empty. | - +---------------------------+-----------+----------------------------------------------------------+ - | tris | int32 | Vertex numbers of the triangles in counterclockwise | - | | (ntri,3) | order as seen from the outside. | - +---------------------------+-----------+----------------------------------------------------------+ - | nuse | int32 | Number of active vertices, *i.e.*, vertices included in | - | | | a decimated source space. | - +---------------------------+-----------+----------------------------------------------------------+ - | inuse | int32(np) | Which vertices are in use. | - +---------------------------+-----------+----------------------------------------------------------+ - | vertno |int32(nuse)| Indices of the vertices in use. | - +---------------------------+-----------+----------------------------------------------------------+ - | curv | double(np)| Curvature values at the vertices. If curvature | - | | | information was not requested, this field is empty or | - | | | absent. | - +---------------------------+-----------+----------------------------------------------------------+ - | tri_area | double | The triangle areas in m2.If derived surface data was not | - | | (ntri) | requested, this field will be missing. | - +---------------------------+-----------+----------------------------------------------------------+ - | tri_cent | double | The triangle centroids. If derived surface data was not | - | | (ntri,3) | requested, this field will be missing. | - +---------------------------+-----------+----------------------------------------------------------+ - | tri_nn | double | The triangle normals. If derived surface data was not | - | | (ntri,3) | requested, this field will be missing. | - +---------------------------+-----------+----------------------------------------------------------+ - | nuse_tri | int32 | Number of triangles in use. This is present only if the | - | | | surface corresponds to a source space created with the | - | | | ``--ico`` option. 
| - +---------------------------+-----------+----------------------------------------------------------+ - | use_tris | int32 | The vertices of the triangles in use in the complete | - | | (nuse_tri)| triangulation. This is present only if the surface | - | | | corresponds to a source space created with the | - | | | ``--ico`` option. | - +---------------------------+-----------+----------------------------------------------------------+ - | nearest | int32(np) | This field is present only if patch information has been | - | | | computed for a source space. For each vertex in the | - | | | triangulation, these values indicate the nearest active | - | | | source space vertex. | - +---------------------------+-----------+----------------------------------------------------------+ - | nearest_dist | double(np)| This field is present only if patch information has been | - | | | computed for a source space. For each vertex in the | - | | | triangulation, these values indicate the distance to the | - | | | nearest active source space vertex. | - +---------------------------+-----------+----------------------------------------------------------+ - | dist | double | Distances between vertices on this surface given as a | - | | (np,np) | sparse matrix. A zero off-diagonal entry in this matrix | - | | | indicates that the corresponding distance has not been | - | | | calculated. | - +---------------------------+-----------+----------------------------------------------------------+ - | dist_limit | double | The value given to mne_add_patch_info with the ``--dist``| - | | | option. This value is presently | - | | | always negative, indicating that only distances between | - | | | active source space vertices, as indicated by the vertno | - | | | field of this structure, have been calculated. | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBJJIED: - -.. table:: The cov structure. - - +---------------------------+-----------+----------------------------------------------------------+ - | Field | Data Type | Description | - +===========================+===========+==========================================================+ - | kind | double | What kind of a covariance matrix (1 = noise covariance, | - | | | 2 = source covariance). | - +---------------------------+-----------+----------------------------------------------------------+ - | diag | double | Is this a diagonal matrix. | - +---------------------------+-----------+----------------------------------------------------------+ - | dim | int32 | Dimension of the covariance matrix. | - +---------------------------+-----------+----------------------------------------------------------+ - | names | cell(*) | Names of the channels associated with the entries | - | | | (may be empty). | - +---------------------------+-----------+----------------------------------------------------------+ - | data | double | The covariance matrix. This a double(dim) vector for a | - | | (dim,dim) | diagonal covariance matrix. | - +---------------------------+-----------+----------------------------------------------------------+ - | projs | proj(*) | The SSP vectors applied to these data. | - +---------------------------+-----------+----------------------------------------------------------+ - | bads | cell(*) | Bad channel names. 
| - +---------------------------+-----------+----------------------------------------------------------+ - | nfree | int32 | Number of data points used to compute this matrix. | - +---------------------------+-----------+----------------------------------------------------------+ - | eig |double(dim)| The eigenvalues of the covariance matrix. This field may | - | | | be empty for a diagonal covariance matrix. | - +---------------------------+-----------+----------------------------------------------------------+ - | eigvec | double | The eigenvectors of the covariance matrix. | - | | (dim,dim) | | - +---------------------------+-----------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBFJIBJ: - -.. table:: The fwd structure. - - +-------------------------+-------------+----------------------------------------------------------+ - | Field | Data Type | Description | - +=========================+=============+==========================================================+ - | source_ori | int32 | Has the solution been computed for the current component | - | | | normal to the cortex only (1) or all three source | - | | | orientations (2). | - +-------------------------+-------------+----------------------------------------------------------+ - | coord_frame | int32 | Coordinate frame in which the locations and orientations | - | | | are expressed. | - +-------------------------+-------------+----------------------------------------------------------+ - | nsource | int32 | Total number of source space points. | - +-------------------------+-------------+----------------------------------------------------------+ - | nchan | int32 | Number of channels. | - +-------------------------+-------------+----------------------------------------------------------+ - | sol | named | The forward solution matrix. | - | | matrix | | - +-------------------------+-------------+----------------------------------------------------------+ - | sol_grad | named | The derivatives of the forward solution with respect to | - | | matrix | the dipole location coordinates. | - | | | This field is present only if the forward solution was | - | | | computed with the ``--grad`` option in MNE-C. | - +-------------------------+-------------+----------------------------------------------------------+ - | mri_head_t | trans | Transformation from the MRI coordinate frame to the | - | | | (Neuromag) head coordinate frame. | - +-------------------------+-------------+----------------------------------------------------------+ - | src | surf(:) | The description of the source spaces. | - +-------------------------+-------------+----------------------------------------------------------+ - | source_rr | double | The source locations. | - | | (nsource,3) | | - +-------------------------+-------------+----------------------------------------------------------+ - | source_nn | double(:,3) | The source orientations. Number of rows is either | - | | | nsource (fixed source orientations) or 3*nsource | - | | | (all source orientations). | - +-------------------------+-------------+----------------------------------------------------------+ - - -.. tabularcolumns:: |p{0.2\linewidth}|p{0.2\linewidth}|p{0.55\linewidth}| -.. _BGBIEIJE: - -.. table:: The inv structure. Note: The fields proj, whitener, reginv, and noisenorm are filled in by the routine mne_prepare_inverse_operator. 
- - +---------------------+-------------+----------------------------------------------------------+ - | Field | Data Type | Description | - +=====================+=============+==========================================================+ - | methods | int32 | Has the solution been computed using MEG data (1), EEG | - | | | data (2), or both (3). | - +---------------------+-------------+----------------------------------------------------------+ - | source_ori | int32 | Has the solution been computed for the current component | - | | | normal to the cortex only (1) or all three source | - | | | orientations (2). | - +---------------------+-------------+----------------------------------------------------------+ - | nsource | int32 | Total number of source space points. | - +---------------------+-------------+----------------------------------------------------------+ - | nchan | int32 | Number of channels. | - +---------------------+-------------+----------------------------------------------------------+ - | coord_frame | int32 | Coordinate frame in which the locations and orientations | - | | | are expressed. | - +---------------------+-------------+----------------------------------------------------------+ - | source_nn | double(:,3) | The source orientations. Number of rows is either | - | | | nsource (fixed source orientations) or 3*nsource (all | - | | | source orientations). | - +---------------------+-------------+----------------------------------------------------------+ - | sing | double | The singular values, *i.e.*, the diagonal values of | - | | (nchan) | :math:`\Lambda`, see :ref:`mne_solution`. | - +---------------------+-------------+----------------------------------------------------------+ - | eigen_leads | double | The matrix :math:`V`, see :ref:`mne_solution`. | - | | (:,nchan) | | - +---------------------+-------------+----------------------------------------------------------+ - | eigen_fields | double | The matrix :math:`U^\top`, see | - | | (nchan, | :ref:`mne_solution`. | - | | nchan) | | - +---------------------+-------------+----------------------------------------------------------+ - | noise_cov | cov | The noise covariance matrix :math:`C`. | - +---------------------+-------------+----------------------------------------------------------+ - | source_cov | cov | The source covariance matrix :math:`R`. | - +---------------------+-------------+----------------------------------------------------------+ - | src | surf(:) | The description of the source spaces. | - +---------------------+-------------+----------------------------------------------------------+ - | mri_head_t | trans | Transformation from the MRI coordinate frame to the | - | | | (Neuromag) head coordinate frame. | - +---------------------+-------------+----------------------------------------------------------+ - | nave | double | The number of averages. | - +---------------------+-------------+----------------------------------------------------------+ - | projs | proj(:) | The SSP vectors which were active when the decomposition | - | | | was computed. | - +---------------------+-------------+----------------------------------------------------------+ - | proj | double | The projection operator computed using projs. | - | | (nchan) | | - +---------------------+-------------+----------------------------------------------------------+ - | whitener | | A sparse matrix containing the noise normalization | - | | | factors. 
Dimension is either nsource (fixed source | - | | | orientations) or 3*nsource (all source orientations). | - +---------------------+-------------+----------------------------------------------------------+ - | reginv | double | The diagonal matrix :math:`\Gamma`, see | - | | (nchan) | :ref:`mne_solution`. | - +---------------------+-------------+----------------------------------------------------------+ - | noisenorm | double(:) | A sparse matrix containing the noise normalization | - | | | factors. Dimension is either nsource (fixed source | - | | | orientations) or 3*nsource (all source orientations). | - +---------------------+-------------+----------------------------------------------------------+ - - -On-line documentation for individual routines -############################################# - -Each of the routines listed in Tables :ref:`BGBCGHAG` - :ref:`BGBEFADJ` has on-line documentation accessible by saying ``help`` <*routine name*> in Matlab. diff --git a/doc/whats_new.rst b/doc/whats_new.rst deleted file mode 100644 index 27e856fb113..00000000000 --- a/doc/whats_new.rst +++ /dev/null @@ -1,42 +0,0 @@ -.. _whats_new: - -What's new -========== - -Changes for each version of MNE-Python are listed below. - -.. toctree:: - :maxdepth: 1 - - changes/devel.rst - changes/v1.5.rst - changes/v1.4.rst - changes/v1.3.rst - changes/v1.2.rst - changes/v1.1.rst - changes/v1.0.rst - changes/v0.24.rst - changes/v0.23.rst - changes/v0.22.rst - changes/v0.21.rst - changes/v0.20.rst - changes/v0.19.rst - changes/v0.18.rst - changes/v0.17.rst - changes/v0.16.rst - changes/v0.15.rst - changes/v0.14.rst - changes/v0.13.rst - changes/v0.12.rst - changes/v0.11.rst - changes/v0.10.rst - changes/v0.9.rst - changes/v0.8.rst - changes/v0.7.rst - changes/v0.6.rst - changes/v0.5.rst - changes/v0.4.rst - changes/v0.3.rst - changes/v0.2.rst - changes/v0.1.rst - old_versions/index.rst diff --git a/tutorials/io/70_reading_eyetracking_data.py b/tutorials/io/70_reading_eyetracking_data.py index a207c24a178..a9b1b1ffa76 100644 --- a/tutorials/io/70_reading_eyetracking_data.py +++ b/tutorials/io/70_reading_eyetracking_data.py @@ -23,8 +23,7 @@ .. seealso:: Some MNE functions may not be available to eyetracking and other physiological data, because MNE does not consider them to be data - channels. See the :doc:`glossary ` for more - information. + channels. See the :ref:`glossary` for more information. .. 
_import-eyelink_asc: From de3b5a00e3883c010863642c57818d8cb862ca45 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 3 Oct 2023 09:49:10 -0400 Subject: [PATCH 14/37] [pre-commit.ci] pre-commit autoupdate (#12053) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Eric Larson --- .pre-commit-config.yaml | 6 +++--- ignore_words.txt | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5cc0cfbacb3..5880c0d3b72 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,7 +7,7 @@ repos: # Ruff mne - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.291 + rev: v0.0.292 hooks: - id: ruff name: ruff mne @@ -15,7 +15,7 @@ repos: # Ruff tutorials and examples - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.0.291 + rev: v0.0.292 hooks: - id: ruff name: ruff tutorials and examples @@ -26,7 +26,7 @@ repos: # Codespell - repo: https://github.com/codespell-project/codespell - rev: v2.2.5 + rev: v2.2.6 hooks: - id: codespell additional_dependencies: diff --git a/ignore_words.txt b/ignore_words.txt index 2d542566498..acf20c0a58a 100644 --- a/ignore_words.txt +++ b/ignore_words.txt @@ -37,3 +37,4 @@ shs pres aas vor +connec From 7c1429403e4beaa0dadb369512e2d5b1b1ed79fc Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 3 Oct 2023 12:23:42 -0400 Subject: [PATCH 15/37] MAINT: Auto-update installer link for old doc versions (#12056) --- doc/_static/js/set_installer_tab.js | 32 ++++++++++++ doc/_static/js/update_installer_version.js | 61 ++++++++++++++++++++++ doc/install/installers.rst | 33 ++---------- 3 files changed, 97 insertions(+), 29 deletions(-) create mode 100644 doc/_static/js/set_installer_tab.js create mode 100644 doc/_static/js/update_installer_version.js diff --git a/doc/_static/js/set_installer_tab.js b/doc/_static/js/set_installer_tab.js new file mode 100644 index 00000000000..5b6b737a565 --- /dev/null +++ b/doc/_static/js/set_installer_tab.js @@ -0,0 +1,32 @@ +/* inspired by https://tobiasahlin.com/blog/move-from-jquery-to-vanilla-javascript/ */ + +function documentReady(callback) { + if (document.readyState != "loading") callback(); + else document.addEventListener("DOMContentLoaded", callback); +} + +function setTabs() { + var platform = "linux"; + if (navigator.userAgent.indexOf("Win") !== -1) { + platform = "windows"; + } + if (navigator.userAgent.indexOf("Mac") !== -1) { + // there's no good way to distinguish intel vs M1 in javascript so we + // just default to showing the first of the 2 macOS tabs + platform = "macos-intel"; + } + let all_tab_nodes = document.querySelectorAll( + '.platform-selector-tabset')[0].children; + let input_nodes = [...all_tab_nodes].filter( + child => child.nodeName === "INPUT"); + let tab_label_nodes = [...document.querySelectorAll('.sd-tab-label')]; + let correct_label = tab_label_nodes.filter( + // label.id is drawn from :name: property in the rST, which must + // be unique across the whole site (*sigh*) + label => label.id.startsWith(platform))[0]; + let input_id = correct_label.getAttribute('for'); + let correct_input = input_nodes.filter(node => node.id === input_id)[0]; + correct_input.checked = true; +} + +documentReady(setTabs); diff --git a/doc/_static/js/update_installer_version.js b/doc/_static/js/update_installer_version.js new file mode 100644 index 00000000000..7cb8bdede1e --- /dev/null +++ 
b/doc/_static/js/update_installer_version.js @@ -0,0 +1,61 @@ +/* inspired by https://tobiasahlin.com/blog/move-from-jquery-to-vanilla-javascript/ */ + +function documentReady(callback) { + if (document.readyState != "loading") callback(); + else document.addEventListener("DOMContentLoaded", callback); +} + +async function getRelease() { + result = await fetch("https://api.github.com/repos/mne-tools/mne-installers/releases/latest"); + data = await result.json(); + return data; +} +async function warnVersion() { + data = await getRelease(); + // Take v1.5.1 for example and change to 1.5 + ids = ["linux-installers", "macos-intel-installers", "macos-apple-installers", "windows-installers"]; + warn = false; + ids.forEach((id) => { + label_id = document.getElementById(id); + // tab is immediately after label + children = [].slice.call(label_id.parentNode.children); + div = children[children.indexOf(label_id) + 1]; + a = div.children[0].children[0]; // div->p->a + ending = a.href.split("-").slice(-1)[0]; // Should be one of: ["macOS_Intel.pkg", "macOS_M1.pkg", "Linux.sh", "Windows.exe"] + data["assets"].every((asset) => { + // find the matching asset + if (!asset["browser_download_url"].endsWith(ending)) { + return true; // continue + } + old_stem = a.href.split("/").slice(-1)[0]; + new_stem = asset["browser_download_url"].split("/").slice(-1)[0]; + a.href = asset["browser_download_url"]; + // also replace the command on Linux + if (ending === "Linux.sh") { + code = document.getElementById("codecell0"); + } + if (!warn) { + // MNE-Python-1.5.1_0-Linux.sh to 1.5 for example + old_ver = old_stem.split("-").slice(2)[0].split("_")[0].split(".").slice(0, 2).join("."); + new_ver = new_stem.split("-").slice(2)[0].split("_")[0].split(".").slice(0, 2).join("."); + if (old_ver !== new_ver) { + warn = `The installers below are for version ${new_ver} as ${old_ver} is no longer supported`; + } + } + return false; // do not continue + }); + }); + if (warn) { + let outer = document.createElement("div"); + let title = document.createElement("p"); + let inner = document.createElement("p"); + outer.setAttribute("class", "admonition warning"); + title.setAttribute("class", "admonition-title"); + title.innerText = "Warning"; + inner.innerText = warn; + outer.append(title, inner); + document.querySelectorAll('.platform-selector-tabset')[0].before(outer); + } +} + +documentReady(warnVersion); diff --git a/doc/install/installers.rst b/doc/install/installers.rst index bad192150b4..2d1d75323b8 100644 --- a/doc/install/installers.rst +++ b/doc/install/installers.rst @@ -78,6 +78,10 @@ Got any questions? Let us know on the `MNE Forum`_! **Supported platforms:** Windows 10 and newer +.. raw:: html + + + First steps ^^^^^^^^^^^ @@ -123,32 +127,3 @@ interpreter. .. note:: This information is currently not displayed on the Windows platform. - - -.. 
raw:: html - - From a8b4638cf415838425fba34d2773c1007ac99ee1 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Tue, 3 Oct 2023 18:54:24 +0200 Subject: [PATCH 16/37] Revise cbrnr code ownership (#12061) --- .github/CODEOWNERS | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 7b7877130ac..dc4e47a0e18 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -24,7 +24,7 @@ ################################################# # Artifact regression -/mne/preprocessing/_regress.py @wmvanvliet @cbrnr +/mne/preprocessing/_regress.py @wmvanvliet /mne/preprocessing/tests/test_regress.py @wmvanvliet # Beamforming @@ -33,7 +33,7 @@ *lcmv*.py @britta-wstnr # Channels -/mne/channels @cbrnr @agramfort @mscheltienne +/mne/channels @agramfort @mscheltienne # Core sensor-space classes /mne/epochs.py @drammock @agramfort @mscheltienne @@ -93,10 +93,10 @@ /mne/_freesurfer.py @alexrockhill @larsoner # TFR -/mne/time_frequency @drammock @cbrnr @adam2392 @mscheltienne +/mne/time_frequency @drammock @adam2392 @mscheltienne # Viz -/mne/viz @drammock @cbrnr +/mne/viz @drammock /mne/viz/_brain @larsoner @wmvanvliet /mne/viz/ui_events.py @wmvanvliet /tutorials/visualization @larsoner @wmvanvliet @@ -120,5 +120,5 @@ # Project infrastructure and CIs /*.* @larsoner @drammock # files in the root directory /.circleci @larsoner -/.github @larsoner @cbrnr +/.github @larsoner /tools @larsoner @drammock From b069d4abd7bd7b39973c9fcbf0d9ada02b5e4b19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Richard=20H=C3=B6chenberger?= Date: Tue, 3 Oct 2023 20:10:44 +0200 Subject: [PATCH 17/37] Move more metadata and build flags from setup.py to pyproject.toml (#12060) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Eric Larson --- mne/_ola.py | 2 +- mne/beamformer/_lcmv.py | 2 +- mne/datasets/brainstorm/bst_auditory.py | 2 +- mne/datasets/brainstorm/bst_phantom_ctf.py | 2 +- mne/datasets/brainstorm/bst_phantom_elekta.py | 2 +- mne/datasets/brainstorm/bst_raw.py | 2 +- mne/datasets/brainstorm/bst_resting.py | 2 +- mne/datasets/sleep_physionet/age.py | 6 +- mne/datasets/sleep_physionet/temazepam.py | 4 +- mne/datasets/tests/test_datasets.py | 2 +- mne/decoding/base.py | 2 +- mne/decoding/csp.py | 4 +- mne/decoding/transformer.py | 6 +- mne/minimum_norm/resolution_matrix.py | 6 +- mne/preprocessing/ecg.py | 2 +- mne/time_frequency/multitaper.py | 4 +- mne/viz/montage.py | 2 +- pyproject.toml | 93 ++++++++++++++++++- setup.py | 88 ------------------ tutorials/inverse/50_beamformer_lcmv.py | 2 +- .../preprocessing/25_background_filtering.py | 6 +- .../preprocessing/45_projectors_background.py | 8 +- tutorials/stats-sensor-space/20_erp_stats.py | 2 +- 23 files changed, 125 insertions(+), 126 deletions(-) diff --git a/mne/_ola.py b/mne/_ola.py index 75b850ee5b7..c17a47ffce6 100644 --- a/mne/_ola.py +++ b/mne/_ola.py @@ -281,7 +281,7 @@ def __init__( window="hann", tol=1e-10, *, - verbose=None + verbose=None, ): n_samples = _ensure_int(n_samples, "n_samples") n_overlap = _ensure_int(n_overlap, "n_overlap") diff --git a/mne/beamformer/_lcmv.py b/mne/beamformer/_lcmv.py index 10ce4c431e2..0abf671c716 100644 --- a/mne/beamformer/_lcmv.py +++ b/mne/beamformer/_lcmv.py @@ -207,7 +207,7 @@ def make_lcmv( noise_cov=noise_cov, rank=rank, pca=False, - **depth + **depth, ) ch_names = list(info["ch_names"]) diff --git a/mne/datasets/brainstorm/bst_auditory.py b/mne/datasets/brainstorm/bst_auditory.py index a45dc72b5cf..f6cbd7c4e43 
100644 --- a/mne/datasets/brainstorm/bst_auditory.py +++ b/mne/datasets/brainstorm/bst_auditory.py @@ -33,7 +33,7 @@ def data_path( download=True, accept=False, *, - verbose=None + verbose=None, ): # noqa: D103 return _download_mne_dataset( name="bst_auditory", diff --git a/mne/datasets/brainstorm/bst_phantom_ctf.py b/mne/datasets/brainstorm/bst_phantom_ctf.py index 147626d33b6..9a64a438e89 100644 --- a/mne/datasets/brainstorm/bst_phantom_ctf.py +++ b/mne/datasets/brainstorm/bst_phantom_ctf.py @@ -22,7 +22,7 @@ def data_path( download=True, accept=False, *, - verbose=None + verbose=None, ): # noqa: D103 return _download_mne_dataset( name="bst_phantom_ctf", diff --git a/mne/datasets/brainstorm/bst_phantom_elekta.py b/mne/datasets/brainstorm/bst_phantom_elekta.py index 8e5b5a8a69c..b5e13d385f3 100644 --- a/mne/datasets/brainstorm/bst_phantom_elekta.py +++ b/mne/datasets/brainstorm/bst_phantom_elekta.py @@ -22,7 +22,7 @@ def data_path( download=True, accept=False, *, - verbose=None + verbose=None, ): # noqa: D103 return _download_mne_dataset( name="bst_phantom_elekta", diff --git a/mne/datasets/brainstorm/bst_raw.py b/mne/datasets/brainstorm/bst_raw.py index f8d92e0b26c..3aeef5e49d2 100644 --- a/mne/datasets/brainstorm/bst_raw.py +++ b/mne/datasets/brainstorm/bst_raw.py @@ -38,7 +38,7 @@ def data_path( download=True, accept=False, *, - verbose=None + verbose=None, ): # noqa: D103 return _download_mne_dataset( name="bst_raw", diff --git a/mne/datasets/brainstorm/bst_resting.py b/mne/datasets/brainstorm/bst_resting.py index 9e2f8f7e73b..cef6ab986c6 100644 --- a/mne/datasets/brainstorm/bst_resting.py +++ b/mne/datasets/brainstorm/bst_resting.py @@ -25,7 +25,7 @@ def data_path( download=True, accept=False, *, - verbose=None + verbose=None, ): # noqa: D103 return _download_mne_dataset( name="bst_resting", diff --git a/mne/datasets/sleep_physionet/age.py b/mne/datasets/sleep_physionet/age.py index 0a7fb174d1c..b0d23a94b07 100644 --- a/mne/datasets/sleep_physionet/age.py +++ b/mne/datasets/sleep_physionet/age.py @@ -29,7 +29,7 @@ def fetch_data( base_url=BASE_URL, on_missing="raise", *, - verbose=None + verbose=None, ): # noqa: D301, E501 """Get paths to local copies of PhysioNet Polysomnography dataset files. 
@@ -134,12 +134,12 @@ def fetch_data( psg_fname, pdl = _fetch_one( psg_records["fname"][idx].decode(), psg_records["sha"][idx].decode(), - *params + *params, ) hyp_fname, hdl = _fetch_one( hyp_records["fname"][idx].decode(), hyp_records["sha"][idx].decode(), - *params + *params, ) fnames.append([psg_fname, hyp_fname]) if pdl: diff --git a/mne/datasets/sleep_physionet/temazepam.py b/mne/datasets/sleep_physionet/temazepam.py index 4f3071300ab..8a6efc19a9a 100644 --- a/mne/datasets/sleep_physionet/temazepam.py +++ b/mne/datasets/sleep_physionet/temazepam.py @@ -104,12 +104,12 @@ def fetch_data( psg_fname, pdl = _fetch_one( records["psg fname"][idx].decode(), records["psg sha"][idx].decode(), - *params + *params, ) hyp_fname, hdl = _fetch_one( records["hyp fname"][idx].decode(), records["hyp sha"][idx].decode(), - *params + *params, ) fnames.append([psg_fname, hyp_fname]) if pdl: diff --git a/mne/datasets/tests/test_datasets.py b/mne/datasets/tests/test_datasets.py index 6b43565cf33..211dd4d0b1c 100644 --- a/mne/datasets/tests/test_datasets.py +++ b/mne/datasets/tests/test_datasets.py @@ -205,7 +205,7 @@ def test_fetch_parcellations(tmp_path): labels, parc="HCPMMP1_round", table_name="./left.fsaverage164.label.gii", - **kwargs + **kwargs, ) orig = op.join(this_subjects_dir, "fsaverage", "label", "lh.HCPMMP1.annot") first = hashfunc(orig) diff --git a/mne/decoding/base.py b/mne/decoding/base.py index eaf7ecfa3ae..6489b7f113a 100644 --- a/mne/decoding/base.py +++ b/mne/decoding/base.py @@ -419,7 +419,7 @@ def _fit_and_score( error_score="raise", *, verbose=None, - position=0 + position=0, ): """Fit estimator and compute scores for a given dataset split.""" # This code is adapted from sklearn diff --git a/mne/decoding/csp.py b/mne/decoding/csp.py index 34f1950c627..abb85afc2e9 100644 --- a/mne/decoding/csp.py +++ b/mne/decoding/csp.py @@ -281,7 +281,7 @@ def plot_patterns( name_format="CSP%01d", nrows=1, ncols="auto", - show=True + show=True, ): """Plot topographic patterns of components. @@ -412,7 +412,7 @@ def plot_filters( name_format="CSP%01d", nrows=1, ncols="auto", - show=True + show=True, ): """Plot topographic filters of components. diff --git a/mne/decoding/transformer.py b/mne/decoding/transformer.py index 29b33dde86b..9675f7bef0c 100644 --- a/mne/decoding/transformer.py +++ b/mne/decoding/transformer.py @@ -383,7 +383,7 @@ def __init__( n_jobs=None, normalization="length", *, - verbose=None + verbose=None, ): # noqa: D102 self.sfreq = sfreq self.fmin = fmin @@ -512,7 +512,7 @@ def __init__( iir_params=None, fir_design="firwin", *, - verbose=None + verbose=None, ): # noqa: D102 self.info = info self.l_freq = l_freq @@ -838,7 +838,7 @@ def __init__( fir_window="hamming", fir_design="firwin", *, - verbose=None + verbose=None, ): # noqa: D102 self.l_freq = l_freq self.h_freq = h_freq diff --git a/mne/minimum_norm/resolution_matrix.py b/mne/minimum_norm/resolution_matrix.py index 1114c25722b..a01ece26ccb 100644 --- a/mne/minimum_norm/resolution_matrix.py +++ b/mne/minimum_norm/resolution_matrix.py @@ -78,7 +78,7 @@ def _get_psf_ctf( norm, return_pca_vars, vector=False, - verbose=None + verbose=None, ): """Get point-spread (PSFs) or cross-talk (CTFs) functions.""" # check for consistencies in input parameters @@ -302,7 +302,7 @@ def get_point_spread( norm=False, return_pca_vars=False, vector=False, - verbose=None + verbose=None, ): """Get point-spread (PSFs) functions for vertices. 
@@ -351,7 +351,7 @@ def get_cross_talk( norm=False, return_pca_vars=False, vector=False, - verbose=None + verbose=None, ): """Get cross-talk (CTFs) function for vertices. diff --git a/mne/preprocessing/ecg.py b/mne/preprocessing/ecg.py index a7ee6a81213..388fe9e416d 100644 --- a/mne/preprocessing/ecg.py +++ b/mne/preprocessing/ecg.py @@ -530,7 +530,7 @@ def _make_ecg(inst, start, stop, reject_by_annotation=False, verbose=None): picks, return_times=True, **kwargs, - reject_by_annotation=reject_by_annotation + reject_by_annotation=reject_by_annotation, ) elif isinstance(inst, BaseEpochs): ecg = np.hstack(inst.copy().get_data(picks, **kwargs)) diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py index a3ccad28080..a304d50f6c1 100644 --- a/mne/time_frequency/multitaper.py +++ b/mne/time_frequency/multitaper.py @@ -336,7 +336,7 @@ def psd_array_multitaper( n_jobs=None, *, max_iter=150, - verbose=None + verbose=None, ): r"""Compute power spectral density (PSD) using a multi-taper method. @@ -475,7 +475,7 @@ def tfr_array_multitaper( output="complex", n_jobs=None, *, - verbose=None + verbose=None, ): """Compute Time-Frequency Representation (TFR) using DPSS tapers. diff --git a/mne/viz/montage.py b/mne/viz/montage.py index f81174fa31a..19cd8c12a1b 100644 --- a/mne/viz/montage.py +++ b/mne/viz/montage.py @@ -20,7 +20,7 @@ def plot_montage( sphere=None, *, axes=None, - verbose=None + verbose=None, ): """Plot a montage. diff --git a/pyproject.toml b/pyproject.toml index ac247de714a..a9565e40d5a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,90 @@ +[project] +name = "mne" +description = "MNE-Python project for MEG and EEG data analysis." +maintainers = [ + { name = "Alexandre Gramfort", email = "alexandre.gramfort@inria.fr" }, +] +dynamic = ["version"] +license = { text = "BSD-3-Clause" } +readme = { file = "README.rst", content-type = "text/x-rst" } +requires-python = ">=3.8" +keywords = [ + "neuroscience", + "neuroimaging", + "MEG", + "EEG", + "ECoG", + "fNIRS", + "brain", +] +classifiers = [ + "Intended Audience :: Science/Research", + "Intended Audience :: Developers", + "License :: OSI Approved", + "Programming Language :: Python", + "Topic :: Software Development", + "Topic :: Scientific/Engineering", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX", + "Operating System :: Unix", + "Operating System :: MacOS", + "Programming Language :: Python :: 3", +] + +[project.scripts] +mne = "mne.commands.utils:main" + +[project.urls] +Homepage = "https://mne.tools/" +Download = "https://pypi.org/project/scikit-learn/#files" +"Bug Tracker" = "https://github.com/mne-tools/mne-python/issues/" +Documentation = "https://mne.tools/" +Forum = "https://mne.discourse.group/" +"Source Code" = "https://github.com/mne-tools/mne-python/" + +[build-system] +requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2", "wheel"] +build-backend = "setuptools.build_meta" + +[tool.setuptools_scm] +write_to = "mne/_version.py" +version_scheme = "release-branch-semver" + +[options] +zip_safe = false # the package can run out of an .egg file +include_package_data = true + +[tool.setuptools.package-data] +"mne" = [ + "data/eegbci_checksums.txt", + "data/*.sel", + "data/icos.fif.gz", + "data/coil_def*.dat", + "data/helmets/*.fif.gz", + "data/FreeSurferColorLUT.txt", + "data/image/*gif", + "data/image/*lout", + "data/fsaverage/*.fif", + "channels/data/layouts/*.lout", + "channels/data/layouts/*.lay", + "channels/data/montages/*.sfp", + 
"channels/data/montages/*.txt", + "channels/data/montages/*.elc", + "channels/data/neighbors/*.mat", + "datasets/sleep_physionet/SHA1SUMS", + "datasets/_fsaverage/*.txt", + "datasets/_infant/*.txt", + "datasets/_phantom/*.txt", + "html/*.js", + "html/*.css", + "html_templates/repr/*.jinja", + "html_templates/report/*.jinja", + "icons/*.svg", + "icons/*.png", + "io/artemis123/resources/*.csv", + "io/edf/gdf_encodes.txt", +] + [tool.codespell] ignore-words = "ignore_words.txt" builtin = "clear,rare,informal,names,usage" @@ -19,12 +106,12 @@ ignore-decorators = [ "setter", "mne.utils.copy_function_doc_to_method_doc", "mne.utils.copy_doc", - "mne.utils.deprecated" + "mne.utils.deprecated", ] [tool.ruff.per-file-ignores] "tutorials/time-freq/10_spectrum_class.py" = [ - "E501" # line too long + "E501", # line too long ] "mne/datasets/*/*.py" = [ "D103", # Missing docstring in public function @@ -52,4 +139,4 @@ junit_family = "xunit2" exclude = "(dist/)|(build/)|(.*\\.ipynb)" [tool.bandit.assert_used] -skips = ["*/test_*.py"] # assert statements are good practice with pytest +skips = ["*/test_*.py"] # assert statements are good practice with pytest diff --git a/setup.py b/setup.py index 7649cdc0f37..d3422b58a6a 100644 --- a/setup.py +++ b/setup.py @@ -22,15 +22,6 @@ def parse_requirements_file(fname): return requirements -DISTNAME = "mne" -DESCRIPTION = "MNE-Python project for MEG and EEG data analysis." -MAINTAINER = "Alexandre Gramfort" -MAINTAINER_EMAIL = "alexandre.gramfort@inria.fr" -URL = "https://mne.tools/dev/" -LICENSE = "BSD-3-Clause" -DOWNLOAD_URL = "http://github.com/mne-tools/mne-python" - - def package_tree(pkgroot): """Get the submodule list.""" # Adapted from VisPy @@ -47,9 +38,6 @@ def package_tree(pkgroot): if op.exists("MANIFEST"): os.remove("MANIFEST") - with open("README.rst", "r") as fid: - long_description = fid.read() - # data_dependencies is empty, but let's leave them so that we don't break # people's workflows who did `pip install mne[data]` install_requires = parse_requirements_file("requirements_base.txt") @@ -59,87 +47,11 @@ def package_tree(pkgroot): "requirements_testing.txt" ) + parse_requirements_file("requirements_testing_extra.txt") setup( - name=DISTNAME, - maintainer=MAINTAINER, - include_package_data=True, - maintainer_email=MAINTAINER_EMAIL, - description=DESCRIPTION, - license=LICENSE, - url=URL, - download_url=DOWNLOAD_URL, - long_description=long_description, - long_description_content_type="text/x-rst", - zip_safe=False, # the package can run out of an .egg file - classifiers=[ - "Intended Audience :: Science/Research", - "Intended Audience :: Developers", - "License :: OSI Approved", - "Programming Language :: Python", - "Topic :: Software Development", - "Topic :: Scientific/Engineering", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX", - "Operating System :: Unix", - "Operating System :: MacOS", - "Programming Language :: Python :: 3", - ], - keywords="neuroscience neuroimaging MEG EEG ECoG fNIRS brain", - project_urls={ - "Homepage": "https://mne.tools/", - "Download": "https://pypi.org/project/scikit-learn/#files", - "Bug Tracker": "https://github.com/mne-tools/mne-python/issues/", - "Documentation": "https://mne.tools/", - "Forum": "https://mne.discourse.group/", - "Source Code": "https://github.com/mne-tools/mne-python/", - }, - platforms="any", - python_requires=">=3.8", install_requires=install_requires, - setup_requires=["setuptools>=45", "setuptools_scm>=6.2"], - use_scm_version={ - "write_to": "mne/_version.py", - 
"version_scheme": "release-branch-semver", - }, extras_require={ "data": data_requires, "hdf5": hdf5_requires, "test": test_requires, }, packages=package_tree("mne"), - package_data={ - "mne": [ - op.join("data", "eegbci_checksums.txt"), - op.join("data", "*.sel"), - op.join("data", "icos.fif.gz"), - op.join("data", "coil_def*.dat"), - op.join("data", "helmets", "*.fif.gz"), - op.join("data", "FreeSurferColorLUT.txt"), - op.join("data", "image", "*gif"), - op.join("data", "image", "*lout"), - op.join("data", "fsaverage", "*.fif"), - op.join("channels", "data", "layouts", "*.lout"), - op.join("channels", "data", "layouts", "*.lay"), - op.join("channels", "data", "montages", "*.sfp"), - op.join("channels", "data", "montages", "*.txt"), - op.join("channels", "data", "montages", "*.elc"), - op.join("channels", "data", "neighbors", "*.mat"), - op.join("datasets", "sleep_physionet", "SHA1SUMS"), - op.join("datasets", "_fsaverage", "*.txt"), - op.join("datasets", "_infant", "*.txt"), - op.join("datasets", "_phantom", "*.txt"), - op.join("html", "*.js"), - op.join("html", "*.css"), - op.join("html_templates", "repr", "*.jinja"), - op.join("html_templates", "report", "*.jinja"), - op.join("icons", "*.svg"), - op.join("icons", "*.png"), - op.join("io", "artemis123", "resources", "*.csv"), - op.join("io", "edf", "gdf_encodes.txt"), - ] - }, - entry_points={ - "console_scripts": [ - "mne = mne.commands.utils:main", - ] - }, ) diff --git a/tutorials/inverse/50_beamformer_lcmv.py b/tutorials/inverse/50_beamformer_lcmv.py index 6edda3c0aeb..9ccc5371d74 100644 --- a/tutorials/inverse/50_beamformer_lcmv.py +++ b/tutorials/inverse/50_beamformer_lcmv.py @@ -274,7 +274,7 @@ # Could do this for a 3-panel figure: # view_layout='horizontal', views=['coronal', 'sagittal', 'axial'], brain_kwargs=dict(silhouette=True), - **kwargs + **kwargs, ) # %% diff --git a/tutorials/preprocessing/25_background_filtering.py b/tutorials/preprocessing/25_background_filtering.py index a48572285f7..a5ec433ac7c 100644 --- a/tutorials/preprocessing/25_background_filtering.py +++ b/tutorials/preprocessing/25_background_filtering.py @@ -613,7 +613,7 @@ def plot_signal(x, offset): gain, "Chebychev-1 order=8, ripple=1 dB", compensate=True, - **kwargs + **kwargs, ) # %% @@ -632,7 +632,7 @@ def plot_signal(x, offset): gain, "Chebychev-1 order=8, ripple=6 dB", compensate=True, - **kwargs + **kwargs, ) # %% @@ -655,7 +655,7 @@ def plot_signal(x, offset): gain, "Chebychev-1 order=8, ripple=6 dB", compensate=False, - **kwargs + **kwargs, ) # %% diff --git a/tutorials/preprocessing/45_projectors_background.py b/tutorials/preprocessing/45_projectors_background.py index 5a67789b38c..a970def83a1 100644 --- a/tutorials/preprocessing/45_projectors_background.py +++ b/tutorials/preprocessing/45_projectors_background.py @@ -71,7 +71,7 @@ def setup_3d_axes(): arrow_length_ratio=0.1, color="C1", linewidth=1, - linestyle="dashed" + linestyle="dashed", ) # %% @@ -182,7 +182,7 @@ def setup_3d_axes(): *np.concatenate([origin, trigger_effect]).flatten(), arrow_length_ratio=0.1, color="C2", - alpha=0.5 + alpha=0.5, ) # plot the original vector @@ -199,7 +199,7 @@ def setup_3d_axes(): *(projected_point + offset).flat, "({}, {}, {})".format(*np.round(projected_point.flat, 2)), color="C0", - horizontalalignment="right" + horizontalalignment="right", ) # add dashed arrow showing projection @@ -210,7 +210,7 @@ def setup_3d_axes(): arrow_length_ratio=0.1, color="C1", linewidth=1, - linestyle="dashed" + linestyle="dashed", ) # %% diff --git 
a/tutorials/stats-sensor-space/20_erp_stats.py b/tutorials/stats-sensor-space/20_erp_stats.py index 872b1166056..504a323cbe6 100644 --- a/tutorials/stats-sensor-space/20_erp_stats.py +++ b/tutorials/stats-sensor-space/20_erp_stats.py @@ -132,7 +132,7 @@ mask=significant_points, show_names="all", titles=None, - **time_unit + **time_unit, ) plt.colorbar(axes["Left"].images[-1], ax=list(axes.values()), shrink=0.3, label="µV") From 9d20815494aca4ccf134bcc8758aadad3a9954ae Mon Sep 17 00:00:00 2001 From: "Denis A. Engemann" Date: Wed, 4 Oct 2023 00:43:54 +0200 Subject: [PATCH 18/37] add details for Denis (#12065) --- .github/CODEOWNERS | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index dc4e47a0e18..8d17d366a06 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -33,18 +33,18 @@ *lcmv*.py @britta-wstnr # Channels -/mne/channels @agramfort @mscheltienne +/mne/channels @agramfort @mscheltienne @dengemann # Core sensor-space classes -/mne/epochs.py @drammock @agramfort @mscheltienne -/mne/evoked.py @drammock @agramfort @mscheltienne -/mne/io/*.* @drammock @cbrnr @agramfort @mscheltienne +/mne/epochs.py @drammock @agramfort @mscheltienne @dengemann +/mne/evoked.py @drammock @agramfort @mscheltienne @dengemann +/mne/io/*.* @drammock @cbrnr @agramfort @mscheltienne @dengemann # Current-source density -/mne/preprocessing/_csd.py @alexrockhill +/mne/preprocessing/_csd.py @alexrockhill @dengemann # Decoding -/mne/decoding/csp.py @cbrnr @agramfort +/mne/decoding/csp.py @cbrnr @agramfort @dengemann # fNIRS /mne/preprocessing/nirs @rob-luke @@ -74,14 +74,14 @@ /mne/minimum_norm @agramfort # Preprocessing -/mne/preprocessing/ica.py @cbrnr @adam2392 @agramfort @mscheltienne -/mne/preprocessing/infomax_.py @cbrnr @adam2392 @mscheltienne +/mne/preprocessing/ica.py @cbrnr @adam2392 @agramfort @mscheltienne @dengemann +/mne/preprocessing/infomax_.py @cbrnr @adam2392 @mscheltienne @dengemann /mne/preprocessing/*annotate*.py @mscheltienne /mne/preprocessing/bads.py @mscheltienne /mne/preprocessing/e*g.py @mscheltienne # Report -/mne/report @hoechenberger +/mne/report @hoechenberger @dengemann # Simulation /mne/simulation/ @agramfort @@ -96,22 +96,22 @@ /mne/time_frequency @drammock @adam2392 @mscheltienne # Viz -/mne/viz @drammock +/mne/viz @drammock @dengemann /mne/viz/_brain @larsoner @wmvanvliet /mne/viz/ui_events.py @wmvanvliet -/tutorials/visualization @larsoner @wmvanvliet -/examples/visualization @larsoner +/tutorials/visualization @larsoner @wmvanvliet @dengemann +/examples/visualization @larsoner @dengemann ######################### # Project-level / other # ######################### # Examples and tutorials -/examples @drammock @agramfort -/tutorials @drammock @agramfort +/examples @drammock @agramfort @dengemann +/tutorials @drammock @agramfort @dengemann # Non-tutorial documentation text and infrastructure -/doc @larsoner @drammock @agramfort +/doc @larsoner @drammock @agramfort @dengemann /logo @drammock # Installation documentation From c65345ad5cdadb466a8351e5e173aeb9a577f2a8 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 4 Oct 2023 02:12:19 -0400 Subject: [PATCH 19/37] BUG: Fix bug with pickling MNEBadsList (#12063) --- mne/_fiff/meas_info.py | 9 ++++++++- mne/_fiff/tests/test_meas_info.py | 10 ++++++++-- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/mne/_fiff/meas_info.py b/mne/_fiff/meas_info.py index 5f3fa7c0000..874504504cd 100644 --- a/mne/_fiff/meas_info.py +++ 
b/mne/_fiff/meas_info.py @@ -963,7 +963,13 @@ def __init__(self, *, bads, info): def extend(self, iterable): if not isinstance(iterable, list): iterable = list(iterable) - _check_bads_info_compat(iterable, self._mne_info) + # can happen during pickling + try: + info = self._mne_info + except AttributeError: + pass # can happen during pickling + else: + _check_bads_info_compat(iterable, info) return super().extend(iterable) def append(self, x): @@ -1551,6 +1557,7 @@ def __getstate__(self): def __setstate__(self, state): """Set state (for pickling).""" self._unlocked = state["_unlocked"] + self["bads"] = MNEBadsList(bads=self["bads"], info=self) def __setitem__(self, key, val): """Attribute setter.""" diff --git a/mne/_fiff/tests/test_meas_info.py b/mne/_fiff/tests/test_meas_info.py index feb30400d42..2535fb5c8e5 100644 --- a/mne/_fiff/tests/test_meas_info.py +++ b/mne/_fiff/tests/test_meas_info.py @@ -1070,21 +1070,27 @@ def test_channel_name_limit(tmp_path, monkeypatch, fname): apply_inverse(evoked, inv) # smoke test +@pytest.mark.parametrize("protocol", ("highest", "default")) @pytest.mark.parametrize("fname_info", (raw_fname, "create_info")) @pytest.mark.parametrize("unlocked", (True, False)) -def test_pickle(fname_info, unlocked): +def test_pickle(fname_info, unlocked, protocol): """Test that Info can be (un)pickled.""" if fname_info == "create_info": info = create_info(3, 1000.0, "eeg") else: info = read_info(fname_info) + protocol = getattr(pickle, f"{protocol.upper()}_PROTOCOL") + assert isinstance(info["bads"], MNEBadsList) + info["bads"] = info["ch_names"][:1] assert not info._unlocked info._unlocked = unlocked - data = pickle.dumps(info) + data = pickle.dumps(info, protocol=protocol) info_un = pickle.loads(data) # nosec B301 assert isinstance(info_un, Info) assert_object_equal(info, info_un) assert info_un._unlocked == unlocked + assert isinstance(info_un["bads"], MNEBadsList) + assert info_un["bads"]._mne_info is info_un def test_info_bad(): From 63ce95d4f0928e2ad035cb67c70ea06db44bd19f Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Wed, 4 Oct 2023 10:30:07 +0300 Subject: [PATCH 20/37] Collapsible html repr for raw/info (#12064) Co-buthored-by: Valerii Chirkov --- mne/_fiff/meas_info.py | 9 ++- mne/conftest.py | 2 +- mne/html_templates/repr/info.html.jinja | 90 ++++++++++++++++++++++--- mne/html_templates/repr/raw.html.jinja | 13 +--- mne/io/base.py | 8 ++- mne/utils/tests/test_misc.py | 2 +- tutorials/intro/30_info.py | 2 +- tutorials/intro/70_report.py | 2 +- tutorials/raw/10_raw_overview.py | 1 - 9 files changed, 98 insertions(+), 31 deletions(-) diff --git a/mne/_fiff/meas_info.py b/mne/_fiff/meas_info.py index 874504504cd..f1e67fa8306 100644 --- a/mne/_fiff/meas_info.py +++ b/mne/_fiff/meas_info.py @@ -14,6 +14,7 @@ import operator from textwrap import shorten import string +import uuid import numpy as np @@ -1884,7 +1885,7 @@ def _get_chs_for_repr(self): return good_channels, bad_channels, ecg, eog @repr_html - def _repr_html_(self, caption=None): + def _repr_html_(self, caption=None, duration=None, filenames=None): """Summarize info for HTML representation.""" if isinstance(caption, str): html = f"

<h4>{caption}</h4>

" @@ -1916,7 +1917,11 @@ def _repr_html_(self, caption=None): projs = None info_template = _get_html_template("repr", "info.html.jinja") + sections = ("General", "Channels", "Data") + section_ids = [f"section_{str(uuid.uuid4())}" for _ in sections] return html + info_template.render( + sections=sections, + section_ids=section_ids, caption=caption, meas_date=meas_date, projs=projs, @@ -1930,6 +1935,8 @@ def _repr_html_(self, caption=None): highpass=self.get("highpass"), sfreq=self.get("sfreq"), experimenter=self.get("experimenter"), + duration=duration, + filenames=filenames, ) def save(self, fname): diff --git a/mne/conftest.py b/mne/conftest.py index df914120ece..c1e6b36a93b 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -158,7 +158,7 @@ def pytest_configure(config): # h5py ignore:`product` is deprecated as of NumPy.*:DeprecationWarning # pandas - ignore:.*np\.find_common_type is deprecated.*:DeprecationWarning + ignore:In the future `np.long`.*:FutureWarning # https://github.com/joblib/joblib/issues/1454 ignore:.*`byte_bounds` is dep.*:DeprecationWarning # numpy distutils used by SciPy diff --git a/mne/html_templates/repr/info.html.jinja b/mne/html_templates/repr/info.html.jinja index ec01af7decd..f6d46c49f34 100644 --- a/mne/html_templates/repr/info.html.jinja +++ b/mne/html_templates/repr/info.html.jinja @@ -1,5 +1,47 @@ + + + + + + + + + {% if meas_date is not none %} @@ -7,7 +49,7 @@ {% endif %} - + {% if experimenter is not none %} @@ -15,8 +57,9 @@ {% endif %} + - {% if subject_info is not none %} + {% if subject_info is defined and subject_info is not none %} {% if 'his_id' in subject_info.keys() %} {% endif %} @@ -25,6 +68,13 @@ {% endif %} + + + {% if dig is not none %} @@ -32,43 +82,63 @@ {% endif %} - + - + - + - + - {% if sfreq is not none %} + + + + {% if sfreq is not none %} + {% endif %} {% if highpass is not none %} - + {% endif %} {% if lowpass is not none %} - + {% endif %} {% if projs is not none %} - + {% endif %} + {% if filenames %} + + + + + {% endif %} + {% if duration %} + + + + + {% endif %}
+ +
Measurement date{{ meas_date }}Unknown
Experimenter{{ experimenter }}Unknown
Participant{{ subject_info['his_id'] }}
+ +
Digitized points{{ dig|length }} pointsNot available
Good channels {{ good_channels }}
Bad channels {{ bad_channels }}
EOG channels {{ eog }}
ECG channels {{ ecg }}
+ +
Sampling frequency {{ '%0.2f'|format(sfreq) }} Hz
Highpass {{ '%0.2f'|format(highpass) }} Hz
Lowpass {{ '%0.2f'|format(lowpass) }} Hz
Projections {{ projs|join('
') | safe }}
Filenames{{ filenames|join('
') }}
Duration{{ duration }} (HH:MM:SS)
diff --git a/mne/html_templates/repr/raw.html.jinja b/mne/html_templates/repr/raw.html.jinja index 7bc8542eaee..9cba46f43bf 100644 --- a/mne/html_templates/repr/raw.html.jinja +++ b/mne/html_templates/repr/raw.html.jinja @@ -1,12 +1 @@ -{{ info_repr[:-9] | safe }} - {% if filenames %} - - Filenames - {{ filenames|join('
') }} - - {% endif %} - - Duration - {{ duration }} (HH:MM:SS) - - +{{ info_repr | safe }} diff --git a/mne/io/base.py b/mne/io/base.py index e4f917a7c29..3950861fed9 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -2069,9 +2069,11 @@ def _repr_html_(self, caption=None): duration = f"{int(hours):02d}:{int(minutes):02d}:{int(seconds):02d}" raw_template = _get_html_template("repr", "raw.html.jinja") return raw_template.render( - info_repr=self.info._repr_html_(caption=caption), - filenames=basenames, - duration=duration, + info_repr=self.info._repr_html_( + caption=caption, + filenames=basenames, + duration=duration, + ) ) def add_events(self, events, stim_channel=None, replace=False): diff --git a/mne/utils/tests/test_misc.py b/mne/utils/tests/test_misc.py index 14bfa183175..1309feafbec 100644 --- a/mne/utils/tests/test_misc.py +++ b/mne/utils/tests/test_misc.py @@ -24,7 +24,7 @@ def test_html_repr(): os.environ[key] = "True" # HTML repr on info = mne.create_info(10, 256) r = info._repr_html_() - assert r.startswith("") os.environ[key] = "False" # HTML repr off r = info._repr_html_() diff --git a/tutorials/intro/30_info.py b/tutorials/intro/30_info.py index 98e4aab097a..2de72747528 100644 --- a/tutorials/intro/30_info.py +++ b/tutorials/intro/30_info.py @@ -182,7 +182,7 @@ print(mne.pick_info(info, eeg_indices)["nchan"]) # %% -# We can also get a nice HTML representation in IPython like: +# We can also get a nice HTML representation in IPython like this: info diff --git a/tutorials/intro/70_report.py b/tutorials/intro/70_report.py index 0c8c086eff8..b23c8852694 100644 --- a/tutorials/intro/70_report.py +++ b/tutorials/intro/70_report.py @@ -17,7 +17,7 @@ HTML pages it generates are self-contained and do not require a running Python environment. However, it is less flexible as you can't change code and re-run something directly within the browser. This tutorial covers the basics of -building a report. As usual, we'll start by importing the modules and data we +building a report. As usual, we will start by importing the modules and data we need: """ diff --git a/tutorials/raw/10_raw_overview.py b/tutorials/raw/10_raw_overview.py index 0dbb8491d87..198cb5264ee 100644 --- a/tutorials/raw/10_raw_overview.py +++ b/tutorials/raw/10_raw_overview.py @@ -17,7 +17,6 @@ """ # %% - import os import numpy as np import matplotlib.pyplot as plt From d79606beef83e0b77512668ceb4945364113efbc Mon Sep 17 00:00:00 2001 From: Gonzalo Reina <32972801+Gon-reina@users.noreply.github.com> Date: Wed, 4 Oct 2023 14:56:46 +0100 Subject: [PATCH 21/37] Allow not dropping bads when creating or plotting Spectrum objs (#12006) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Daniel McCloy --- doc/changes/names.inc | 2 ++ mne/epochs.py | 3 +++ mne/evoked.py | 3 +++ mne/io/base.py | 3 +++ mne/time_frequency/spectrum.py | 13 ++++++++++++- mne/time_frequency/tests/test_spectrum.py | 12 +++++++++++- mne/utils/docs.py | 11 +++++++---- 7 files changed, 41 insertions(+), 6 deletions(-) diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 389583fe616..be6ee47ec01 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -184,6 +184,8 @@ .. _George O'Neill: https://georgeoneill.github.io +.. _Gonzalo Reina: https://greina.me/ + .. _Guillaume Dumas: https://mila.quebec/en/person/guillaume-dumas .. 
_Guillaume Favelier: https://github.com/GuillaumeFavelier diff --git a/mne/epochs.py b/mne/epochs.py index 7326a58fd23..e66826b6f50 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -2366,6 +2366,7 @@ def compute_psd( picks=None, proj=False, remove_dc=True, + exclude=(), *, n_jobs=1, verbose=None, @@ -2382,6 +2383,7 @@ def compute_psd( %(picks_good_data_noref)s %(proj_psd)s %(remove_dc)s + %(exclude_psd)s %(n_jobs)s %(verbose)s %(method_kw_psd)s @@ -2410,6 +2412,7 @@ def compute_psd( tmin=tmin, tmax=tmax, picks=picks, + exclude=exclude, proj=proj, remove_dc=remove_dc, n_jobs=n_jobs, diff --git a/mne/evoked.py b/mne/evoked.py index b05f9f9880d..63b0a93d8ba 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -1051,6 +1051,7 @@ def compute_psd( picks=None, proj=False, remove_dc=True, + exclude=(), *, n_jobs=1, verbose=None, @@ -1067,6 +1068,7 @@ def compute_psd( %(picks_good_data_noref)s %(proj_psd)s %(remove_dc)s + %(exclude_psd)s %(n_jobs)s %(verbose)s %(method_kw_psd)s @@ -1095,6 +1097,7 @@ def compute_psd( tmin=tmin, tmax=tmax, picks=picks, + exclude=exclude, proj=proj, remove_dc=remove_dc, reject_by_annotation=False, diff --git a/mne/io/base.py b/mne/io/base.py index 3950861fed9..5b1125a8115 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -2136,6 +2136,7 @@ def compute_psd( tmin=None, tmax=None, picks=None, + exclude=(), proj=False, remove_dc=True, reject_by_annotation=True, @@ -2153,6 +2154,7 @@ def compute_psd( %(fmin_fmax_psd)s %(tmin_tmax_psd)s %(picks_good_data_noref)s + %(exclude_psd)s %(proj_psd)s %(remove_dc)s %(reject_by_annotation_psd)s @@ -2184,6 +2186,7 @@ def compute_psd( tmin=tmin, tmax=tmax, picks=picks, + exclude=exclude, proj=proj, remove_dc=remove_dc, reject_by_annotation=reject_by_annotation, diff --git a/mne/time_frequency/spectrum.py b/mne/time_frequency/spectrum.py index c68d34f3fdc..52ca167ee6c 100644 --- a/mne/time_frequency/spectrum.py +++ b/mne/time_frequency/spectrum.py @@ -300,6 +300,7 @@ def __init__( tmin, tmax, picks, + exclude, proj, remove_dc, *, @@ -348,7 +349,9 @@ def __init__( # prep times and picks self._time_mask = _time_mask(inst.times, tmin, tmax, sfreq=self.sfreq) - self._picks = _picks_to_idx(inst.info, picks, "data", with_ref_meg=False) + self._picks = _picks_to_idx( + inst.info, picks, "data", exclude, with_ref_meg=False + ) # add the info object. 
bads and non-data channels were dropped by # _picks_to_idx() so we update the info accordingly: @@ -1081,6 +1084,7 @@ class Spectrum(BaseSpectrum): %(fmin_fmax_psd)s %(tmin_tmax_psd)s %(picks_good_data_noref)s + %(exclude_psd)s %(proj_psd)s %(remove_dc)s %(reject_by_annotation_psd)s @@ -1122,6 +1126,7 @@ def __init__( tmin, tmax, picks, + exclude, proj, remove_dc, reject_by_annotation, @@ -1145,6 +1150,7 @@ def __init__( tmin, tmax, picks, + exclude, proj, remove_dc, n_jobs=n_jobs, @@ -1290,6 +1296,7 @@ class EpochsSpectrum(BaseSpectrum, GetEpochsMixin): %(fmin_fmax_psd)s %(tmin_tmax_psd)s %(picks_good_data_noref)s + %(exclude_psd)s %(proj_psd)s %(remove_dc)s %(n_jobs)s @@ -1327,6 +1334,7 @@ def __init__( tmin, tmax, picks, + exclude, proj, remove_dc, *, @@ -1347,6 +1355,7 @@ def __init__( tmin, tmax, picks, + exclude, proj, remove_dc, n_jobs=n_jobs, @@ -1459,6 +1468,7 @@ def average(self, method="mean"): tmin=None, tmax=None, picks=None, + exclude=(), proj=None, remove_dc=None, reject_by_annotation=None, @@ -1561,6 +1571,7 @@ def read_spectrum(fname): tmin=None, tmax=None, picks=None, + exclude=(), proj=None, remove_dc=None, reject_by_annotation=None, diff --git a/mne/time_frequency/tests/test_spectrum.py b/mne/time_frequency/tests/test_spectrum.py index 5deac6c46ff..6a296808b20 100644 --- a/mne/time_frequency/tests/test_spectrum.py +++ b/mne/time_frequency/tests/test_spectrum.py @@ -171,6 +171,16 @@ def test_spectrum_reject_by_annot(raw): assert spect_no_annot != spect_reject_annot +def test_spectrum_bads_exclude(raw): + """Test bads are not removed unless exclude="bads".""" + raw.pick("mag") # get rid of IAS channel + spect_no_excld = raw.compute_psd() + spect_with_excld = raw.compute_psd(exclude="bads") + assert raw.info["bads"] == spect_no_excld.info["bads"] + assert spect_with_excld.info["bads"] == [] + assert set(raw.ch_names) - set(spect_with_excld.ch_names) == set(raw.info["bads"]) + + def test_spectrum_getitem_raw(raw_spectrum): """Test Spectrum.__getitem__ for Raw-derived spectra.""" want = raw_spectrum.get_data(slice(1, 3), fmax=7) @@ -280,7 +290,7 @@ def test_spectrum_to_data_frame(inst, request, evoked): extra_dim = () if is_epochs else (1,) extra_cols = ["freq", "condition", "epoch"] if is_epochs else ["freq"] # compute PSD - spectrum = inst if is_already_psd else inst.compute_psd() + spectrum = inst if is_already_psd else inst.compute_psd(exclude="bads") n_epo, n_chan, n_freq = extra_dim + spectrum.get_data().shape # test wide format df_wide = spectrum.to_data_frame() diff --git a/mne/utils/docs.py b/mne/utils/docs.py index d32e1923aa4..64670ed73e9 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -1356,12 +1356,15 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): _exclude_spectrum = """\ exclude : list of str | 'bads' Channel names to exclude{}. If ``'bads'``, channels - in ``spectrum.info['bads']`` are excluded; pass an empty list or tuple to - plot all channels (including "bad" channels, if any). + in ``{}info['bads']`` are excluded; pass an empty list to + include all channels (including "bad" channels, if any). """ -docdict["exclude_spectrum_get_data"] = _exclude_spectrum.format("") -docdict["exclude_spectrum_plot"] = _exclude_spectrum.format(" from being drawn") +docdict["exclude_psd"] = _exclude_spectrum.format("", "") +docdict["exclude_spectrum_get_data"] = _exclude_spectrum.format("", "spectrum.") +docdict["exclude_spectrum_plot"] = _exclude_spectrum.format( + " from being drawn", "spectrum." 
+) docdict[ "export_edf_note" From 905c12c6eac0b6bd67dc88707c32f1f804ae56fd Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Wed, 4 Oct 2023 17:52:52 +0300 Subject: [PATCH 22/37] clean up some deprecations (#12067) --- mne/io/__init__.py | 45 +-------------------- mne/io/tests/test_deprecation.py | 10 ----- mne/source_space/__init__.py | 34 +--------------- mne/source_space/tests/test_source_space.py | 6 --- 4 files changed, 2 insertions(+), 93 deletions(-) diff --git a/mne/io/__init__.py b/mne/io/__init__.py index b6261e9a688..fbf10cb403d 100644 --- a/mne/io/__init__.py +++ b/mne/io/__init__.py @@ -7,15 +7,11 @@ import lazy_loader as lazy -__getattr_lz__, __dir__, __all__ = lazy.attach( +__getattr__, __dir__, __all__ = lazy.attach( __name__, submodules=[ "constants", "pick", - # Remove these three in 1.6 along with their .py files - "proj", - "meas_info", - "reference", ], submod_attrs={ "base": ["BaseRaw", "concatenate_raws", "match_channel_orders"], @@ -61,42 +57,3 @@ ], }, ) - - -# Remove in 1.6 and change __getattr_lz__ to __getattr__ -from ..utils import warn as _warn -from .._fiff.reference import ( - set_eeg_reference as _set_eeg_reference, - set_bipolar_reference as _set_bipolar_reference, - add_reference_channels as _add_referenc_channels, -) -from .._fiff.meas_info import Info as _Info - - -def __getattr__(name): - """Try getting attribute from fiff submodule.""" - if name in ( - "set_eeg_reference", - "set_bipolar_reference", - "add_reference_channels", - ): - _warn( - f"mne.io.{name} is deprecated and will be removed in 1.6, " - "use mne.{name} instead", - FutureWarning, - ) - return globals()[f"_{name}"] - elif name == "RawFIF": - _warn( - "RawFIF is deprecated and will be removed in 1.6, use Raw instead", - FutureWarning, - ) - name = "Raw" - elif name == "Info": - _warn( - "mne.io.Info is deprecated and will be removed in 1.6, " - "use mne.Info instead", - FutureWarning, - ) - return _Info - return __getattr_lz__(name) diff --git a/mne/io/tests/test_deprecation.py b/mne/io/tests/test_deprecation.py index c74611c66e3..f69c2d438d7 100644 --- a/mne/io/tests/test_deprecation.py +++ b/mne/io/tests/test_deprecation.py @@ -4,7 +4,6 @@ # # License: BSD-3-Clause -from mne.io import Raw import pytest @@ -28,12 +27,3 @@ def test_deprecation(): with pytest.warns(FutureWarning, match=r"mne\.io\.meas_info\.read_info is dep"): meas_info.read_info - with pytest.warns(FutureWarning, match="RawFIF is deprecated"): - mne.io.RawFIF - with pytest.warns(FutureWarning, match="RawFIF is deprecated"): - from mne.io import RawFIF - assert RawFIF is Raw - with pytest.warns(FutureWarning, match="set_eeg_reference is deprecated"): - mne.io.set_eeg_reference - with pytest.warns(FutureWarning, match=r"use mne\.Info instead"): - mne.io.Info diff --git a/mne/source_space/__init__.py b/mne/source_space/__init__.py index 2b917129ff2..42506025869 100644 --- a/mne/source_space/__init__.py +++ b/mne/source_space/__init__.py @@ -2,7 +2,7 @@ import lazy_loader as lazy -__getattr_lz__, __dir__, __all__ = lazy.attach( +__getattr__, __dir__, __all__ = lazy.attach( __name__, submodules=["_source_space"], submod_attrs={ @@ -20,35 +20,3 @@ ], }, ) - - -from . 
import _source_space -from ..utils import warn as _warn - - -def __getattr__(name): - msg = out = None - try: - return __getattr_lz__(name) - except AttributeError: - try: - out = getattr(_source_space, name) - except AttributeError: - pass # will raise original error below - else: - # These should be removed (they're in the MNE namespace) - msg = f"mne.source_space.{name} is deprecated and will be removed in 1.6, " - if name in ( - "read_freesurfer_lut", - "get_mni_fiducials", - "get_volume_labels_from_aseg", - "get_volume_labels_from_src", - ): - msg += f"use mne.{name} instead" - else: - msg += "use public API instead" - if out is None: - raise - if msg is not None: - _warn(msg, FutureWarning) - return out diff --git a/mne/source_space/tests/test_source_space.py b/mne/source_space/tests/test_source_space.py index 990b0347bd9..b29751aa08f 100644 --- a/mne/source_space/tests/test_source_space.py +++ b/mne/source_space/tests/test_source_space.py @@ -1020,12 +1020,6 @@ def test_get_decimated_surfaces(src, n, nv): assert_array_equal(np.unique(s["tris"]), np.arange(nv)) -def test_deprecation(): - """Test deprecation of mne.source_space functions.""" - with pytest.warns(FutureWarning, match="use mne.get_volume_labels_from_src"): - mne.source_space.get_volume_labels_from_src - - # The following code was used to generate small-src.fif.gz. # Unfortunately the C code bombs when trying to add source space distances, # possibly due to incomplete "faking" of a smaller surface on our part here. From 05f1fa69fdddc2725bd3099280cd189e96a940f3 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 4 Oct 2023 18:14:36 -0400 Subject: [PATCH 23/37] BUG: Fix bug with sensor_colors (#12068) --- doc/changes/devel.rst | 2 + doc/conf.py | 2 +- mne/defaults.py | 3 + mne/gui/_coreg.py | 7 +- mne/utils/docs.py | 16 ++ mne/viz/_3d.py | 206 ++++++++++++------------- mne/viz/_brain/_brain.py | 6 + mne/viz/backends/_pyvista.py | 23 ++- mne/viz/tests/test_3d.py | 6 + tutorials/clinical/30_ecog.py | 9 +- tutorials/intro/40_sensor_locations.py | 1 - tutorials/intro/50_configure_mne.py | 2 +- 12 files changed, 154 insertions(+), 129 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index a639a64428b..d4532d60721 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -31,6 +31,7 @@ Enhancements - Add the possibility to provide a float between 0 and 1 as ``n_grad``, ``n_mag`` and ``n_eeg`` in `~mne.compute_proj_raw`, `~mne.compute_proj_epochs` and `~mne.compute_proj_evoked` to select the number of vectors based on the cumulative explained variance (:gh:`11919` by `Mathieu Scheltienne`_) - Added support for Artinis fNIRS data files to :func:`mne.io.read_raw_snirf` (:gh:`11926` by `Robert Luke`_) - Add helpful error messages when using methods on empty :class:`mne.Epochs`-objects (:gh:`11306` by `Martin Schulz`_) +- Add support for passing a :class:`python:dict` as ``sensor_color`` to specify per-channel-type colors in :func:`mne.viz.plot_alignment` (:gh:`12067` by `Eric Larson`_) - Add inferring EEGLAB files' montage unit automatically based on estimated head radius using :func:`read_raw_eeglab(..., montage_units="auto") ` (:gh:`11925` by `Jack Zhang`_, :gh:`11951` by `Eric Larson`_) - Add :class:`~mne.time_frequency.EpochsSpectrumArray` and :class:`~mne.time_frequency.SpectrumArray` to support creating power spectra from :class:`NumPy array ` data (:gh:`11803` by `Alex Rockhill`_) - Add support for writing forward solutions to HDF5 and convenience function :meth:`mne.Forward.save` (:gh:`12036` 
by `Eric Larson`_) @@ -56,6 +57,7 @@ Bugs - Fix bug with axis clip box boundaries in :func:`mne.viz.plot_evoked_topo` and related functions (:gh:`11999` by `Eric Larson`_) - Fix bug with ``subject_info`` when loading data from and exporting to EDF file (:gh:`11952` by `Paul Roujansky`_) - Fix bug with delayed checking of :class:`info["bads"] ` (:gh:`12038` by `Eric Larson`_) +- Fix bug with :func:`mne.viz.plot_alignment` where ``sensor_colors`` were not handled properly on a per-channel-type basis (:gh:`12067` by `Eric Larson`_) - Fix handling of channel information in annotations when loading data from and exporting to EDF file (:gh:`11960` :gh:`12017` :gh:`12044` by `Paul Roujansky`_) - Add missing ``overwrite`` and ``verbose`` parameters to :meth:`Transform.save() ` (:gh:`12004` by `Marijn van Vliet`_) - Fix parsing of eye-link :class:`~mne.Annotations` when ``apply_offsets=False`` is provided to :func:`~mne.io.read_raw_eyelink` (:gh:`12003` by `Mathieu Scheltienne`_) diff --git a/doc/conf.py b/doc/conf.py index 68e63396bd4..02d61c1210b 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -200,7 +200,7 @@ "path-like": ":term:`path-like`", "array-like": ":term:`array_like `", "Path": ":class:`python:pathlib.Path`", - "bool": ":class:`python:bool`", + "bool": ":ref:`python:typebool`", # Matplotlib "colormap": ":ref:`colormap `", "color": ":doc:`color `", diff --git a/mne/defaults.py b/mne/defaults.py index 498312caa15..3d3b4d45761 100644 --- a/mne/defaults.py +++ b/mne/defaults.py @@ -227,6 +227,7 @@ coreg=dict( mri_fid_opacity=1.0, dig_fid_opacity=1.0, + # go from unit scaling (e.g., unit-radius sphere) to meters mri_fid_scale=5e-3, dig_fid_scale=8e-3, extra_scale=4e-3, @@ -235,6 +236,8 @@ eegp_height=0.1, ecog_scale=5e-3, seeg_scale=5e-3, + meg_scale=1.0, # sensors are already in SI units + ref_meg_scale=1.0, dbs_scale=5e-3, fnirs_scale=5e-3, source_scale=5e-3, diff --git a/mne/gui/_coreg.py b/mne/gui/_coreg.py index c44bd71dd75..e11b61ed898 100644 --- a/mne/gui/_coreg.py +++ b/mne/gui/_coreg.py @@ -835,7 +835,7 @@ def _redraw(self, *, verbose=None): mri_fids=self._add_mri_fiducials, hsp=self._add_head_shape_points, hpi=self._add_hpi_coils, - eeg=self._add_eeg_channels, + eeg=self._add_eeg_fnirs_channels, head_fids=self._add_head_fiducials, helmet=self._add_helmet, ) @@ -1217,7 +1217,7 @@ def _add_head_shape_points(self): hsp_actors = None self._update_actor("head_shape_points", hsp_actors) - def _add_eeg_channels(self): + def _add_eeg_fnirs_channels(self): if self._eeg_channels: eeg = ["original"] picks = pick_types(self._info, eeg=(len(eeg) > 0), fnirs=True) @@ -1240,8 +1240,7 @@ def _add_eeg_channels(self): check_inside=self._check_inside, nearest=self._nearest, ) - sens_actors = actors["eeg"] - sens_actors.extend(actors["fnirs"]) + sens_actors = sum(actors.values(), list()) else: sens_actors = None else: diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 64670ed73e9..9f2bd0b5f83 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -3969,6 +3969,22 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): automatically generated, corresponding to all non-zero events. """ +docdict[ + "sensor_colors" +] = """ +sensor_colors : array-like of color | dict | None + Colors to use for the sensor glyphs. Can be None (default) to use default colors. 
+ A dict should provide the colors (values) for each channel type (keys), e.g.:: + + dict(eeg=eeg_colors) + + Where the value (``eeg_colors`` above) can be broadcast to an array of colors with + length that matches the number of channels of that type, i.e., is compatible with + :func:`matplotlib.colors.to_rgba_array`. A few examples of this for the case above + are the string ``"k"``, a list of ``n_eeg`` color strings, or an NumPy ndarray of + shape ``(n_eeg, 3)`` or ``(n_eeg, 4)``. +""" + docdict[ "sensors_topomap" ] = """ diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index f4aa2b1999c..ce99f2e6352 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -9,6 +9,7 @@ # # License: Simplified BSD +from collections import defaultdict import os import os.path as op import warnings @@ -604,11 +605,10 @@ def plot_alignment( .. versionadded:: 0.16 .. versionchanged:: 1.0 Defaults to ``'terrain'``. - sensor_colors : array-like | None - Colors to use for the sensor glyphs. Can be list-like of color strings - (length ``n_sensors``) or array-like of RGB(A) values (shape - ``(n_sensors, 3)`` or ``(n_sensors, 4)``). ``None`` (the default) uses - the default sensor colors for the :func:`~mne.viz.plot_alignment` GUI. + %(sensor_colors)s + + .. versionchanged:: 1.6 + Support for passing a ``dict`` was added. %(verbose)s Returns @@ -1437,29 +1437,16 @@ def _plot_sensors( sensor_colors=None, ): """Render sensors in a 3D scene.""" + from matplotlib.colors import to_rgba_array + defaults = DEFAULTS["coreg"] ch_pos, sources, detectors = _ch_pos_in_coord_frame( pick_info(info, picks), to_cf_t=to_cf_t, warn_meg=warn_meg ) - actors = dict( - meg=list(), - ref_meg=list(), - eeg=list(), - fnirs=list(), - ecog=list(), - seeg=list(), - dbs=list(), - ) - locs = dict( - eeg=list(), - fnirs=list(), - ecog=list(), - seeg=list(), - source=list(), - detector=list(), - ) - scalar = 1 if units == "m" else 1e3 + actors = defaultdict(lambda: list()) + locs = defaultdict(lambda: list()) + unit_scalar = 1 if units == "m" else 1e3 for ch_name, ch_coord in ch_pos.items(): ch_type = channel_type(info, info.ch_names.index(ch_name)) # for default picking @@ -1471,46 +1458,75 @@ def _plot_sensors( plot_sensors = (ch_type != "fnirs" or "channels" in fnirs) and ( ch_type != "eeg" or "original" in eeg ) - color = defaults[ch_type + "_color"] # plot sensors if isinstance(ch_coord, tuple): # is meg, plot coil - verts, triangles = ch_coord - actor, _ = renderer.surface( - surface=dict(rr=verts * scalar, tris=triangles), - color=color, - opacity=0.25, - backface_culling=True, - ) - actors[ch_type].append(actor) - else: - if plot_sensors: - locs[ch_type].append(ch_coord) + ch_coord = dict(rr=ch_coord[0] * unit_scalar, tris=ch_coord[1]) + if plot_sensors: + locs[ch_type].append(ch_coord) if ch_name in sources and "sources" in fnirs: locs["source"].append(sources[ch_name]) if ch_name in detectors and "detectors" in fnirs: locs["detector"].append(detectors[ch_name]) + # Plot these now if ch_name in sources and ch_name in detectors and "pairs" in fnirs: actor, _ = renderer.tube( # array of origin and dest points - origin=sources[ch_name][np.newaxis] * scalar, - destination=detectors[ch_name][np.newaxis] * scalar, - radius=0.001 * scalar, + origin=sources[ch_name][np.newaxis] * unit_scalar, + destination=detectors[ch_name][np.newaxis] * unit_scalar, + radius=0.001 * unit_scalar, ) actors[ch_type].append(actor) + del ch_type - # add sensors - for sensor_type in locs.keys(): - if len(locs[sensor_type]) > 0: - sens_loc = np.array(locs[sensor_type]) - 
sens_loc = sens_loc[~np.isnan(sens_loc).any(axis=1)] - scale = defaults[sensor_type + "_scale"] - if sensor_colors is None: - color = defaults[sensor_type + "_color"] + # now actually plot the sensors + extra = "" + types = (dict, None) + if len(locs) == 0: + return + elif len(locs) == 1: + # Upsample from array-like to dict when there is one channel type + extra = "(or array-like since only one sensor type is plotted)" + if sensor_colors is not None and not isinstance(sensor_colors, dict): + sensor_colors = { + list(locs)[0]: to_rgba_array(sensor_colors), + } + else: + extra = f"when more than one channel type ({list(locs)}) is plotted" + _validate_type(sensor_colors, types, "sensor_colors", extra=extra) + del extra, types + if sensor_colors is None: + sensor_colors = dict() + assert isinstance(sensor_colors, dict) + for ch_type, sens_loc in locs.items(): + assert len(sens_loc) # should be guaranteed above + colors = to_rgba_array(sensor_colors.get(ch_type, defaults[ch_type + "_color"])) + _check_option( + f"len(sensor_colors[{repr(ch_type)}])", + colors.shape[0], + (len(sens_loc), 1), + ) + scale = defaults[ch_type + "_scale"] * unit_scalar + if isinstance(sens_loc[0], dict): # meg coil + if len(colors) == 1: + colors = [colors[0]] * len(sens_loc) + for surface, color in zip(sens_loc, colors): + actor, _ = renderer.surface( + surface=surface, + color=color[:3], + opacity=sensor_opacity * color[3], + backface_culling=False, # visible from all sides + ) + actors[ch_type].append(actor) + else: + sens_loc = np.array(sens_loc, float) + mask = ~np.isnan(sens_loc).any(axis=1) + if len(colors) == 1: + # Single color mode (one actor) actor, _ = _plot_glyphs( renderer=renderer, - loc=sens_loc * scalar, - color=color, - scale=scale * scalar, - opacity=sensor_opacity, + loc=sens_loc[mask] * unit_scalar, + color=colors[0, :3], + scale=scale, + opacity=sensor_opacity * colors[0, 3], orient_glyphs=orient_glyphs, scale_by_distance=scale_by_distance, project_points=project_points, @@ -1518,31 +1534,18 @@ def _plot_sensors( check_inside=check_inside, nearest=nearest, ) - if sensor_type in ("source", "detector"): - sensor_type = "fnirs" - actors[sensor_type].append(actor) + actors[ch_type].append(actor) else: - actor_list = [] - for idx_sen in range(sens_loc.shape[0]): - sensor_colors = np.asarray(sensor_colors) - if ( - sensor_colors.ndim not in (1, 2) - or sensor_colors.shape[0] != sens_loc.shape[0] - ): - raise ValueError( - "sensor_colors should either be None or be " - "array-like with shape (n_sensors,) or " - "(n_sensors, 3) or (n_sensors, 4). Got shape " - f"{sensor_colors.shape}." 
- ) - color = sensor_colors[idx_sen] - + # Multi-color mode (multiple actors) + for loc, color, usable in zip(sens_loc, colors, mask): + if not usable: + continue actor, _ = _plot_glyphs( renderer=renderer, - loc=(sens_loc * scalar)[idx_sen, :], - color=color, - scale=scale * scalar, - opacity=sensor_opacity, + loc=loc * unit_scalar, + color=color[:3], + scale=scale, + opacity=sensor_opacity * color[3], orient_glyphs=orient_glyphs, scale_by_distance=scale_by_distance, project_points=project_points, @@ -1550,40 +1553,31 @@ def _plot_sensors( check_inside=check_inside, nearest=nearest, ) - actor_list.append(actor) - if sensor_type in ("source", "detector"): - sensor_type = "fnirs" - actors[sensor_type].append(actor_list) - - # add projected eeg - eeg_indices = pick_types(info, eeg=True) - if eeg_indices.size > 0 and "projected" in eeg: - logger.info("Projecting sensors to the head surface") - eeg_loc = np.array([ch_pos[info.ch_names[idx]] for idx in eeg_indices]) - eeg_loc = eeg_loc[~np.isnan(eeg_loc).any(axis=1)] - eegp_loc, eegp_nn = _project_onto_surface( - eeg_loc, head_surf, project_rrs=True, return_nn=True - )[2:4] - del eeg_loc - eegp_loc *= scalar - scale = defaults["eegp_scale"] * scalar - actor, _ = renderer.quiver3d( - x=eegp_loc[:, 0], - y=eegp_loc[:, 1], - z=eegp_loc[:, 2], - u=eegp_nn[:, 0], - v=eegp_nn[:, 1], - w=eegp_nn[:, 2], - color=defaults["eegp_color"], - mode="cylinder", - scale=scale, - opacity=0.6, - glyph_height=defaults["eegp_height"], - glyph_center=(0.0, -defaults["eegp_height"] / 2.0, 0), - glyph_resolution=20, - backface_culling=True, - ) - actors["eeg"].append(actor) + actors[ch_type].append(actor) + if ch_type == "eeg" and "projected" in eeg: + logger.info("Projecting sensors to the head surface") + eegp_loc, eegp_nn = _project_onto_surface( + sens_loc[mask], head_surf, project_rrs=True, return_nn=True + )[2:4] + eegp_loc *= unit_scalar + actor, _ = renderer.quiver3d( + x=eegp_loc[:, 0], + y=eegp_loc[:, 1], + z=eegp_loc[:, 2], + u=eegp_nn[:, 0], + v=eegp_nn[:, 1], + w=eegp_nn[:, 2], + color=defaults["eegp_color"], + mode="cylinder", + scale=defaults["eegp_scale"] * unit_scalar, + opacity=0.6, + glyph_height=defaults["eegp_height"], + glyph_center=(0.0, -defaults["eegp_height"] / 2.0, 0), + glyph_resolution=20, + backface_culling=True, + ) + actors["eeg"].append(actor) + actors = dict(actors) # get rid of defaultdict return actors diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index 49d4bbcc45a..f4d3a90eb0a 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -2763,6 +2763,8 @@ def add_sensors( seeg=True, dbs=True, max_dist=0.004, + *, + sensor_colors=None, verbose=None, ): """Add mesh objects to represent sensor positions. @@ -2778,6 +2780,9 @@ def add_sensors( %(seeg)s %(dbs)s %(max_dist_ieeg)s + %(sensor_colors)s + + .. 
versionadded:: 1.6 %(verbose)s Notes @@ -2832,6 +2837,7 @@ def add_sensors( warn_meg, head_surf, self._units, + sensor_colors=sensor_colors, ) for item, actors in sensors_actors.items(): for actor in actors: diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index bad105d36e2..700ff9e6870 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -373,17 +373,18 @@ def polydata( polygon_offset=None, **kwargs, ): + from matplotlib.colors import to_rgba_array + with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=FutureWarning) rgba = False - if color is not None and len(color) == mesh.n_points: - if color.shape[1] == 3: - scalars = np.c_[color, np.ones(mesh.n_points)] - else: - scalars = color - scalars = (scalars * 255).astype("ubyte") - color = None - rgba = True + if color is not None: + # See if we need to convert or not + check_color = to_rgba_array(color) + if len(check_color) == mesh.n_points: + scalars = (check_color * 255).astype("ubyte") + color = None + rgba = True if isinstance(colormap, np.ndarray): if colormap.dtype == np.uint8: colormap = colormap.astype(np.float64) / 255.0 @@ -395,24 +396,22 @@ def polydata( mesh.GetPointData().SetActiveNormals("Normals") else: _compute_normals(mesh) - if "rgba" in kwargs: - rgba = kwargs["rgba"] - kwargs.pop("rgba") smooth_shading = self.smooth_shading if representation == "wireframe": smooth_shading = False # never use smooth shading for wf + rgba = kwargs.pop("rgba", rgba) actor = _add_mesh( plotter=self.plotter, mesh=mesh, color=color, scalars=scalars, edge_color=color, - rgba=rgba, opacity=opacity, cmap=colormap, backface_culling=backface_culling, rng=[vmin, vmax], show_scalar_bar=False, + rgba=rgba, smooth_shading=smooth_shading, interpolate_before_map=interpolate_before_map, style=representation, diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py index 1a769aef2c3..f7993111543 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -278,8 +278,13 @@ def test_plot_alignment_meg(renderer, system): this_info = read_raw_kit(sqd_fname).info meg = ["helmet", "sensors"] + sensor_colors = "k" # should be upsampled to correct shape if system == "KIT": meg.append("ref") + with pytest.raises(TypeError, match="instance of dict"): + plot_alignment(this_info, meg=meg, sensor_colors=sensor_colors) + sensor_colors = dict(meg=sensor_colors) + sensor_colors["ref_meg"] = ["r"] * len(pick_types(this_info, ref_meg=True)) fig = plot_alignment( this_info, read_trans(trans_fname), @@ -287,6 +292,7 @@ def test_plot_alignment_meg(renderer, system): subjects_dir=subjects_dir, meg=meg, eeg=False, + sensor_colors=sensor_colors, ) assert isinstance(fig, Figure3D) # count the number of objects: should be n_meg_ch + 1 (helmet) + 1 (head) diff --git a/tutorials/clinical/30_ecog.py b/tutorials/clinical/30_ecog.py index e839b45365b..b97b44c1036 100644 --- a/tutorials/clinical/30_ecog.py +++ b/tutorials/clinical/30_ecog.py @@ -133,9 +133,9 @@ subjects_dir=subjects_dir, surfaces=["pial"], coord_frame="head", - sensor_colors=None, + sensor_colors=(1.0, 1.0, 1.0, 0.5), ) -mne.viz.set_3d_view(fig, azimuth=0, elevation=70) +mne.viz.set_3d_view(fig, azimuth=0, elevation=70, focalpoint="auto", distance="auto") xy, im = snapshot_brain_montage(fig, raw.info) @@ -165,7 +165,8 @@ gamma_power_at_15s -= gamma_power_at_15s.min() gamma_power_at_15s /= gamma_power_at_15s.max() rgba = colormaps.get_cmap("viridis") -sensor_colors = gamma_power_at_15s.map(rgba).tolist() +sensor_colors = 
np.array(gamma_power_at_15s.map(rgba).tolist(), float) +sensor_colors[:, 3] = 0.5 fig = plot_alignment( raw.info, @@ -177,7 +178,7 @@ sensor_colors=sensor_colors, ) -mne.viz.set_3d_view(fig, azimuth=0, elevation=70) +mne.viz.set_3d_view(fig, azimuth=0, elevation=70, focalpoint="auto", distance="auto") xy, im = snapshot_brain_montage(fig, raw.info) diff --git a/tutorials/intro/40_sensor_locations.py b/tutorials/intro/40_sensor_locations.py index 0ef663fa810..86fefe1bb80 100644 --- a/tutorials/intro/40_sensor_locations.py +++ b/tutorials/intro/40_sensor_locations.py @@ -9,7 +9,6 @@ MNE-Python handles physical locations of sensors. As usual we'll start by importing the modules we need: """ - # %% from pathlib import Path diff --git a/tutorials/intro/50_configure_mne.py b/tutorials/intro/50_configure_mne.py index b4ce508e87f..9c5d9d02f99 100644 --- a/tutorials/intro/50_configure_mne.py +++ b/tutorials/intro/50_configure_mne.py @@ -34,7 +34,7 @@ # %% # Note that the string values read from the JSON file are not parsed in any # way, so :func:`~mne.get_config` returns a string even for true/false config -# values, rather than a Python :ref:`boolean `. +# values, rather than a Python :ref:`boolean `. # Similarly, :func:`~mne.set_config` will only set string values (or ``None`` # values, to unset a variable): From 81b7ddfe3fc33f83ba7d472a6a67e714850d7c73 Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Thu, 5 Oct 2023 09:23:37 +0300 Subject: [PATCH 24/37] handle lazy loading through .pyi type stubs (#12072) Co-authored-by: Eric Larson --- MANIFEST.in | 1 + mne/__init__.py | 275 +---------- mne/__init__.pyi | 426 ++++++++++++++++++ mne/beamformer/__init__.py | 33 +- mne/beamformer/__init__.pyi | 34 ++ mne/channels/__init__.py | 48 +- mne/channels/__init__.pyi | 76 ++++ mne/commands/__init__.py | 7 +- mne/commands/__init__.pyi | 2 + mne/datasets/__init__.py | 43 +- mne/datasets/__init__.pyi | 72 +++ mne/decoding/__init__.py | 24 +- mne/decoding/__init__.pyi | 40 ++ mne/export/__init__.py | 10 +- mne/export/__init__.pyi | 3 + mne/forward/__init__.py | 55 +-- mne/forward/__init__.pyi | 86 ++++ mne/gui/__init__.py | 7 +- mne/gui/__init__.pyi | 2 + mne/html_templates/__init__.py | 7 +- mne/html_templates/__init__.pyi | 2 + mne/inverse_sparse/__init__.py | 11 +- mne/inverse_sparse/__init__.pyi | 3 + mne/io/__init__.py | 53 +-- mne/io/__init__.pyi | 87 ++++ mne/minimum_norm/__init__.py | 35 +- mne/minimum_norm/__init__.pyi | 50 ++ mne/preprocessing/__init__.py | 52 +-- mne/preprocessing/__init__.pyi | 91 ++++ mne/report/__init__.py | 9 +- mne/report/__init__.pyi | 2 + mne/simulation/__init__.py | 16 +- mne/simulation/__init__.pyi | 22 + mne/source_space/__init__.py | 20 +- mne/source_space/__init__.pyi | 22 + mne/stats/__init__.py | 32 +- mne/stats/__init__.pyi | 42 ++ mne/time_frequency/__init__.py | 55 +-- mne/time_frequency/__init__.pyi | 73 +++ mne/utils/__init__.py | 207 +-------- mne/utils/__init__.pyi | 384 ++++++++++++++++ mne/viz/__init__.py | 107 +---- mne/viz/__init__.pyi | 178 ++++++++ tools/{dev_reports => dev}/Makefile | 0 .../check_steering_committee.py | 0 tools/dev/generate_pyi_files.py | 76 ++++ .../unacknowledged-bug-reports.jq | 0 47 files changed, 1796 insertions(+), 1084 deletions(-) create mode 100644 mne/__init__.pyi create mode 100644 mne/beamformer/__init__.pyi create mode 100644 mne/channels/__init__.pyi create mode 100644 mne/commands/__init__.pyi create mode 100644 mne/datasets/__init__.pyi create mode 100644 mne/decoding/__init__.pyi create mode 100644 
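The ``sensor_colors`` handling above accepts either a single broadcastable color or one RGBA row per sensor, keyed by channel type. A sketch of both forms, assuming ``info``, ``trans``, ``subjects_dir`` and the FreeSurfer subject ``"sample"`` are already available (all placeholders here)::

    import numpy as np
    import mne

    n_meg = len(mne.pick_types(info, meg=True, exclude=()))
    sensor_colors = dict(
        # a single color is broadcast to every MEG sensor ...
        meg="k",
        # ... or pass one RGBA row per plotted MEG sensor, e.g. translucent red coils:
        # meg=np.tile([1.0, 0.0, 0.0, 0.5], (n_meg, 1)),
    )
    fig = mne.viz.plot_alignment(
        info,
        trans=trans,
        subject="sample",
        subjects_dir=subjects_dir,
        surfaces=["head"],
        meg=("helmet", "sensors"),
        eeg=False,
        sensor_colors=sensor_colors,
    )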
mne/export/__init__.pyi create mode 100644 mne/forward/__init__.pyi create mode 100644 mne/gui/__init__.pyi create mode 100644 mne/html_templates/__init__.pyi create mode 100644 mne/inverse_sparse/__init__.pyi create mode 100644 mne/io/__init__.pyi create mode 100644 mne/minimum_norm/__init__.pyi create mode 100644 mne/preprocessing/__init__.pyi create mode 100644 mne/report/__init__.pyi create mode 100644 mne/simulation/__init__.pyi create mode 100644 mne/source_space/__init__.pyi create mode 100644 mne/stats/__init__.pyi create mode 100644 mne/time_frequency/__init__.pyi create mode 100644 mne/utils/__init__.pyi create mode 100644 mne/viz/__init__.pyi rename tools/{dev_reports => dev}/Makefile (100%) rename tools/{dev_reports => dev}/check_steering_committee.py (100%) create mode 100644 tools/dev/generate_pyi_files.py rename tools/{dev_reports => dev}/unacknowledged-bug-reports.jq (100%) diff --git a/MANIFEST.in b/MANIFEST.in index 612153d940d..9e12649b5fe 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -15,6 +15,7 @@ recursive-include tutorials *.py recursive-include tutorials *.txt recursive-include mne *.py +recursive-include mne *.pyi recursive-include mne/data * recursive-include mne/icons * recursive-include mne/data/helmets * diff --git a/mne/__init__.py b/mne/__init__.py index 97cbedb6de5..d1c53c9eccf 100644 --- a/mne/__init__.py +++ b/mne/__init__.py @@ -1,5 +1,4 @@ """MNE software for MEG and EEG data analysis.""" - # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # @@ -26,279 +25,7 @@ from ._version import __version__ except ImportError: __version__ = "0.0.0" - - -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[ - "beamformer", - "channels", - "chpi", - "commands", - "coreg", - "cuda", - "datasets", - "decoding", - "defaults", - "dipole", - "epochs", - "event", - "export", - "filter", - "forward", - "gui", - "inverse_sparse", - "io", - "minimum_norm", - "preprocessing", - "report", - "source_space", - "simulation", - "stats", - "surface", - "time_frequency", - "viz", - ], - submod_attrs={ - "_freesurfer": [ - "get_volume_labels_from_aseg", - "head_to_mni", - "head_to_mri", - "read_freesurfer_lut", - "read_lta", - "read_talxfm", - "vertex_to_mni", - ], - "annotations": [ - "Annotations", - "annotations_from_events", - "count_annotations", - "events_from_annotations", - "read_annotations", - ], - "bem": [ - "make_bem_model", - "make_bem_solution", - "make_sphere_model", - "read_bem_solution", - "read_bem_surfaces", - "write_bem_solution", - "write_bem_surfaces", - "write_head_bem", - ], - "channels": [ - "equalize_channels", - "rename_channels", - "find_layout", - "read_vectorview_selection", - ], - "coreg": [ - "create_default_subject", - "scale_bem", - "scale_labels", - "scale_mri", - "scale_source_space", - ], - "cov": [ - "Covariance", - "compute_covariance", - "compute_raw_covariance", - "make_ad_hoc_cov", - "read_cov", - "whiten_evoked", - "write_cov", - ], - "dipole": [ - "Dipole", - "DipoleFixed", - "fit_dipole", - "read_dipole", - ], - "epochs": [ - "BaseEpochs", - "Epochs", - "EpochsArray", - "concatenate_epochs", - "make_fixed_length_epochs", - "read_epochs", - ], - "event": [ - "AcqParserFIF", - "concatenate_events", - "count_events", - "find_events", - "find_stim_steps", - "make_fixed_length_events", - "merge_events", - "pick_events", - "read_events", - "write_events", - ], - "evoked": [ - "Evoked", - "EvokedArray", - "combine_evoked", - "read_evokeds", - "write_evokeds", - ], - "forward": [ - "Forward", - 
"apply_forward_raw", - "apply_forward", - "average_forward_solutions", - "convert_forward_solution", - "make_field_map", - "make_forward_dipole", - "make_forward_solution", - "read_forward_solution", - "use_coil_def", - "write_forward_solution", - ], - "io": [ - "read_epochs_fieldtrip", - "read_evoked_besa", - "read_evoked_fieldtrip", - "read_evokeds_mff", - ], - "io.base": [ - "concatenate_raws", - "match_channel_orders", - ], - "io.eeglab": [ - "read_epochs_eeglab", - ], - "io.kit": [ - "read_epochs_kit", - ], - "_fiff.meas_info": [ - "Info", - "create_info", - ], - "_fiff.pick": [ - "channel_indices_by_type", - "channel_type", - "pick_channels_cov", - "pick_channels_forward", - "pick_channels_regexp", - "pick_channels", - "pick_info", - "pick_types_forward", - "pick_types", - ], - "_fiff.proj": [ - "Projection", - ], - "_fiff.reference": [ - "add_reference_channels", - "set_bipolar_reference", - "set_eeg_reference", - ], - "_fiff.what": [ - "what", - ], - "label": [ - "BiHemiLabel", - "grow_labels", - "label_sign_flip", - "Label", - "labels_to_stc", - "morph_labels", - "random_parcellation", - "read_label", - "read_labels_from_annot", - "split_label", - "stc_to_label", - "write_label", - "write_labels_to_annot", - ], - "misc": [ - "parse_config", - "read_reject_parameters", - ], - "morph_map": [ - "read_morph_map", - ], - "morph": [ - "SourceMorph", - "compute_source_morph", - "grade_to_vertices", - "read_source_morph", - ], - "proj": [ - "compute_proj_epochs", - "compute_proj_evoked", - "compute_proj_raw", - "read_proj", - "sensitivity_map", - "write_proj", - ], - "rank": [ - "compute_rank", - ], - "report": [ - "Report", - "open_report", - ], - "source_estimate": [ - "MixedSourceEstimate", - "MixedVectorSourceEstimate", - "SourceEstimate", - "VectorSourceEstimate", - "VolSourceEstimate", - "VolVectorSourceEstimate", - "extract_label_time_course", - "grade_to_tris", - "read_source_estimate", - "spatial_dist_adjacency", - "spatial_inter_hemi_adjacency", - "spatial_src_adjacency", - "spatial_tris_adjacency", - "spatio_temporal_dist_adjacency", - "spatio_temporal_src_adjacency", - "spatio_temporal_tris_adjacency", - "stc_near_sensors", - ], - "source_space._source_space": [ - "SourceSpaces", - "add_source_space_distances", - "get_volume_labels_from_src", - "morph_source_spaces", - "read_source_spaces", - "setup_source_space", - "setup_volume_source_space", - "write_source_spaces", - ], - "surface": [ - "decimate_surface", - "dig_mri_distances", - "get_head_surf", - "get_meg_helmet_surf", - "get_montage_volume_labels", - "read_surface", - "read_tri", - "write_surface", - ], - "transforms": [ - "Transform", - "read_trans", - "transform_surface_to", - "write_trans", - ], - "utils": [ - "get_config_path", - "get_config", - "grand_average", - "open_docs", - "set_cache_dir", - "set_config", - "set_memmap_min_size", - "sys_info", - "use_log_level", - "verbose", - ], - }, -) - +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) # initialize logging from .utils import set_log_level, set_log_file diff --git a/mne/__init__.pyi b/mne/__init__.pyi new file mode 100644 index 00000000000..5492a3dfdb5 --- /dev/null +++ b/mne/__init__.pyi @@ -0,0 +1,426 @@ +__all__ = [ + "AcqParserFIF", + "Annotations", + "BaseEpochs", + "BiHemiLabel", + "Covariance", + "Dipole", + "DipoleFixed", + "Epochs", + "EpochsArray", + "Evoked", + "EvokedArray", + "Forward", + "Info", + "Label", + "MixedSourceEstimate", + "MixedVectorSourceEstimate", + "Projection", + "Report", + "SourceEstimate", + 
"SourceMorph", + "SourceSpaces", + "Transform", + "VectorSourceEstimate", + "VolSourceEstimate", + "VolVectorSourceEstimate", + "add_reference_channels", + "add_source_space_distances", + "annotations_from_events", + "apply_forward", + "apply_forward_raw", + "average_forward_solutions", + "beamformer", + "channel_indices_by_type", + "channel_type", + "channels", + "chpi", + "combine_evoked", + "commands", + "compute_covariance", + "compute_proj_epochs", + "compute_proj_evoked", + "compute_proj_raw", + "compute_rank", + "compute_raw_covariance", + "compute_source_morph", + "concatenate_epochs", + "concatenate_events", + "concatenate_raws", + "convert_forward_solution", + "coreg", + "count_annotations", + "count_events", + "create_default_subject", + "create_info", + "cuda", + "datasets", + "decimate_surface", + "decoding", + "defaults", + "dig_mri_distances", + "dipole", + "epochs", + "equalize_channels", + "event", + "events_from_annotations", + "export", + "extract_label_time_course", + "filter", + "find_events", + "find_layout", + "find_stim_steps", + "fit_dipole", + "forward", + "get_config", + "get_config_path", + "get_head_surf", + "get_meg_helmet_surf", + "get_montage_volume_labels", + "get_volume_labels_from_aseg", + "get_volume_labels_from_src", + "grade_to_tris", + "grade_to_vertices", + "grand_average", + "grow_labels", + "gui", + "head_to_mni", + "head_to_mri", + "inverse_sparse", + "io", + "label_sign_flip", + "labels_to_stc", + "make_ad_hoc_cov", + "make_bem_model", + "make_bem_solution", + "make_field_map", + "make_fixed_length_epochs", + "make_fixed_length_events", + "make_forward_dipole", + "make_forward_solution", + "make_sphere_model", + "match_channel_orders", + "merge_events", + "minimum_norm", + "morph_labels", + "morph_source_spaces", + "open_docs", + "open_report", + "parse_config", + "pick_channels", + "pick_channels_cov", + "pick_channels_forward", + "pick_channels_regexp", + "pick_events", + "pick_info", + "pick_types", + "pick_types_forward", + "preprocessing", + "random_parcellation", + "read_annotations", + "read_bem_solution", + "read_bem_surfaces", + "read_cov", + "read_dipole", + "read_epochs", + "read_epochs_eeglab", + "read_epochs_fieldtrip", + "read_epochs_kit", + "read_events", + "read_evoked_besa", + "read_evoked_fieldtrip", + "read_evokeds", + "read_evokeds_mff", + "read_forward_solution", + "read_freesurfer_lut", + "read_label", + "read_labels_from_annot", + "read_lta", + "read_morph_map", + "read_proj", + "read_reject_parameters", + "read_source_estimate", + "read_source_morph", + "read_source_spaces", + "read_surface", + "read_talxfm", + "read_trans", + "read_tri", + "read_vectorview_selection", + "rename_channels", + "report", + "scale_bem", + "scale_labels", + "scale_mri", + "scale_source_space", + "sensitivity_map", + "set_bipolar_reference", + "set_cache_dir", + "set_config", + "set_eeg_reference", + "set_memmap_min_size", + "setup_source_space", + "setup_volume_source_space", + "simulation", + "source_space", + "spatial_dist_adjacency", + "spatial_inter_hemi_adjacency", + "spatial_src_adjacency", + "spatial_tris_adjacency", + "spatio_temporal_dist_adjacency", + "spatio_temporal_src_adjacency", + "spatio_temporal_tris_adjacency", + "split_label", + "stats", + "stc_near_sensors", + "stc_to_label", + "surface", + "sys_info", + "time_frequency", + "transform_surface_to", + "use_coil_def", + "use_log_level", + "verbose", + "vertex_to_mni", + "viz", + "what", + "whiten_evoked", + "write_bem_solution", + "write_bem_surfaces", + "write_cov", + 
"write_events", + "write_evokeds", + "write_forward_solution", + "write_head_bem", + "write_label", + "write_labels_to_annot", + "write_proj", + "write_source_spaces", + "write_surface", + "write_trans", +] +from . import ( + beamformer, + channels, + chpi, + commands, + coreg, + cuda, + datasets, + decoding, + defaults, + dipole, + epochs, + event, + export, + filter, + forward, + gui, + inverse_sparse, + io, + minimum_norm, + preprocessing, + report, + source_space, + simulation, + stats, + surface, + time_frequency, + viz, +) +from ._freesurfer import ( + get_volume_labels_from_aseg, + head_to_mni, + head_to_mri, + read_freesurfer_lut, + read_lta, + read_talxfm, + vertex_to_mni, +) +from .annotations import ( + Annotations, + annotations_from_events, + count_annotations, + events_from_annotations, + read_annotations, +) +from .bem import ( + make_bem_model, + make_bem_solution, + make_sphere_model, + read_bem_solution, + read_bem_surfaces, + write_bem_solution, + write_bem_surfaces, + write_head_bem, +) +from .channels import ( + equalize_channels, + rename_channels, + find_layout, + read_vectorview_selection, +) +from .coreg import ( + create_default_subject, + scale_bem, + scale_labels, + scale_mri, + scale_source_space, +) +from .cov import ( + Covariance, + compute_covariance, + compute_raw_covariance, + make_ad_hoc_cov, + read_cov, + whiten_evoked, + write_cov, +) +from .dipole import Dipole, DipoleFixed, fit_dipole, read_dipole +from .epochs import ( + BaseEpochs, + Epochs, + EpochsArray, + concatenate_epochs, + make_fixed_length_epochs, + read_epochs, +) +from .event import ( + AcqParserFIF, + concatenate_events, + count_events, + find_events, + find_stim_steps, + make_fixed_length_events, + merge_events, + pick_events, + read_events, + write_events, +) +from .evoked import Evoked, EvokedArray, combine_evoked, read_evokeds, write_evokeds +from .forward import ( + Forward, + apply_forward_raw, + apply_forward, + average_forward_solutions, + convert_forward_solution, + make_field_map, + make_forward_dipole, + make_forward_solution, + read_forward_solution, + use_coil_def, + write_forward_solution, +) +from .io import ( + read_epochs_fieldtrip, + read_evoked_besa, + read_evoked_fieldtrip, + read_evokeds_mff, +) +from .io.base import concatenate_raws, match_channel_orders +from .io.eeglab import read_epochs_eeglab +from .io.kit import read_epochs_kit +from ._fiff.meas_info import Info, create_info +from ._fiff.pick import ( + channel_indices_by_type, + channel_type, + pick_channels_cov, + pick_channels_forward, + pick_channels_regexp, + pick_channels, + pick_info, + pick_types_forward, + pick_types, +) +from ._fiff.proj import Projection +from ._fiff.reference import ( + add_reference_channels, + set_bipolar_reference, + set_eeg_reference, +) +from ._fiff.what import what +from .label import ( + BiHemiLabel, + grow_labels, + label_sign_flip, + Label, + labels_to_stc, + morph_labels, + random_parcellation, + read_label, + read_labels_from_annot, + split_label, + stc_to_label, + write_label, + write_labels_to_annot, +) +from .misc import parse_config, read_reject_parameters +from .morph_map import read_morph_map +from .morph import ( + SourceMorph, + compute_source_morph, + grade_to_vertices, + read_source_morph, +) +from .proj import ( + compute_proj_epochs, + compute_proj_evoked, + compute_proj_raw, + read_proj, + sensitivity_map, + write_proj, +) +from .rank import compute_rank +from .report import Report, open_report +from .source_estimate import ( + MixedSourceEstimate, + 
MixedVectorSourceEstimate, + SourceEstimate, + VectorSourceEstimate, + VolSourceEstimate, + VolVectorSourceEstimate, + extract_label_time_course, + grade_to_tris, + read_source_estimate, + spatial_dist_adjacency, + spatial_inter_hemi_adjacency, + spatial_src_adjacency, + spatial_tris_adjacency, + spatio_temporal_dist_adjacency, + spatio_temporal_src_adjacency, + spatio_temporal_tris_adjacency, + stc_near_sensors, +) +from .source_space._source_space import ( + SourceSpaces, + add_source_space_distances, + get_volume_labels_from_src, + morph_source_spaces, + read_source_spaces, + setup_source_space, + setup_volume_source_space, + write_source_spaces, +) +from .surface import ( + decimate_surface, + dig_mri_distances, + get_head_surf, + get_meg_helmet_surf, + get_montage_volume_labels, + read_surface, + read_tri, + write_surface, +) +from .transforms import Transform, read_trans, transform_surface_to, write_trans +from .utils import ( + get_config_path, + get_config, + grand_average, + open_docs, + set_cache_dir, + set_config, + set_memmap_min_size, + sys_info, + use_log_level, + verbose, +) diff --git a/mne/beamformer/__init__.py b/mne/beamformer/__init__.py index fec74e7b229..25c2156e7d0 100644 --- a/mne/beamformer/__init__.py +++ b/mne/beamformer/__init__.py @@ -1,35 +1,4 @@ """Beamformers for source localization.""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[], - submod_attrs={ - "_lcmv": [ - "make_lcmv", - "apply_lcmv", - "apply_lcmv_epochs", - "apply_lcmv_raw", - "apply_lcmv_cov", - ], - "_dics": [ - "make_dics", - "apply_dics", - "apply_dics_epochs", - "apply_dics_tfr_epochs", - "apply_dics_csd", - ], - "_rap_music": [ - "rap_music", - "trap_music", - ], - "_compute_beamformer": [ - "Beamformer", - "read_beamformer", - ], - "resolution_matrix": [ - "make_lcmv_resolution_matrix", - ], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/beamformer/__init__.pyi b/mne/beamformer/__init__.pyi new file mode 100644 index 00000000000..68129e9f081 --- /dev/null +++ b/mne/beamformer/__init__.pyi @@ -0,0 +1,34 @@ +__all__ = [ + "Beamformer", + "apply_dics", + "apply_dics_csd", + "apply_dics_epochs", + "apply_dics_tfr_epochs", + "apply_lcmv", + "apply_lcmv_cov", + "apply_lcmv_epochs", + "apply_lcmv_raw", + "make_dics", + "make_lcmv", + "make_lcmv_resolution_matrix", + "rap_music", + "read_beamformer", + "trap_music", +] +from ._lcmv import ( + make_lcmv, + apply_lcmv, + apply_lcmv_epochs, + apply_lcmv_raw, + apply_lcmv_cov, +) +from ._dics import ( + make_dics, + apply_dics, + apply_dics_epochs, + apply_dics_tfr_epochs, + apply_dics_csd, +) +from ._rap_music import rap_music, trap_music +from ._compute_beamformer import Beamformer, read_beamformer +from .resolution_matrix import make_lcmv_resolution_matrix diff --git a/mne/channels/__init__.py b/mne/channels/__init__.py index 3591d7aeeb4..6c63c47525c 100644 --- a/mne/channels/__init__.py +++ b/mne/channels/__init__.py @@ -2,52 +2,6 @@ Can be used for setting of sensor locations used for processing and plotting. 
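The stub-based lazy loading these hunks introduce keeps each ``__init__.py`` down to a single ``attach_stub`` call, with the public names enumerated once in the adjacent ``.pyi`` file. A sketch for a hypothetical subpackage (``mne.mymodule`` and ``_core`` are made-up names)::

    # mne/mymodule/__init__.py
    import lazy_loader as lazy

    (__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__)

and the matching type stub::

    # mne/mymodule/__init__.pyi
    __all__ = ["do_something"]
    from ._core import do_something

At import time ``lazy_loader`` parses the ``.pyi`` to learn which attributes exist and only imports ``._core`` when ``do_something`` is first accessed, while static type checkers read the same stub directly.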
""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[], - submod_attrs={ - "channels": [ - "equalize_channels", - "rename_channels", - "fix_mag_coil_types", - "read_ch_adjacency", - "find_ch_adjacency", - "make_1020_channel_selections", - "combine_channels", - "read_vectorview_selection", - "_SELECTIONS", - "_EEG_SELECTIONS", - "_divide_to_regions", - "get_builtin_ch_adjacencies", - "unify_bad_channels", - ], - "layout": [ - "Layout", - "make_eeg_layout", - "make_grid_layout", - "read_layout", - "find_layout", - "generate_2d_layout", - ], - "montage": [ - "DigMontage", - "get_builtin_montages", - "make_dig_montage", - "read_dig_dat", - "read_dig_egi", - "read_dig_captrak", - "read_dig_fif", - "read_dig_polhemus_isotrak", - "read_polhemus_fastscan", - "compute_dev_head_t", - "make_standard_montage", - "read_custom_montage", - "read_dig_hpts", - "read_dig_localite", - "compute_native_head_t", - ], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/channels/__init__.pyi b/mne/channels/__init__.pyi new file mode 100644 index 00000000000..bb9c2f96413 --- /dev/null +++ b/mne/channels/__init__.pyi @@ -0,0 +1,76 @@ +__all__ = [ + "DigMontage", + "Layout", + "_EEG_SELECTIONS", + "_SELECTIONS", + "_divide_to_regions", + "combine_channels", + "compute_dev_head_t", + "compute_native_head_t", + "equalize_channels", + "find_ch_adjacency", + "find_layout", + "fix_mag_coil_types", + "generate_2d_layout", + "get_builtin_ch_adjacencies", + "get_builtin_montages", + "make_1020_channel_selections", + "make_dig_montage", + "make_eeg_layout", + "make_grid_layout", + "make_standard_montage", + "read_ch_adjacency", + "read_custom_montage", + "read_dig_captrak", + "read_dig_dat", + "read_dig_egi", + "read_dig_fif", + "read_dig_hpts", + "read_dig_localite", + "read_dig_polhemus_isotrak", + "read_layout", + "read_polhemus_fastscan", + "read_vectorview_selection", + "rename_channels", + "unify_bad_channels", +] +from .channels import ( + equalize_channels, + rename_channels, + fix_mag_coil_types, + read_ch_adjacency, + find_ch_adjacency, + make_1020_channel_selections, + combine_channels, + read_vectorview_selection, + _SELECTIONS, + _EEG_SELECTIONS, + _divide_to_regions, + get_builtin_ch_adjacencies, + unify_bad_channels, +) +from .layout import ( + Layout, + make_eeg_layout, + make_grid_layout, + read_layout, + find_layout, + generate_2d_layout, +) +from .montage import ( + DigMontage, + get_builtin_montages, + make_dig_montage, + read_dig_dat, + read_dig_egi, + read_dig_captrak, + read_dig_fif, + read_dig_polhemus_isotrak, + read_polhemus_fastscan, + compute_dev_head_t, + make_standard_montage, + read_custom_montage, + read_dig_hpts, + read_dig_localite, + compute_native_head_t, +) diff --git a/mne/commands/__init__.py b/mne/commands/__init__.py index 813e2309557..11b3610605d 100644 --- a/mne/commands/__init__.py +++ b/mne/commands/__init__.py @@ -1,9 +1,4 @@ """Command-line utilities.""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=["utils"], - submod_attrs={}, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/commands/__init__.pyi b/mne/commands/__init__.pyi new file mode 100644 index 00000000000..c0caf04abbb --- /dev/null +++ b/mne/commands/__init__.pyi @@ -0,0 +1,2 @@ +__all__ = ["utils"] +from . 
import utils diff --git a/mne/datasets/__init__.py b/mne/datasets/__init__.py index 1ff3a0316d2..5c15b0c69d7 100644 --- a/mne/datasets/__init__.py +++ b/mne/datasets/__init__.py @@ -4,45 +4,4 @@ """ import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[ - "fieldtrip_cmc", - "brainstorm", - "visual_92_categories", - "kiloword", - "eegbci", - "hf_sef", - "misc", - "mtrf", - "sample", - "somato", - "multimodal", - "fnirs_motor", - "opm", - "spm_face", - "testing", - "_fake", - "phantom_4dbti", - "sleep_physionet", - "limo", - "refmeg_noise", - "ssvep", - "erp_core", - "epilepsy_ecog", - "eyelink", - "ucl_opm_auditory", - ], - submod_attrs={ - "_fetch": ["fetch_dataset"], - "_fsaverage.base": ["fetch_fsaverage"], - "_infant.base": ["fetch_infant_template"], - "_phantom.base": ["fetch_phantom"], - "utils": [ - "_download_all_example_data", - "fetch_hcp_mmp_parcellation", - "fetch_aparc_sub_parcellation", - "has_dataset", - ], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/datasets/__init__.pyi b/mne/datasets/__init__.pyi new file mode 100644 index 00000000000..96964148d3e --- /dev/null +++ b/mne/datasets/__init__.pyi @@ -0,0 +1,72 @@ +__all__ = [ + "_download_all_example_data", + "_fake", + "brainstorm", + "eegbci", + "epilepsy_ecog", + "erp_core", + "eyelink", + "fetch_aparc_sub_parcellation", + "fetch_dataset", + "fetch_fsaverage", + "fetch_hcp_mmp_parcellation", + "fetch_infant_template", + "fetch_phantom", + "fieldtrip_cmc", + "fnirs_motor", + "has_dataset", + "hf_sef", + "kiloword", + "limo", + "misc", + "mtrf", + "multimodal", + "opm", + "phantom_4dbti", + "refmeg_noise", + "sample", + "sleep_physionet", + "somato", + "spm_face", + "ssvep", + "testing", + "ucl_opm_auditory", + "visual_92_categories", +] +from . 
import ( + fieldtrip_cmc, + brainstorm, + visual_92_categories, + kiloword, + eegbci, + hf_sef, + misc, + mtrf, + sample, + somato, + multimodal, + fnirs_motor, + opm, + spm_face, + testing, + _fake, + phantom_4dbti, + sleep_physionet, + limo, + refmeg_noise, + ssvep, + erp_core, + epilepsy_ecog, + eyelink, + ucl_opm_auditory, +) +from ._fetch import fetch_dataset +from ._fsaverage.base import fetch_fsaverage +from ._infant.base import fetch_infant_template +from ._phantom.base import fetch_phantom +from .utils import ( + _download_all_example_data, + fetch_hcp_mmp_parcellation, + fetch_aparc_sub_parcellation, + has_dataset, +) diff --git a/mne/decoding/__init__.py b/mne/decoding/__init__.py index 8f83e7875fb..ea140501e26 100644 --- a/mne/decoding/__init__.py +++ b/mne/decoding/__init__.py @@ -1,26 +1,4 @@ """Decoding and encoding, including machine learning and receptive fields.""" import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[], - submod_attrs={ - "base": ["BaseEstimator", "LinearModel", "cross_val_multiscore", "get_coef"], - "csp": ["CSP", "SPoC"], - "ems": ["EMS", "compute_ems"], - "mixin": ["TransformerMixin"], - "receptive_field": ["ReceptiveField"], - "search_light": ["GeneralizingEstimator", "SlidingEstimator"], - "ssd": ["SSD"], - "time_delaying_ridge": ["TimeDelayingRidge"], - "time_frequency": ["TimeFrequency"], - "transformer": [ - "FilterEstimator", - "PSDEstimator", - "Scaler", - "TemporalFilter", - "UnsupervisedSpatialFilter", - "Vectorizer", - ], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/decoding/__init__.pyi b/mne/decoding/__init__.pyi new file mode 100644 index 00000000000..4c37a6bc496 --- /dev/null +++ b/mne/decoding/__init__.pyi @@ -0,0 +1,40 @@ +__all__ = [ + "BaseEstimator", + "CSP", + "EMS", + "FilterEstimator", + "GeneralizingEstimator", + "LinearModel", + "PSDEstimator", + "ReceptiveField", + "SPoC", + "SSD", + "Scaler", + "SlidingEstimator", + "TemporalFilter", + "TimeDelayingRidge", + "TimeFrequency", + "TransformerMixin", + "UnsupervisedSpatialFilter", + "Vectorizer", + "compute_ems", + "cross_val_multiscore", + "get_coef", +] +from .base import BaseEstimator, LinearModel, cross_val_multiscore, get_coef +from .csp import CSP, SPoC +from .ems import EMS, compute_ems +from .mixin import TransformerMixin +from .receptive_field import ReceptiveField +from .search_light import GeneralizingEstimator, SlidingEstimator +from .ssd import SSD +from .time_delaying_ridge import TimeDelayingRidge +from .time_frequency import TimeFrequency +from .transformer import ( + FilterEstimator, + PSDEstimator, + Scaler, + TemporalFilter, + UnsupervisedSpatialFilter, + Vectorizer, +) diff --git a/mne/export/__init__.py b/mne/export/__init__.py index ffaee713afa..eb213750708 100644 --- a/mne/export/__init__.py +++ b/mne/export/__init__.py @@ -1,12 +1,4 @@ """Functions for exporting data to non-FIF formats.""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[], - submod_attrs={ - "_export": ["export_raw", "export_epochs", "export_evokeds"], - "_egimff": ["export_evokeds_mff"], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/export/__init__.pyi b/mne/export/__init__.pyi new file mode 100644 index 00000000000..e5376ed94e3 --- /dev/null +++ b/mne/export/__init__.pyi @@ -0,0 +1,3 @@ +__all__ = ["export_epochs", "export_evokeds", "export_evokeds_mff", "export_raw"] +from ._export import 
export_raw, export_epochs, export_evokeds +from ._egimff import export_evokeds_mff diff --git a/mne/forward/__init__.py b/mne/forward/__init__.py index 7738ca51090..4b16f0ab55d 100644 --- a/mne/forward/__init__.py +++ b/mne/forward/__init__.py @@ -1,55 +1,4 @@ """Forward modeling code.""" +import lazy_loader as lazy # for testing purposes -import lazy_loader as lazy - -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=["_lead_dots"], # for testing purposes - submod_attrs={ - "forward": [ - "Forward", - "read_forward_solution", - "write_forward_solution", - "is_fixed_orient", - "_read_forward_meas_info", - "_select_orient_forward", - "compute_orient_prior", - "compute_depth_prior", - "apply_forward", - "apply_forward_raw", - "restrict_forward_to_stc", - "restrict_forward_to_label", - "average_forward_solutions", - "_stc_src_sel", - "_fill_measurement_info", - "_apply_forward", - "_subject_from_forward", - "convert_forward_solution", - "_merge_fwds", - "_do_forward_solution", - ], - "_compute_forward": [ - "_magnetic_dipole_field_vec", - "_compute_forwards", - "_concatenate_coils", - ], - "_field_interpolation": [ - "_make_surface_mapping", - "make_field_map", - "_as_meg_type_inst", - "_map_meg_or_eeg_channels", - ], - "_make_forward": [ - "make_forward_solution", - "_prepare_for_forward", - "_prep_meg_channels", - "_prep_eeg_channels", - "_to_forward_dict", - "_create_meg_coils", - "_read_coil_defs", - "_transform_orig_meg_coils", - "make_forward_dipole", - "use_coil_def", - ], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/forward/__init__.pyi b/mne/forward/__init__.pyi new file mode 100644 index 00000000000..36d35b913ca --- /dev/null +++ b/mne/forward/__init__.pyi @@ -0,0 +1,86 @@ +__all__ = [ + "Forward", + "_apply_forward", + "_as_meg_type_inst", + "_compute_forwards", + "_concatenate_coils", + "_create_meg_coils", + "_do_forward_solution", + "_fill_measurement_info", + "_lead_dots", + "_magnetic_dipole_field_vec", + "_make_surface_mapping", + "_map_meg_or_eeg_channels", + "_merge_fwds", + "_prep_eeg_channels", + "_prep_meg_channels", + "_prepare_for_forward", + "_read_coil_defs", + "_read_forward_meas_info", + "_select_orient_forward", + "_stc_src_sel", + "_subject_from_forward", + "_to_forward_dict", + "_transform_orig_meg_coils", + "apply_forward", + "apply_forward_raw", + "average_forward_solutions", + "compute_depth_prior", + "compute_orient_prior", + "convert_forward_solution", + "is_fixed_orient", + "make_field_map", + "make_forward_dipole", + "make_forward_solution", + "read_forward_solution", + "restrict_forward_to_label", + "restrict_forward_to_stc", + "use_coil_def", + "write_forward_solution", +] +from . 
import _lead_dots +from .forward import ( + Forward, + read_forward_solution, + write_forward_solution, + is_fixed_orient, + _read_forward_meas_info, + _select_orient_forward, + compute_orient_prior, + compute_depth_prior, + apply_forward, + apply_forward_raw, + restrict_forward_to_stc, + restrict_forward_to_label, + average_forward_solutions, + _stc_src_sel, + _fill_measurement_info, + _apply_forward, + _subject_from_forward, + convert_forward_solution, + _merge_fwds, + _do_forward_solution, +) +from ._compute_forward import ( + _magnetic_dipole_field_vec, + _compute_forwards, + _concatenate_coils, +) +from ._field_interpolation import ( + _make_surface_mapping, + make_field_map, + _as_meg_type_inst, + _map_meg_or_eeg_channels, +) +from ._make_forward import ( + make_forward_solution, + _prepare_for_forward, + _prep_meg_channels, + _prep_eeg_channels, + _to_forward_dict, + _create_meg_coils, + _read_coil_defs, + _transform_orig_meg_coils, + make_forward_dipole, + use_coil_def, +) diff --git a/mne/gui/__init__.py b/mne/gui/__init__.py index c3a5b9aada5..3f7a393c4ac 100644 --- a/mne/gui/__init__.py +++ b/mne/gui/__init__.py @@ -1,9 +1,4 @@ """Convenience functions for opening GUIs.""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[], - submod_attrs={"_gui": ["coregistration", "_GUIScraper"]}, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/gui/__init__.pyi b/mne/gui/__init__.pyi new file mode 100644 index 00000000000..77a7310e4ad --- /dev/null +++ b/mne/gui/__init__.pyi @@ -0,0 +1,2 @@ +__all__ = ["_GUIScraper", "coregistration"] +from ._gui import coregistration, _GUIScraper diff --git a/mne/html_templates/__init__.py b/mne/html_templates/__init__.py index 22690df3076..851c785db58 100644 --- a/mne/html_templates/__init__.py +++ b/mne/html_templates/__init__.py @@ -1,9 +1,4 @@ """Jinja2 HTML templates.""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[], - submod_attrs={"_templates": ["_get_html_template"]}, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/html_templates/__init__.pyi b/mne/html_templates/__init__.pyi new file mode 100644 index 00000000000..2312227d990 --- /dev/null +++ b/mne/html_templates/__init__.pyi @@ -0,0 +1,2 @@ +__all__ = ["_get_html_template"] +from ._templates import _get_html_template diff --git a/mne/inverse_sparse/__init__.py b/mne/inverse_sparse/__init__.py index 4310e1d224a..134b2f3496c 100644 --- a/mne/inverse_sparse/__init__.py +++ b/mne/inverse_sparse/__init__.py @@ -1,16 +1,7 @@ """Non-Linear sparse inverse solvers.""" - # Author: Alexandre Gramfort # # License: Simplified BSD - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[], - submod_attrs={ - "mxne_inverse": ["mixed_norm", "tf_mixed_norm", "make_stc_from_dipoles"], - "_gamma_map": ["gamma_map"], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/inverse_sparse/__init__.pyi b/mne/inverse_sparse/__init__.pyi new file mode 100644 index 00000000000..5cd096d92c1 --- /dev/null +++ b/mne/inverse_sparse/__init__.pyi @@ -0,0 +1,3 @@ +__all__ = ["gamma_map", "make_stc_from_dipoles", "mixed_norm", "tf_mixed_norm"] +from .mxne_inverse import mixed_norm, tf_mixed_norm, make_stc_from_dipoles +from ._gamma_map import gamma_map diff --git a/mne/io/__init__.py b/mne/io/__init__.py index fbf10cb403d..ba7f3113794 100644 --- 
a/mne/io/__init__.py +++ b/mne/io/__init__.py @@ -1,59 +1,8 @@ """IO module for reading raw data.""" - # Authors: Alexandre Gramfort # Matti Hämäläinen # # License: BSD-3-Clause - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[ - "constants", - "pick", - ], - submod_attrs={ - "base": ["BaseRaw", "concatenate_raws", "match_channel_orders"], - "array": ["RawArray"], - "besa": ["read_evoked_besa"], - "brainvision": ["read_raw_brainvision"], - "bti": ["read_raw_bti"], - "cnt": ["read_raw_cnt"], - "ctf": ["read_raw_ctf"], - "curry": ["read_raw_curry"], - "edf": ["read_raw_edf", "read_raw_bdf", "read_raw_gdf"], - "egi": ["read_raw_egi", "read_evokeds_mff"], - "kit": ["read_raw_kit", "read_epochs_kit"], - "fiff": ["read_raw_fif", "Raw"], - "fil": ["read_raw_fil"], - "nedf": ["read_raw_nedf"], - "nicolet": ["read_raw_nicolet"], - "artemis123": ["read_raw_artemis123"], - "eeglab": ["read_raw_eeglab", "read_epochs_eeglab"], - "eximia": ["read_raw_eximia"], - "hitachi": ["read_raw_hitachi"], - "nirx": ["read_raw_nirx"], - "boxy": ["read_raw_boxy"], - "snirf": ["read_raw_snirf"], - "persyst": ["read_raw_persyst"], - "fieldtrip": [ - "read_raw_fieldtrip", - "read_epochs_fieldtrip", - "read_evoked_fieldtrip", - ], - "nihon": ["read_raw_nihon"], - "nsx": ["read_raw_nsx"], - "_read_raw": ["read_raw"], - "eyelink": ["read_raw_eyelink"], - "_fiff_wrap": [ - "read_info", - "write_info", - "anonymize_info", - "read_fiducials", - "write_fiducials", - "show_fiff", - "get_channel_type_constants", - ], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/io/__init__.pyi b/mne/io/__init__.pyi new file mode 100644 index 00000000000..e7fba58667a --- /dev/null +++ b/mne/io/__init__.pyi @@ -0,0 +1,87 @@ +__all__ = [ + "BaseRaw", + "Raw", + "RawArray", + "anonymize_info", + "concatenate_raws", + "constants", + "get_channel_type_constants", + "match_channel_orders", + "pick", + "read_epochs_eeglab", + "read_epochs_fieldtrip", + "read_epochs_kit", + "read_evoked_besa", + "read_evoked_fieldtrip", + "read_evokeds_mff", + "read_fiducials", + "read_info", + "read_raw", + "read_raw_artemis123", + "read_raw_bdf", + "read_raw_boxy", + "read_raw_brainvision", + "read_raw_bti", + "read_raw_cnt", + "read_raw_ctf", + "read_raw_curry", + "read_raw_edf", + "read_raw_eeglab", + "read_raw_egi", + "read_raw_eximia", + "read_raw_eyelink", + "read_raw_fieldtrip", + "read_raw_fif", + "read_raw_fil", + "read_raw_gdf", + "read_raw_hitachi", + "read_raw_kit", + "read_raw_nedf", + "read_raw_nicolet", + "read_raw_nihon", + "read_raw_nirx", + "read_raw_nsx", + "read_raw_persyst", + "read_raw_snirf", + "show_fiff", + "write_fiducials", + "write_info", +] +from . 
import constants, pick +from .base import BaseRaw, concatenate_raws, match_channel_orders +from .array import RawArray +from .besa import read_evoked_besa +from .brainvision import read_raw_brainvision +from .bti import read_raw_bti +from .cnt import read_raw_cnt +from .ctf import read_raw_ctf +from .curry import read_raw_curry +from .edf import read_raw_edf, read_raw_bdf, read_raw_gdf +from .egi import read_raw_egi, read_evokeds_mff +from .kit import read_raw_kit, read_epochs_kit +from .fiff import read_raw_fif, Raw +from .fil import read_raw_fil +from .nedf import read_raw_nedf +from .nicolet import read_raw_nicolet +from .artemis123 import read_raw_artemis123 +from .eeglab import read_raw_eeglab, read_epochs_eeglab +from .eximia import read_raw_eximia +from .hitachi import read_raw_hitachi +from .nirx import read_raw_nirx +from .boxy import read_raw_boxy +from .snirf import read_raw_snirf +from .persyst import read_raw_persyst +from .fieldtrip import read_raw_fieldtrip, read_epochs_fieldtrip, read_evoked_fieldtrip +from .nihon import read_raw_nihon +from .nsx import read_raw_nsx +from ._read_raw import read_raw +from .eyelink import read_raw_eyelink +from ._fiff_wrap import ( + read_info, + write_info, + anonymize_info, + read_fiducials, + write_fiducials, + show_fiff, + get_channel_type_constants, +) diff --git a/mne/minimum_norm/__init__.py b/mne/minimum_norm/__init__.py index 16d942fabaf..cb1308d9768 100644 --- a/mne/minimum_norm/__init__.py +++ b/mne/minimum_norm/__init__.py @@ -1,37 +1,4 @@ """Linear inverse solvers based on L2 Minimum Norm Estimates (MNE).""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[], - submod_attrs={ - "inverse": [ - "InverseOperator", - "read_inverse_operator", - "apply_inverse", - "apply_inverse_raw", - "make_inverse_operator", - "apply_inverse_epochs", - "apply_inverse_tfr_epochs", - "write_inverse_operator", - "compute_rank_inverse", - "prepare_inverse_operator", - "estimate_snr", - "apply_inverse_cov", - "INVERSE_METHODS", - ], - "time_frequency": [ - "source_band_induced_power", - "source_induced_power", - "compute_source_psd", - "compute_source_psd_epochs", - ], - "resolution_matrix": [ - "make_inverse_resolution_matrix", - "get_point_spread", - "get_cross_talk", - ], - "spatial_resolution": ["resolution_metrics"], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/minimum_norm/__init__.pyi b/mne/minimum_norm/__init__.pyi new file mode 100644 index 00000000000..bf8e3fc90ca --- /dev/null +++ b/mne/minimum_norm/__init__.pyi @@ -0,0 +1,50 @@ +__all__ = [ + "INVERSE_METHODS", + "InverseOperator", + "apply_inverse", + "apply_inverse_cov", + "apply_inverse_epochs", + "apply_inverse_raw", + "apply_inverse_tfr_epochs", + "compute_rank_inverse", + "compute_source_psd", + "compute_source_psd_epochs", + "estimate_snr", + "get_cross_talk", + "get_point_spread", + "make_inverse_operator", + "make_inverse_resolution_matrix", + "prepare_inverse_operator", + "read_inverse_operator", + "resolution_metrics", + "source_band_induced_power", + "source_induced_power", + "write_inverse_operator", +] +from .inverse import ( + InverseOperator, + read_inverse_operator, + apply_inverse, + apply_inverse_raw, + make_inverse_operator, + apply_inverse_epochs, + apply_inverse_tfr_epochs, + write_inverse_operator, + compute_rank_inverse, + prepare_inverse_operator, + estimate_snr, + apply_inverse_cov, + INVERSE_METHODS, +) +from .time_frequency import ( + source_band_induced_power, + 
source_induced_power, + compute_source_psd, + compute_source_psd_epochs, +) +from .resolution_matrix import ( + make_inverse_resolution_matrix, + get_point_spread, + get_cross_talk, +) +from .spatial_resolution import resolution_metrics diff --git a/mne/preprocessing/__init__.py b/mne/preprocessing/__init__.py index 372df010e8c..8a32cd31332 100644 --- a/mne/preprocessing/__init__.py +++ b/mne/preprocessing/__init__.py @@ -1,60 +1,10 @@ """Preprocessing with artifact detection, SSP, and ICA.""" - # Authors: Alexandre Gramfort # Matti Hämäläinen # Martin Luessi # Denis Engemann # # License: BSD-3-Clause - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=["eyetracking", "ieeg", "nirs"], - submod_attrs={ - "_annotate_amplitude": ["annotate_amplitude"], - "maxfilter": ["apply_maxfilter"], - "ssp": ["compute_proj_ecg", "compute_proj_eog"], - "eog": ["find_eog_events", "create_eog_epochs"], - "ecg": ["find_ecg_events", "create_ecg_epochs"], - "ica": [ - "ICA", - "ica_find_eog_events", - "ica_find_ecg_events", - "get_score_funcs", - "read_ica", - "corrmap", - "read_ica_eeglab", - ], - "otp": ["oversampled_temporal_projection"], - "_peak_finder": ["peak_finder"], - "infomax_": ["infomax"], - "stim": ["fix_stim_artifact"], - "maxwell": [ - "maxwell_filter", - "find_bad_channels_maxwell", - "compute_maxwell_basis", - "maxwell_filter_prepare_emptyroom", - ], - "realign": ["realign_raw"], - "xdawn": ["Xdawn"], - "_csd": ["compute_current_source_density", "compute_bridged_electrodes"], - "artifact_detection": [ - "annotate_movement", - "compute_average_dev_head_t", - "annotate_muscle_zscore", - "annotate_break", - ], - "_regress": ["regress_artifact", "EOGRegression", "read_eog_regression"], - "_fine_cal": [ - "compute_fine_calibration", - "read_fine_calibration", - "write_fine_calibration", - ], - "_annotate_nan": ["annotate_nan"], - "interpolate": ["equalize_bads", "interpolate_bridged_electrodes"], - "_css": ["cortical_signal_suppression"], - "hfc": ["compute_proj_hfc"], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/preprocessing/__init__.pyi b/mne/preprocessing/__init__.pyi new file mode 100644 index 00000000000..bd4dd7f44c4 --- /dev/null +++ b/mne/preprocessing/__init__.pyi @@ -0,0 +1,91 @@ +__all__ = [ + "EOGRegression", + "ICA", + "Xdawn", + "annotate_amplitude", + "annotate_break", + "annotate_movement", + "annotate_muscle_zscore", + "annotate_nan", + "apply_maxfilter", + "compute_average_dev_head_t", + "compute_bridged_electrodes", + "compute_current_source_density", + "compute_fine_calibration", + "compute_maxwell_basis", + "compute_proj_ecg", + "compute_proj_eog", + "compute_proj_hfc", + "corrmap", + "cortical_signal_suppression", + "create_ecg_epochs", + "create_eog_epochs", + "equalize_bads", + "eyetracking", + "find_bad_channels_maxwell", + "find_ecg_events", + "find_eog_events", + "fix_stim_artifact", + "get_score_funcs", + "ica_find_ecg_events", + "ica_find_eog_events", + "ieeg", + "infomax", + "interpolate_bridged_electrodes", + "maxwell_filter", + "maxwell_filter_prepare_emptyroom", + "nirs", + "oversampled_temporal_projection", + "peak_finder", + "read_eog_regression", + "read_fine_calibration", + "read_ica", + "read_ica_eeglab", + "realign_raw", + "regress_artifact", + "write_fine_calibration", +] +from . 
import eyetracking, ieeg, nirs +from ._annotate_amplitude import annotate_amplitude +from .maxfilter import apply_maxfilter +from .ssp import compute_proj_ecg, compute_proj_eog +from .eog import find_eog_events, create_eog_epochs +from .ecg import find_ecg_events, create_ecg_epochs +from .ica import ( + ICA, + ica_find_eog_events, + ica_find_ecg_events, + get_score_funcs, + read_ica, + corrmap, + read_ica_eeglab, +) +from .otp import oversampled_temporal_projection +from ._peak_finder import peak_finder +from .infomax_ import infomax +from .stim import fix_stim_artifact +from .maxwell import ( + maxwell_filter, + find_bad_channels_maxwell, + compute_maxwell_basis, + maxwell_filter_prepare_emptyroom, +) +from .realign import realign_raw +from .xdawn import Xdawn +from ._csd import compute_current_source_density, compute_bridged_electrodes +from .artifact_detection import ( + annotate_movement, + compute_average_dev_head_t, + annotate_muscle_zscore, + annotate_break, +) +from ._regress import regress_artifact, EOGRegression, read_eog_regression +from ._fine_cal import ( + compute_fine_calibration, + read_fine_calibration, + write_fine_calibration, +) +from ._annotate_nan import annotate_nan +from .interpolate import equalize_bads, interpolate_bridged_electrodes +from ._css import cortical_signal_suppression +from .hfc import compute_proj_hfc diff --git a/mne/report/__init__.py b/mne/report/__init__.py index c5ae0eaadd2..0037b496551 100644 --- a/mne/report/__init__.py +++ b/mne/report/__init__.py @@ -1,11 +1,4 @@ """Report-generation functions and classes.""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[], - submod_attrs={ - "report": ["Report", "open_report", "_ReportScraper"], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/report/__init__.pyi b/mne/report/__init__.pyi new file mode 100644 index 00000000000..5f62e1eafbf --- /dev/null +++ b/mne/report/__init__.pyi @@ -0,0 +1,2 @@ +__all__ = ["Report", "_ReportScraper", "open_report"] +from .report import Report, open_report, _ReportScraper diff --git a/mne/simulation/__init__.py b/mne/simulation/__init__.py index 045d563273c..cfefe8658ac 100644 --- a/mne/simulation/__init__.py +++ b/mne/simulation/__init__.py @@ -1,18 +1,4 @@ """Data simulation code.""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=["metrics"], - submod_attrs={ - "evoked": ["simulate_evoked", "add_noise"], - "raw": ["simulate_raw", "add_ecg", "add_eog", "add_chpi"], - "source": [ - "select_source_in_label", - "simulate_stc", - "simulate_sparse_stc", - "SourceSimulator", - ], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/simulation/__init__.pyi b/mne/simulation/__init__.pyi new file mode 100644 index 00000000000..1a49e48a882 --- /dev/null +++ b/mne/simulation/__init__.pyi @@ -0,0 +1,22 @@ +__all__ = [ + "SourceSimulator", + "add_chpi", + "add_ecg", + "add_eog", + "add_noise", + "metrics", + "select_source_in_label", + "simulate_evoked", + "simulate_raw", + "simulate_sparse_stc", + "simulate_stc", +] +from . 
import metrics +from .evoked import simulate_evoked, add_noise +from .raw import simulate_raw, add_ecg, add_eog, add_chpi +from .source import ( + select_source_in_label, + simulate_stc, + simulate_sparse_stc, + SourceSimulator, +) diff --git a/mne/source_space/__init__.py b/mne/source_space/__init__.py index 42506025869..eca8b7f74c9 100644 --- a/mne/source_space/__init__.py +++ b/mne/source_space/__init__.py @@ -1,22 +1,4 @@ """Forward modeling code.""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=["_source_space"], - submod_attrs={ - "_source_space": [ - "compute_distance_to_sensors", - "get_decimated_surfaces", - # These are documented in the MNE namespace but it doesn't hurt to - # keep them here as well - "SourceSpaces", - "read_source_spaces", - "write_source_spaces", - "setup_source_space", - "setup_volume_source_space", - "add_source_space_distances", - ], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/source_space/__init__.pyi b/mne/source_space/__init__.pyi new file mode 100644 index 00000000000..fab90945882 --- /dev/null +++ b/mne/source_space/__init__.pyi @@ -0,0 +1,22 @@ +__all__ = [ + "SourceSpaces", + "_source_space", + "add_source_space_distances", + "compute_distance_to_sensors", + "get_decimated_surfaces", + "read_source_spaces", + "setup_source_space", + "setup_volume_source_space", + "write_source_spaces", +] +from . import _source_space +from ._source_space import ( + compute_distance_to_sensors, + get_decimated_surfaces, + SourceSpaces, + read_source_spaces, + write_source_spaces, + setup_source_space, + setup_volume_source_space, + add_source_space_distances, +) diff --git a/mne/stats/__init__.py b/mne/stats/__init__.py index 9a1ed38518e..7c4f1454a9b 100644 --- a/mne/stats/__init__.py +++ b/mne/stats/__init__.py @@ -1,34 +1,4 @@ """Functions for statistical analysis.""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[], - submod_attrs={ - "parametric": [ - "f_threshold_mway_rm", - "f_mway_rm", - "f_oneway", - "_parametric_ci", - "ttest_1samp_no_p", - "ttest_ind_no_p", - ], - "permutations": [ - "permutation_t_test", - "_ci", - "bootstrap_confidence_interval", - ], - "cluster_level": [ - "permutation_cluster_test", - "permutation_cluster_1samp_test", - "spatio_temporal_cluster_test", - "spatio_temporal_cluster_1samp_test", - "_st_mask_from_s_inds", - "summarize_clusters_stc", - ], - "multi_comp": ["fdr_correction", "bonferroni_correction"], - "regression": ["linear_regression", "linear_regression_raw"], - "_adjacency": ["combine_adjacency"], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/stats/__init__.pyi b/mne/stats/__init__.pyi new file mode 100644 index 00000000000..ac47d6c3680 --- /dev/null +++ b/mne/stats/__init__.pyi @@ -0,0 +1,42 @@ +__all__ = [ + "_ci", + "_parametric_ci", + "_st_mask_from_s_inds", + "bonferroni_correction", + "bootstrap_confidence_interval", + "combine_adjacency", + "f_mway_rm", + "f_oneway", + "f_threshold_mway_rm", + "fdr_correction", + "linear_regression", + "linear_regression_raw", + "permutation_cluster_1samp_test", + "permutation_cluster_test", + "permutation_t_test", + "spatio_temporal_cluster_1samp_test", + "spatio_temporal_cluster_test", + "summarize_clusters_stc", + "ttest_1samp_no_p", + "ttest_ind_no_p", +] +from .parametric import ( + f_threshold_mway_rm, + f_mway_rm, + f_oneway, + _parametric_ci, + ttest_1samp_no_p, + 
ttest_ind_no_p, +) +from .permutations import permutation_t_test, _ci, bootstrap_confidence_interval +from .cluster_level import ( + permutation_cluster_test, + permutation_cluster_1samp_test, + spatio_temporal_cluster_test, + spatio_temporal_cluster_1samp_test, + _st_mask_from_s_inds, + summarize_clusters_stc, +) +from .multi_comp import fdr_correction, bonferroni_correction +from .regression import linear_regression, linear_regression_raw +from ._adjacency import combine_adjacency diff --git a/mne/time_frequency/__init__.py b/mne/time_frequency/__init__.py index 8f245bee7f6..c95662bbc26 100644 --- a/mne/time_frequency/__init__.py +++ b/mne/time_frequency/__init__.py @@ -1,57 +1,4 @@ """Time frequency analysis tools.""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=[], - submod_attrs={ - "_stft": [ - "istft", - "stft", - "stftfreq", - ], - "_stockwell": [ - "tfr_array_stockwell", - "tfr_stockwell", - ], - "ar": ["fit_iir_model_raw"], - "csd": [ - "CrossSpectralDensity", - "csd_array_fourier", - "csd_array_morlet", - "csd_array_multitaper", - "csd_fourier", - "csd_morlet", - "csd_multitaper", - "csd_tfr", - "pick_channels_csd", - "read_csd", - ], - "multitaper": [ - "dpss_windows", - "psd_array_multitaper", - "tfr_array_multitaper", - ], - "psd": ["psd_array_welch"], - "spectrum": [ - "EpochsSpectrum", - "EpochsSpectrumArray", - "Spectrum", - "SpectrumArray", - "read_spectrum", - ], - "tfr": [ - "_BaseTFR", - "AverageTFR", - "EpochsTFR", - "fwhm", - "morlet", - "read_tfrs", - "tfr_array_morlet", - "tfr_morlet", - "tfr_multitaper", - "write_tfrs", - ], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/time_frequency/__init__.pyi b/mne/time_frequency/__init__.pyi new file mode 100644 index 00000000000..258fa5ef96c --- /dev/null +++ b/mne/time_frequency/__init__.pyi @@ -0,0 +1,73 @@ +__all__ = [ + "AverageTFR", + "CrossSpectralDensity", + "EpochsSpectrum", + "EpochsSpectrumArray", + "EpochsTFR", + "Spectrum", + "SpectrumArray", + "_BaseTFR", + "csd_array_fourier", + "csd_array_morlet", + "csd_array_multitaper", + "csd_fourier", + "csd_morlet", + "csd_multitaper", + "csd_tfr", + "dpss_windows", + "fit_iir_model_raw", + "fwhm", + "istft", + "morlet", + "pick_channels_csd", + "psd_array_multitaper", + "psd_array_welch", + "read_csd", + "read_spectrum", + "read_tfrs", + "stft", + "stftfreq", + "tfr_array_morlet", + "tfr_array_multitaper", + "tfr_array_stockwell", + "tfr_morlet", + "tfr_multitaper", + "tfr_stockwell", + "write_tfrs", +] +from ._stft import istft, stft, stftfreq +from ._stockwell import tfr_array_stockwell, tfr_stockwell +from .ar import fit_iir_model_raw +from .csd import ( + CrossSpectralDensity, + csd_array_fourier, + csd_array_morlet, + csd_array_multitaper, + csd_fourier, + csd_morlet, + csd_multitaper, + csd_tfr, + pick_channels_csd, + read_csd, +) +from .multitaper import dpss_windows, psd_array_multitaper, tfr_array_multitaper +from .psd import psd_array_welch +from .spectrum import ( + EpochsSpectrum, + EpochsSpectrumArray, + Spectrum, + SpectrumArray, + read_spectrum, +) +from .tfr import ( + _BaseTFR, + AverageTFR, + EpochsTFR, + fwhm, + morlet, + read_tfrs, + tfr_array_morlet, + tfr_morlet, + tfr_multitaper, + write_tfrs, +) diff --git a/mne/utils/__init__.py b/mne/utils/__init__.py index 76e52f378fe..f84944aae4a 100644 --- a/mne/utils/__init__.py +++ b/mne/utils/__init__.py @@ -1,208 +1,3 @@ import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - 
__name__, - submodules=[], - submod_attrs={ - "_bunch": ["Bunch", "BunchConst", "BunchConstNamed"], - "check": [ - "check_fname", - "check_version", - "check_random_state", - "_check_fname", - "_check_subject", - "_check_pandas_installed", - "_check_pandas_index_arguments", - "_check_event_id", - "_check_ch_locs", - "_check_compensation_grade", - "_check_if_nan", - "_is_numeric", - "_ensure_int", - "_check_integer_or_list", - "_check_preload", - "_validate_type", - "_check_range", - "_check_info_inv", - "_check_channels_spatial_filter", - "_check_one_ch_type", - "_check_rank", - "_check_option", - "_check_depth", - "_check_combine", - "_path_like", - "_check_src_normal", - "_check_stc_units", - "_check_qt_version", - "_check_sphere", - "_check_time_format", - "_check_freesurfer_home", - "_suggest", - "_require_version", - "_on_missing", - "_check_on_missing", - "int_like", - "_safe_input", - "_check_all_same_channel_names", - "path_like", - "_ensure_events", - "_check_eeglabio_installed", - "_check_pybv_installed", - "_check_edflib_installed", - "_to_rgb", - "_soft_import", - "_check_dict_keys", - "_check_pymatreader_installed", - "_import_h5py", - "_import_h5io_funcs", - "_import_nibabel", - "_import_pymatreader_funcs", - "_check_head_radius", - ], - "config": [ - "set_config", - "get_config", - "get_config_path", - "set_cache_dir", - "set_memmap_min_size", - "get_subjects_dir", - "_get_stim_channel", - "sys_info", - "_get_extra_data_path", - "_get_root_dir", - "_get_numpy_libs", - ], - "docs": [ - "copy_function_doc_to_method_doc", - "copy_doc", - "linkcode_resolve", - "open_docs", - "deprecated", - "fill_doc", - "deprecated_alias", - "legacy", - "copy_base_doc_to_subclass_doc", - "_doc_special_members", - ], - "fetching": ["_url_to_local_path"], - "_logging": [ - "verbose", - "logger", - "set_log_level", - "set_log_file", - "use_log_level", - "catch_logging", - "warn", - "filter_out_warnings", - "wrapped_stdout", - "_get_call_line", - "_record_warnings", - "ClosingStringIO", - "_verbose_safe_false", - "_parse_verbose", - ], - "misc": [ - "run_subprocess", - "_pl", - "_clean_names", - "pformat", - "_file_like", - "_empty_hash", - "_explain_exception", - "_get_argvalues", - "sizeof_fmt", - "running_subprocess", - "_DefaultEventParser", - "_assert_no_instances", - "_resource_path", - "repr_html", - "_auto_weakref", - ], - "progressbar": ["ProgressBar"], - "_testing": [ - "run_command_if_main", - "requires_mne", - "requires_good_network", - "ArgvSetter", - "SilenceStdout", - "has_freesurfer", - "has_mne_c", - "_TempDir", - "buggy_mkl_svd", - "requires_freesurfer", - "requires_mne_mark", - "assert_object_equal", - "assert_and_remove_boundary_annot", - "_raw_annot", - "assert_dig_allclose", - "assert_meg_snr", - "assert_snr", - "assert_stcs_equal", - "_click_ch_name", - "requires_openmeeg_mark", - ], - "numerics": [ - "hashfunc", - "_compute_row_norms", - "_reg_pinv", - "random_permutation", - "_reject_data_segments", - "compute_corr", - "_get_inst_data", - "array_split_idx", - "sum_squared", - "split_list", - "_gen_events", - "create_slices", - "_time_mask", - "_freq_mask", - "grand_average", - "object_diff", - "object_hash", - "object_size", - "_apply_scaling_cov", - "_undo_scaling_cov", - "_apply_scaling_array", - "_undo_scaling_array", - "_scaled_array", - "_replace_md5", - "_PCA", - "_mask_to_onsets_offsets", - "_array_equal_nan", - "_julian_to_cal", - "_cal_to_julian", - "_dt_to_julian", - "_julian_to_dt", - "_dt_to_stamp", - "_stamp_to_dt", - "_check_dt", - "_ReuseCycle", - 
"_arange_div", - "_hashable_ndarray", - "_custom_lru_cache", - ], - "mixin": [ - "SizeMixin", - "GetEpochsMixin", - "TimeMixin", - "ExtendedTimeMixin", - "_prepare_read_metadata", - "_prepare_write_metadata", - "_check_decim", - ], - "linalg": [ - "_svd_lwork", - "_repeated_svd", - "_sym_mat_pow", - "sqrtm_sym", - "eigh", - "_get_blas_funcs", - ], - "dataframe": [ - "_set_pandas_dtype", - "_scale_dataframe_data", - "_convert_times", - "_build_data_frame", - ], - }, -) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/utils/__init__.pyi b/mne/utils/__init__.pyi new file mode 100644 index 00000000000..8dfc4a6bdc4 --- /dev/null +++ b/mne/utils/__init__.pyi @@ -0,0 +1,384 @@ +__all__ = [ + "ArgvSetter", + "Bunch", + "BunchConst", + "BunchConstNamed", + "ClosingStringIO", + "ExtendedTimeMixin", + "GetEpochsMixin", + "ProgressBar", + "SilenceStdout", + "SizeMixin", + "TimeMixin", + "_DefaultEventParser", + "_PCA", + "_ReuseCycle", + "_TempDir", + "_apply_scaling_array", + "_apply_scaling_cov", + "_arange_div", + "_array_equal_nan", + "_assert_no_instances", + "_auto_weakref", + "_build_data_frame", + "_cal_to_julian", + "_check_all_same_channel_names", + "_check_ch_locs", + "_check_channels_spatial_filter", + "_check_combine", + "_check_compensation_grade", + "_check_decim", + "_check_depth", + "_check_dict_keys", + "_check_dt", + "_check_edflib_installed", + "_check_eeglabio_installed", + "_check_event_id", + "_check_fname", + "_check_freesurfer_home", + "_check_head_radius", + "_check_if_nan", + "_check_info_inv", + "_check_integer_or_list", + "_check_on_missing", + "_check_one_ch_type", + "_check_option", + "_check_pandas_index_arguments", + "_check_pandas_installed", + "_check_preload", + "_check_pybv_installed", + "_check_pymatreader_installed", + "_check_qt_version", + "_check_range", + "_check_rank", + "_check_sphere", + "_check_src_normal", + "_check_stc_units", + "_check_subject", + "_check_time_format", + "_clean_names", + "_click_ch_name", + "_compute_row_norms", + "_convert_times", + "_custom_lru_cache", + "_doc_special_members", + "_dt_to_julian", + "_dt_to_stamp", + "_empty_hash", + "_ensure_events", + "_ensure_int", + "_explain_exception", + "_file_like", + "_freq_mask", + "_gen_events", + "_get_argvalues", + "_get_blas_funcs", + "_get_call_line", + "_get_extra_data_path", + "_get_inst_data", + "_get_numpy_libs", + "_get_root_dir", + "_get_stim_channel", + "_hashable_ndarray", + "_import_h5io_funcs", + "_import_h5py", + "_import_nibabel", + "_import_pymatreader_funcs", + "_is_numeric", + "_julian_to_cal", + "_julian_to_dt", + "_mask_to_onsets_offsets", + "_on_missing", + "_parse_verbose", + "_path_like", + "_pl", + "_prepare_read_metadata", + "_prepare_write_metadata", + "_raw_annot", + "_record_warnings", + "_reg_pinv", + "_reject_data_segments", + "_repeated_svd", + "_replace_md5", + "_require_version", + "_resource_path", + "_safe_input", + "_scale_dataframe_data", + "_scaled_array", + "_set_pandas_dtype", + "_soft_import", + "_stamp_to_dt", + "_suggest", + "_svd_lwork", + "_sym_mat_pow", + "_time_mask", + "_to_rgb", + "_undo_scaling_array", + "_undo_scaling_cov", + "_url_to_local_path", + "_validate_type", + "_verbose_safe_false", + "array_split_idx", + "assert_and_remove_boundary_annot", + "assert_dig_allclose", + "assert_meg_snr", + "assert_object_equal", + "assert_snr", + "assert_stcs_equal", + "buggy_mkl_svd", + "catch_logging", + "check_fname", + "check_random_state", + "check_version", + "compute_corr", + 
"copy_base_doc_to_subclass_doc", + "copy_doc", + "copy_function_doc_to_method_doc", + "create_slices", + "deprecated", + "deprecated_alias", + "eigh", + "fill_doc", + "filter_out_warnings", + "get_config", + "get_config_path", + "get_subjects_dir", + "grand_average", + "has_freesurfer", + "has_mne_c", + "hashfunc", + "int_like", + "legacy", + "linkcode_resolve", + "logger", + "object_diff", + "object_hash", + "object_size", + "open_docs", + "path_like", + "pformat", + "random_permutation", + "repr_html", + "requires_freesurfer", + "requires_good_network", + "requires_mne", + "requires_mne_mark", + "requires_openmeeg_mark", + "run_command_if_main", + "run_subprocess", + "running_subprocess", + "set_cache_dir", + "set_config", + "set_log_file", + "set_log_level", + "set_memmap_min_size", + "sizeof_fmt", + "split_list", + "sqrtm_sym", + "sum_squared", + "sys_info", + "use_log_level", + "verbose", + "warn", + "wrapped_stdout", +] +from ._bunch import Bunch, BunchConst, BunchConstNamed +from .check import ( + check_fname, + check_version, + check_random_state, + _check_fname, + _check_subject, + _check_pandas_installed, + _check_pandas_index_arguments, + _check_event_id, + _check_ch_locs, + _check_compensation_grade, + _check_if_nan, + _is_numeric, + _ensure_int, + _check_integer_or_list, + _check_preload, + _validate_type, + _check_range, + _check_info_inv, + _check_channels_spatial_filter, + _check_one_ch_type, + _check_rank, + _check_option, + _check_depth, + _check_combine, + _path_like, + _check_src_normal, + _check_stc_units, + _check_qt_version, + _check_sphere, + _check_time_format, + _check_freesurfer_home, + _suggest, + _require_version, + _on_missing, + _check_on_missing, + int_like, + _safe_input, + _check_all_same_channel_names, + path_like, + _ensure_events, + _check_eeglabio_installed, + _check_pybv_installed, + _check_edflib_installed, + _to_rgb, + _soft_import, + _check_dict_keys, + _check_pymatreader_installed, + _import_h5py, + _import_h5io_funcs, + _import_nibabel, + _import_pymatreader_funcs, + _check_head_radius, +) +from .config import ( + set_config, + get_config, + get_config_path, + set_cache_dir, + set_memmap_min_size, + get_subjects_dir, + _get_stim_channel, + sys_info, + _get_extra_data_path, + _get_root_dir, + _get_numpy_libs, +) +from .docs import ( + copy_function_doc_to_method_doc, + copy_doc, + linkcode_resolve, + open_docs, + deprecated, + fill_doc, + deprecated_alias, + legacy, + copy_base_doc_to_subclass_doc, + _doc_special_members, +) +from .fetching import _url_to_local_path +from ._logging import ( + verbose, + logger, + set_log_level, + set_log_file, + use_log_level, + catch_logging, + warn, + filter_out_warnings, + wrapped_stdout, + _get_call_line, + _record_warnings, + ClosingStringIO, + _verbose_safe_false, + _parse_verbose, +) +from .misc import ( + run_subprocess, + _pl, + _clean_names, + pformat, + _file_like, + _empty_hash, + _explain_exception, + _get_argvalues, + sizeof_fmt, + running_subprocess, + _DefaultEventParser, + _assert_no_instances, + _resource_path, + repr_html, + _auto_weakref, +) +from .progressbar import ProgressBar +from ._testing import ( + run_command_if_main, + requires_mne, + requires_good_network, + ArgvSetter, + SilenceStdout, + has_freesurfer, + has_mne_c, + _TempDir, + buggy_mkl_svd, + requires_freesurfer, + requires_mne_mark, + assert_object_equal, + assert_and_remove_boundary_annot, + _raw_annot, + assert_dig_allclose, + assert_meg_snr, + assert_snr, + assert_stcs_equal, + _click_ch_name, + requires_openmeeg_mark, +) 
+from .numerics import ( + hashfunc, + _compute_row_norms, + _reg_pinv, + random_permutation, + _reject_data_segments, + compute_corr, + _get_inst_data, + array_split_idx, + sum_squared, + split_list, + _gen_events, + create_slices, + _time_mask, + _freq_mask, + grand_average, + object_diff, + object_hash, + object_size, + _apply_scaling_cov, + _undo_scaling_cov, + _apply_scaling_array, + _undo_scaling_array, + _scaled_array, + _replace_md5, + _PCA, + _mask_to_onsets_offsets, + _array_equal_nan, + _julian_to_cal, + _cal_to_julian, + _dt_to_julian, + _julian_to_dt, + _dt_to_stamp, + _stamp_to_dt, + _check_dt, + _ReuseCycle, + _arange_div, + _hashable_ndarray, + _custom_lru_cache, +) +from .mixin import ( + SizeMixin, + GetEpochsMixin, + TimeMixin, + ExtendedTimeMixin, + _prepare_read_metadata, + _prepare_write_metadata, + _check_decim, +) +from .linalg import ( + _svd_lwork, + _repeated_svd, + _sym_mat_pow, + sqrtm_sym, + eigh, + _get_blas_funcs, +) +from .dataframe import ( + _set_pandas_dtype, + _scale_dataframe_data, + _convert_times, + _build_data_frame, +) diff --git a/mne/viz/__init__.py b/mne/viz/__init__.py index 2ec36d48941..f2f295ccf0c 100644 --- a/mne/viz/__init__.py +++ b/mne/viz/__init__.py @@ -1,109 +1,4 @@ """Visualization routines.""" - import lazy_loader as lazy -__getattr__, __dir__, __all__ = lazy.attach( - __name__, - submodules=["backends", "_scraper", "ui_events"], - submod_attrs={ - "backends._abstract": ["Figure3D"], - "backends.renderer": [ - "set_3d_backend", - "get_3d_backend", - "use_3d_backend", - "set_3d_view", - "set_3d_title", - "create_3d_figure", - "close_3d_figure", - "close_all_3d_figures", - "get_brain_class", - ], - "circle": ["circular_layout", "plot_channel_labels_circle"], - "epochs": [ - "plot_drop_log", - "plot_epochs", - "plot_epochs_psd", - "plot_epochs_image", - ], - "evoked": [ - "plot_evoked", - "plot_evoked_image", - "plot_evoked_white", - "plot_snr_estimate", - "plot_evoked_topo", - "plot_evoked_joint", - "plot_compare_evokeds", - ], - "evoked_field": [ - "EvokedField", - ], - "ica": [ - "plot_ica_scores", - "plot_ica_sources", - "plot_ica_overlay", - "_plot_sources", - "plot_ica_properties", - ], - "misc": [ - "plot_cov", - "plot_csd", - "plot_bem", - "plot_events", - "plot_source_spectrogram", - "_get_presser", - "plot_dipole_amplitudes", - "plot_ideal_filter", - "plot_filter", - "adjust_axes", - "plot_chpi_snr", - ], - "montage": ["plot_montage"], - "raw": ["plot_raw", "plot_raw_psd", "plot_raw_psd_topo", "_RAW_CLIP_DEF"], - "topo": ["plot_topo_image_epochs", "iter_topography"], - "topomap": [ - "plot_evoked_topomap", - "plot_projs_topomap", - "plot_arrowmap", - "plot_ica_components", - "plot_tfr_topomap", - "plot_topomap", - "plot_epochs_psd_topomap", - "plot_layout", - "plot_bridged_electrodes", - "plot_ch_adjacency", - "plot_regression_weights", - ], - "utils": [ - "tight_layout", - "mne_analyze_colormap", - "compare_fiff", - "ClickableImage", - "add_background_image", - "plot_sensors", - "centers_to_edges", - "concatenate_images", - "_get_plot_ch_type", - ], - "_3d": [ - "plot_sparse_source_estimates", - "plot_source_estimates", - "plot_vector_source_estimates", - "plot_evoked_field", - "plot_dipole_locations", - "snapshot_brain_montage", - "plot_head_positions", - "plot_alignment", - "plot_brain_colorbar", - "plot_volume_source_estimates", - "link_brains", - "set_3d_options", - ], - "_brain": ["Brain"], - "_figure": [ - "get_browser_backend", - "set_browser_backend", - "use_browser_backend", - ], - "_proj": ["plot_projs_joint"], - }, 
-) +(__getattr__, __dir__, __all__) = lazy.attach_stub(__name__, __file__) diff --git a/mne/viz/__init__.pyi b/mne/viz/__init__.pyi new file mode 100644 index 00000000000..e73226b6909 --- /dev/null +++ b/mne/viz/__init__.pyi @@ -0,0 +1,178 @@ +__all__ = [ + "Brain", + "ClickableImage", + "EvokedField", + "Figure3D", + "_RAW_CLIP_DEF", + "_get_plot_ch_type", + "_get_presser", + "_plot_sources", + "_scraper", + "add_background_image", + "adjust_axes", + "backends", + "centers_to_edges", + "circular_layout", + "close_3d_figure", + "close_all_3d_figures", + "compare_fiff", + "concatenate_images", + "create_3d_figure", + "get_3d_backend", + "get_brain_class", + "get_browser_backend", + "iter_topography", + "link_brains", + "mne_analyze_colormap", + "plot_alignment", + "plot_arrowmap", + "plot_bem", + "plot_brain_colorbar", + "plot_bridged_electrodes", + "plot_ch_adjacency", + "plot_channel_labels_circle", + "plot_chpi_snr", + "plot_compare_evokeds", + "plot_cov", + "plot_csd", + "plot_dipole_amplitudes", + "plot_dipole_locations", + "plot_drop_log", + "plot_epochs", + "plot_epochs_image", + "plot_epochs_psd", + "plot_epochs_psd_topomap", + "plot_events", + "plot_evoked", + "plot_evoked_field", + "plot_evoked_image", + "plot_evoked_joint", + "plot_evoked_topo", + "plot_evoked_topomap", + "plot_evoked_white", + "plot_filter", + "plot_head_positions", + "plot_ica_components", + "plot_ica_overlay", + "plot_ica_properties", + "plot_ica_scores", + "plot_ica_sources", + "plot_ideal_filter", + "plot_layout", + "plot_montage", + "plot_projs_joint", + "plot_projs_topomap", + "plot_raw", + "plot_raw_psd", + "plot_raw_psd_topo", + "plot_regression_weights", + "plot_sensors", + "plot_snr_estimate", + "plot_source_estimates", + "plot_source_spectrogram", + "plot_sparse_source_estimates", + "plot_tfr_topomap", + "plot_topo_image_epochs", + "plot_topomap", + "plot_vector_source_estimates", + "plot_volume_source_estimates", + "set_3d_backend", + "set_3d_options", + "set_3d_title", + "set_3d_view", + "set_browser_backend", + "snapshot_brain_montage", + "tight_layout", + "ui_events", + "use_3d_backend", + "use_browser_backend", +] +from . 
import backends, _scraper, ui_events +from .backends._abstract import Figure3D +from .backends.renderer import ( + set_3d_backend, + get_3d_backend, + use_3d_backend, + set_3d_view, + set_3d_title, + create_3d_figure, + close_3d_figure, + close_all_3d_figures, + get_brain_class, +) +from .circle import circular_layout, plot_channel_labels_circle +from .epochs import plot_drop_log, plot_epochs, plot_epochs_psd, plot_epochs_image +from .evoked import ( + plot_evoked, + plot_evoked_image, + plot_evoked_white, + plot_snr_estimate, + plot_evoked_topo, + plot_evoked_joint, + plot_compare_evokeds, +) +from .evoked_field import EvokedField +from .ica import ( + plot_ica_scores, + plot_ica_sources, + plot_ica_overlay, + _plot_sources, + plot_ica_properties, +) +from .misc import ( + plot_cov, + plot_csd, + plot_bem, + plot_events, + plot_source_spectrogram, + _get_presser, + plot_dipole_amplitudes, + plot_ideal_filter, + plot_filter, + adjust_axes, + plot_chpi_snr, +) +from .montage import plot_montage +from .raw import plot_raw, plot_raw_psd, plot_raw_psd_topo, _RAW_CLIP_DEF +from .topo import plot_topo_image_epochs, iter_topography +from .topomap import ( + plot_evoked_topomap, + plot_projs_topomap, + plot_arrowmap, + plot_ica_components, + plot_tfr_topomap, + plot_topomap, + plot_epochs_psd_topomap, + plot_layout, + plot_bridged_electrodes, + plot_ch_adjacency, + plot_regression_weights, +) +from .utils import ( + tight_layout, + mne_analyze_colormap, + compare_fiff, + ClickableImage, + add_background_image, + plot_sensors, + centers_to_edges, + concatenate_images, + _get_plot_ch_type, +) +from ._3d import ( + plot_sparse_source_estimates, + plot_source_estimates, + plot_vector_source_estimates, + plot_evoked_field, + plot_dipole_locations, + snapshot_brain_montage, + plot_head_positions, + plot_alignment, + plot_brain_colorbar, + plot_volume_source_estimates, + link_brains, + set_3d_options, +) +from ._brain import Brain +from ._figure import get_browser_backend, set_browser_backend, use_browser_backend +from ._proj import plot_projs_joint diff --git a/tools/dev_reports/Makefile b/tools/dev/Makefile similarity index 100% rename from tools/dev_reports/Makefile rename to tools/dev/Makefile diff --git a/tools/dev_reports/check_steering_committee.py b/tools/dev/check_steering_committee.py similarity index 100% rename from tools/dev_reports/check_steering_committee.py rename to tools/dev/check_steering_committee.py diff --git a/tools/dev/generate_pyi_files.py b/tools/dev/generate_pyi_files.py new file mode 100644 index 00000000000..f7804e895e8 --- /dev/null +++ b/tools/dev/generate_pyi_files.py @@ -0,0 +1,76 @@ +import os +import sys +from importlib import import_module +from pathlib import Path + +import ast_comments as ast +import black +import mne + + +class RewriteAssign(ast.NodeTransformer): + """NodeTransformer to replace lazy attach with attach_stub.""" + + def visit_Assign(self, node): + """Replace lazy attach assignment with stub assignment.""" + if not hasattr(node.targets[0], "dims"): + return node + + ids = [name.id for name in node.targets[0].dims] + if ids == ["__getattr__", "__dir__", "__all__"]: + return ast.parse( + "__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)\n" + ) + return node + + +pyi_mode = black.Mode(is_pyi=True) +root = Path(mne.__file__).parent +inits = root.rglob("__init__.py") + +for init in inits: + # skip init files that don't lazy load (e.g., tests) + code = init.read_text("utf-8") + if "import lazy_loader as lazy" not in code: + continue + # 
get the AST + tree = ast.parse(code) + nodes = [node for node in tree.body if isinstance(node, ast.Assign)] + assert len(nodes) == 1 + node = nodes[0] + keywords = node.value.keywords + # get submodules + import_lines = list() + assert keywords[0].arg == "submodules" + # for submod in keywords[0].value.elts: + # import_lines.append(f"import {submod.value}") + submods = [submod.value for submod in keywords[0].value.elts] + if len(submods): + import_lines.append(f"from . import {', '.join(submods)}") + # get attrs + assert keywords[1].arg == "submod_attrs" + _dict = keywords[1].value + for key, vals in zip(_dict.keys, _dict.values): + attrs = [attr.value for attr in vals.elts] + import_lines.append(f"from .{key.value} import {', '.join(attrs)}") + # format + import_lines = black.format_str("\n".join(import_lines), mode=pyi_mode) + # get __all__ + import_path = str(init.parent.relative_to(root.parent)).replace(os.sep, ".") + import_module(import_path) + _all = black.format_str( + f"__all__ = {repr(sys.modules[import_path].__all__)}\n", + mode=pyi_mode, + ) + # write __init__.pyi + outfile = init.with_suffix(".pyi") + with open(outfile, "w") as fid: + fid.write(_all) + fid.write(import_lines) + # rewrite __init__.py + new_tree = RewriteAssign().visit(tree) + new_tree = ast.fix_missing_locations(new_tree) + new_code = ast.unparse(new_tree) + formatted_code = black.format_str(new_code, mode=black.Mode()) + with open(init, "w") as fid: + fid.write(formatted_code) diff --git a/tools/dev_reports/unacknowledged-bug-reports.jq b/tools/dev/unacknowledged-bug-reports.jq similarity index 100% rename from tools/dev_reports/unacknowledged-bug-reports.jq rename to tools/dev/unacknowledged-bug-reports.jq From cedefa0751adbf6d2be4531239019ef50302a8af Mon Sep 17 00:00:00 2001 From: Santeri Ruuskanen <66060772+ruuskas@users.noreply.github.com> Date: Thu, 5 Oct 2023 15:44:03 +0300 Subject: [PATCH 25/37] Add UI Event linking to DraggableColorbar (#12057) Co-authored-by: Santeri Ruuskanen Co-authored-by: Marijn van Vliet --- doc/changes/devel.rst | 3 +- mne/viz/epochs.py | 4 +- mne/viz/evoked.py | 2 +- mne/viz/tests/test_utils.py | 73 +++++++++++++++++++++++++++++++++++++ mne/viz/topo.py | 2 +- mne/viz/topomap.py | 70 +++++++++++++++++++++++++++++++---- mne/viz/ui_events.py | 16 +++++++- mne/viz/utils.py | 35 ++++++++++++++++-- 8 files changed, 189 insertions(+), 16 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index d4532d60721..84a48ef6e5c 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -23,7 +23,7 @@ Version 1.6.dev0 (development) Enhancements ~~~~~~~~~~~~ -- Improve tests for saving splits with `Epochs` (:gh:`11884` by `Dmitrii Altukhov`_) +- Improve tests for saving splits with :class:`mne.Epochs` (:gh:`11884` by `Dmitrii Altukhov`_) - Added functionality for linking interactive figures together, such that changing one figure will affect another, see :ref:`tut-ui-events` and :mod:`mne.viz.ui_events`. 
Current figures implementing UI events are :func:`mne.viz.plot_topomap` and :func:`mne.viz.plot_source_estimates` (:gh:`11685` :gh:`11891` by `Marijn van Vliet`_) - HTML anchors for :class:`mne.Report` now reflect the ``section-title`` of the report items rather than using a global incrementor ``global-N`` (:gh:`11890` by `Eric Larson`_) - Added public :func:`mne.io.write_info` to complement :func:`mne.io.read_info` (:gh:`11918` by `Eric Larson`_) @@ -37,6 +37,7 @@ Enhancements - Add support for writing forward solutions to HDF5 and convenience function :meth:`mne.Forward.save` (:gh:`12036` by `Eric Larson`_) - Refactored internals of :func:`mne.read_annotations` (:gh:`11964` by `Paul Roujansky`_) - Enhance :func:`~mne.viz.plot_evoked_field` with a GUI that has controls for time, colormap, and contour lines (:gh:`11942` by `Marijn van Vliet`_) +- Add :class:`mne.viz.ui_events.UIEvent` linking for interactive colorbars, allowing users to link figures and change the colormap and limits interactively. This supports :func:`~mne.viz.plot_evoked_topomap`, :func:`~mne.viz.plot_ica_components`, :func:`~mne.viz.plot_tfr_topomap`, :func:`~mne.viz.plot_projs_topomap`, :meth:`~mne.Evoked.plot_image`, and :meth:`~mne.Epochs.plot_image` (:gh:`12057` by `Santeri Ruuskanen`_) Bugs ~~~~ diff --git a/mne/viz/epochs.py b/mne/viz/epochs.py index 3e94308b10c..d173c80a45b 100644 --- a/mne/viz/epochs.py +++ b/mne/viz/epochs.py @@ -661,7 +661,9 @@ def _plot_epochs_image( this_colorbar = cbar(im, cax=ax["colorbar"]) this_colorbar.ax.set_ylabel(unit, rotation=270, labelpad=12) if cmap[1]: - ax_im.CB = DraggableColorbar(this_colorbar, im) + ax_im.CB = DraggableColorbar( + this_colorbar, im, kind="epochs_image", ch_type=unit + ) with warnings.catch_warnings(record=True): warnings.simplefilter("ignore") tight_layout(fig=fig) diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py index 34a9d60dfe3..687203cad49 100644 --- a/mne/viz/evoked.py +++ b/mne/viz/evoked.py @@ -958,7 +958,7 @@ def _plot_image( cbar = plt.colorbar(im, ax=ax) cbar.ax.set_title(ch_unit) if cmap[1]: - ax.CB = DraggableColorbar(cbar, im) + ax.CB = DraggableColorbar(cbar, im, "evoked_image", this_type) ylabel = "Channels" if show_names else "Channel (index)" t = titles[this_type] + " (%d channel%s" % (len(data), _pl(data)) + t_end diff --git a/mne/viz/tests/test_utils.py b/mne/viz/tests/test_utils.py index b4a418652f1..c7f339c9997 100644 --- a/mne/viz/tests/test_utils.py +++ b/mne/viz/tests/test_utils.py @@ -12,6 +12,8 @@ from mne.viz.utils import ( compare_fiff, _fake_click, + _fake_keypress, + _fake_scroll, _compute_scalings, _validate_if_list_of_axes, _get_color_list, @@ -20,15 +22,18 @@ _make_event_color_dict, concatenate_images, ) +from mne.viz.ui_events import link, subscribe, ColormapRange from mne.viz import ClickableImage, add_background_image, mne_analyze_colormap from mne.io import read_raw_fif from mne.event import read_events from mne.epochs import Epochs +from mne import read_evokeds base_dir = Path(__file__).parent.parent.parent / "io" / "tests" / "data" raw_fname = base_dir / "test_raw.fif" cov_fname = base_dir / "test-cov.fif" ev_fname = base_dir / "test_raw-eve.fif" +ave_fname = base_dir / "test-ave.fif" def test_setup_vmin_vmax_warns(): @@ -202,3 +207,71 @@ def test_concatenate_images(a_w, a_h, b_w, b_h, axis): else: want_shape = (max(a_h, b_h), a_w + b_w, 3) assert img.shape == want_shape + + +def test_draggable_colorbar(): + """Test that DraggableColorbar publishes correct UI Events.""" + evokeds = read_evokeds(ave_fname) + 
left_auditory = evokeds[0] + right_auditory = evokeds[1] + vmin, vmax = -400, 400 + fig = left_auditory.plot_topomap("interactive", vlim=(vmin, vmax)) + fig2 = right_auditory.plot_topomap("interactive", vlim=(vmin, vmax)) + link(fig, fig2) + callback_calls = [] + + def callback(event): + callback_calls.append(event) + + subscribe(fig, "colormap_range", callback) + + # Test that correct event is published + _fake_keypress(fig, "down") + _fake_keypress(fig, "up") + assert len(callback_calls) == 2 + event = callback_calls.pop() + assert type(event) is ColormapRange + # Test that scrolling changes color limits + _fake_scroll(fig, 10, 10, 1) + event = callback_calls.pop() + assert abs(event.fmin) < abs(vmin) + assert abs(event.fmax) < abs(vmax) + fmin, fmax = event.fmin, event.fmax + _fake_scroll(fig, 10, 10, -1) + event = callback_calls.pop() + assert abs(event.fmin) > abs(fmin) + assert abs(event.fmax) > abs(fmax) + fmin, fmax = event.fmin, event.fmax + # Test that plus and minus change color limits + _fake_keypress(fig, "+") + event = callback_calls.pop() + assert abs(event.fmin) < abs(fmin) + assert abs(event.fmax) < abs(fmax) + fmin, fmax = event.fmin, event.fmax + _fake_keypress(fig, "-") + event = callback_calls.pop() + assert abs(event.fmin) > abs(fmin) + assert abs(event.fmax) > abs(fmax) + fmin, fmax = event.fmin, event.fmax + # Test that page up and page down change color limits + _fake_keypress(fig, "pageup") + event = callback_calls.pop() + assert event.fmin < fmin + assert event.fmax < fmax + fmin, fmax = event.fmin, event.fmax + _fake_keypress(fig, "pagedown") + event = callback_calls.pop() + assert event.fmin > fmin + assert event.fmax > fmax + # Test that space key resets color limits + _fake_keypress(fig, " ") + event = callback_calls.pop() + assert event.fmax == vmax + assert event.fmin == vmin + # Test that colormap change in one figure changes that of another one + cmap_want = fig.axes[0].CB.cycle[fig.axes[0].CB.index + 1] + cmap_old = fig.axes[0].CB.mappable.get_cmap().name + _fake_keypress(fig, "down") + cmap_new1 = fig.axes[0].CB.mappable.get_cmap().name + cmap_new2 = fig2.axes[0].CB.mappable.get_cmap().name + assert cmap_new1 == cmap_new2 == cmap_want != cmap_old diff --git a/mne/viz/topo.py b/mne/viz/topo.py index a01ee72a0c2..683c22d9a6a 100644 --- a/mne/viz/topo.py +++ b/mne/viz/topo.py @@ -457,7 +457,7 @@ def _imshow_tfr( else: cbar = plt.colorbar(mappable=img, ax=ax) if interactive_cmap: - ax.CB = DraggableColorbar(cbar, img) + ax.CB = DraggableColorbar(cbar, img, kind="tfr_image", ch_type=None) ax.RS = RectangleSelector(ax, onselect=onselect) # reference must be kept return t_end diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py index 0802362a27f..d47ec145e07 100644 --- a/mne/viz/topomap.py +++ b/mne/viz/topomap.py @@ -298,7 +298,16 @@ def _plot_update_evoked_topomap(params, bools): def _add_colorbar( - ax, im, cmap, side="right", pad=0.05, title=None, format=None, size="5%" + ax, + im, + cmap, + side="right", + pad=0.05, + title=None, + format=None, + size="5%", + kind=None, + ch_type=None, ): """Add a colorbar to an axis.""" import matplotlib.pyplot as plt @@ -308,7 +317,7 @@ def _add_colorbar( cax = divider.append_axes(side, size=size, pad=pad) cbar = plt.colorbar(im, cax=cax, format=format) if cmap is not None and cmap[1]: - ax.CB = DraggableColorbar(cbar, im) + ax.CB = DraggableColorbar(cbar, im, kind, ch_type) if title is not None: cax.set_title(title, y=1.05, fontsize=10) return cbar, cax @@ -587,7 +596,15 @@ def _plot_projs_topomap( ) if colorbar: - 
_add_colorbar(ax, im, cmap, title=units, format=cbar_fmt) + _add_colorbar( + ax, + im, + cmap, + title=units, + format=cbar_fmt, + kind="projs_topomap", + ch_type=_ch_type, + ) return ax.get_figure() @@ -973,7 +990,7 @@ def plot_topomap( .. versionadded:: 0.20 %(res_topomap)s %(size_topomap)s - %(cmap_topomap_simple)s + %(cmap_topomap)s %(vlim_plot_topomap)s .. versionadded:: 1.2 @@ -1454,7 +1471,16 @@ def _plot_ica_topomap( ch_type=ch_type, )[0] if colorbar: - cbar, cax = _add_colorbar(axes, im, cmap, pad=0.05, title="AU", format="%3.2f") + cbar, cax = _add_colorbar( + axes, + im, + cmap, + pad=0.05, + title="AU", + format="%3.2f", + kind="ica_topomap", + ch_type=ch_type, + ) cbar.ax.tick_params(labelsize=12) cbar.set_ticks(vlim) _hide_frame(axes) @@ -1685,7 +1711,15 @@ def plot_ica_components( im.axes.set_label(ica._ica_names[ii]) if colorbar: cbar, cax = _add_colorbar( - ax, im, cmap, title="AU", side="right", pad=0.05, format=cbar_fmt + ax, + im, + cmap, + title="AU", + side="right", + pad=0.05, + format=cbar_fmt, + kind="ica_comp_topomap", + ch_type=ch_type, ) cbar.ax.tick_params(labelsize=12) cbar.set_ticks(_vlim) @@ -1956,7 +1990,15 @@ def plot_tfr_topomap( from matplotlib import ticker units = _handle_default("units", units)["misc"] - cbar, cax = _add_colorbar(axes, im, cmap, title=units, format=cbar_fmt) + cbar, cax = _add_colorbar( + axes, + im, + cmap, + title=units, + format=cbar_fmt, + kind="tfr_topomap", + ch_type=ch_type, + ) if locator is None: locator = ticker.MaxNLocator(nbins=5) cbar.locator = locator @@ -2363,6 +2405,11 @@ def _slider_changed(val): kwargs=kwargs, ), ) + subscribe( + fig, + "colormap_range", + partial(_on_colormap_range, kwargs=kwargs), + ) if colorbar: if interactive: @@ -2383,7 +2430,9 @@ def _slider_changed(val): cbar.ax.tick_params(labelsize=7) if cmap[1]: for im in images: - im.axes.CB = DraggableColorbar(cbar, im) + im.axes.CB = DraggableColorbar( + cbar, im, kind="evoked_topomap", ch_type=ch_type + ) if proj == "interactive": _check_delayed_ssp(evoked) @@ -2460,6 +2509,11 @@ def _on_time_change( ax.figure.canvas.draw_idle() +def _on_colormap_range(event, kwargs): + """Handle updating colormap range.""" + kwargs.update(vlim=(event.fmin, event.fmax), cmap=event.cmap) + + def _plot_topomap_multi_cbar( data, pos, diff --git a/mne/viz/ui_events.py b/mne/viz/ui_events.py index ba5b1db9a33..78c1419ca2f 100644 --- a/mne/viz/ui_events.py +++ b/mne/viz/ui_events.py @@ -11,10 +11,12 @@ """ import contextlib from dataclasses import dataclass -from typing import Optional, List +from typing import Optional, List, Union import weakref import re +from matplotlib.colors import Colormap + from ..utils import warn, fill_doc, _validate_type, logger, verbose # Global dict {fig: channel} containing all currently active event channels. @@ -114,26 +116,38 @@ class ColormapRange(UIEvent): kind : str Kind of colormap being updated. The Notes section of the drawing routine publishing this event should mention the possible kinds. + ch_type : str + Type of sensor the data originates from. %(fmin_fmid_fmax)s %(alpha)s + cmap : str + The colormap to use. Either string or matplotlib.colors.Colormap + instance. Attributes ---------- kind : str Kind of colormap being updated. The Notes section of the drawing routine publishing this event should mention the possible kinds. + ch_type : str + Type of sensor the data originates from. unit : str The unit of the values. %(ui_event_name_source)s %(fmin_fmid_fmax)s %(alpha)s + cmap : str + The colormap to use. 
Either string or matplotlib.colors.Colormap + instance. """ kind: str + ch_type: Optional[str] = None fmin: Optional[float] = None fmid: Optional[float] = None fmax: Optional[float] = None alpha: Optional[bool] = None + cmap: Optional[Union[Colormap, str]] = None @dataclass diff --git a/mne/viz/utils.py b/mne/viz/utils.py index 264505b67ad..78f05ee9109 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -63,6 +63,7 @@ check_version, _check_decim, ) +from .ui_events import publish, subscribe, ColormapRange from ..transforms import apply_trans @@ -1569,11 +1570,14 @@ class DraggableColorbar: See http://www.ster.kuleuven.be/~pieterd/python/html/plotting/interactive_colorbar.html """ # noqa: E501 - def __init__(self, cbar, mappable): + def __init__(self, cbar, mappable, kind, ch_type): import matplotlib.pyplot as plt self.cbar = cbar self.mappable = mappable + self.kind = kind + self.ch_type = ch_type + self.fig = self.cbar.ax.figure self.press = None self.cycle = sorted( [i for i in dir(plt.cm) if hasattr(getattr(plt.cm, i), "N")] @@ -1582,6 +1586,7 @@ def __init__(self, cbar, mappable): self.index = self.cycle.index(mappable.get_cmap().name) self.lims = (self.cbar.norm.vmin, self.cbar.norm.vmax) self.connect() + subscribe(self.fig, "colormap_range", self._on_colormap_range) def connect(self): """Connect to all the events we need.""" @@ -1640,7 +1645,7 @@ def key_press(self, event): self.cbar.mappable.set_cmap(cmap) _draw_without_rendering(self.cbar) self.mappable.set_cmap(cmap) - self._update() + self._publish() def on_motion(self, event): """Handle mouse movements.""" @@ -1659,7 +1664,7 @@ def on_motion(self, event): elif event.button == 3: self.cbar.norm.vmin -= (perc * scale) * np.sign(dy) self.cbar.norm.vmax += (perc * scale) * np.sign(dy) - self._update() + self._publish() def on_release(self, event): """Handle release.""" @@ -1671,8 +1676,32 @@ def on_scroll(self, event): scale = 1.1 if event.step < 0 else 1.0 / 1.1 self.cbar.norm.vmin *= scale self.cbar.norm.vmax *= scale + self._publish() + + def _on_colormap_range(self, event): + if event.kind != self.kind or event.ch_type != self.ch_type: + return + if event.fmin is not None: + self.cbar.norm.vmin = event.fmin + if event.fmax is not None: + self.cbar.norm.vmax = event.fmax + if event.cmap is not None: + self.cbar.mappable.set_cmap(event.cmap) + self.mappable.set_cmap(event.cmap) self._update() + def _publish(self): + publish( + self.fig, + ColormapRange( + kind=self.kind, + ch_type=self.ch_type, + fmin=self.cbar.norm.vmin, + fmax=self.cbar.norm.vmax, + cmap=self.mappable.get_cmap(), + ), + ) + def _update(self): from matplotlib.ticker import AutoLocator From 5a83eaea91c48c8ac87a443b102b4509f88e8f50 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 5 Oct 2023 10:20:05 -0400 Subject: [PATCH 26/37] MAINT: Fix broken examples (#12074) --- doc/conf.py | 6 ++- examples/visualization/sensor_noise_level.py | 39 ------------------- mne/viz/_brain/_brain.py | 3 +- tutorials/io/30_reading_fnirs_data.py | 1 - .../50_artifact_correction_ssp.py | 5 ++- 5 files changed, 10 insertions(+), 44 deletions(-) delete mode 100644 examples/visualization/sensor_noise_level.py diff --git a/doc/conf.py b/doc/conf.py index 02d61c1210b..a514bddda39 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1621,6 +1621,7 @@ def reset_warnings(gallery_conf, fname): co = "connectivity" mne_conn = "https://mne.tools/mne-connectivity/stable" tu = "auto_tutorials" +pr = "preprocessing" di = "discussions" sm = "source-modeling" fw = "forward" @@ -1666,7 +1667,7 @@ def 
reset_warnings(gallery_conf, fname): f"{tu}/{sd}/plot_phantom_4DBTi.html": f"{tu}/{nv}/90_phantom_4DBTi.html", f"{tu}/{sd}/plot_brainstorm_auditory.html": f"{tu}/io/60_ctf_bst_auditory.html", f"{tu}/{sd}/plot_sleep.html": f"{tu}/clinical/60_sleep.html", - f"{tu}/{di}/plot_background_filtering.html": f"{tu}/preprocessing/25_background_filtering.html", # noqa E501 + f"{tu}/{di}/plot_background_filtering.html": f"{tu}/{pr}/25_background_filtering.html", # noqa E501 f"{tu}/{di}/plot_background_statistics.html": f"{tu}/{sn}/10_background_stats.html", f"{tu}/{sn}/plot_stats_cluster_erp.html": f"{tu}/{sn}/20_erp_stats.html", f"{tu}/{sn}/plot_stats_cluster_1samp_test_time_frequency.html": f"{tu}/{sn}/40_cluster_1samp_time_freq.html", # noqa E501 @@ -1681,7 +1682,7 @@ def reset_warnings(gallery_conf, fname): f"{tu}/{si}/plot_creating_data_structures.html": f"{tu}/{si}/10_array_objs.html", f"{tu}/{si}/plot_point_spread.html": f"{tu}/{si}/70_point_spread.html", f"{tu}/{si}/plot_dics.html": f"{tu}/{si}/80_dics.html", - f"{tu}/{tf}/plot_eyetracking.html": f"{tu}/preprocessing/90_eyetracking_data.html", + f"{tu}/{tf}/plot_eyetracking.html": f"{tu}/{pr}/90_eyetracking_data.html", f"{ex}/{co}/mne_inverse_label_connectivity.html": f"{mne_conn}/{ex}/mne_inverse_label_connectivity.html", # noqa E501 f"{ex}/{co}/cwt_sensor_connectivity.html": f"{mne_conn}/{ex}/cwt_sensor_connectivity.html", # noqa E501 f"{ex}/{co}/mixed_source_space_connectivity.html": f"{mne_conn}/{ex}/mixed_source_space_connectivity.html", # noqa E501 @@ -1692,6 +1693,7 @@ def reset_warnings(gallery_conf, fname): f"{ex}/{co}/mne_inverse_psi_visual.html": f"{mne_conn}/{ex}/mne_inverse_psi_visual.html", # noqa E501 f"{ex}/{co}/sensor_connectivity.html": f"{mne_conn}/{ex}/sensor_connectivity.html", f"{ex}/{vi}/publication_figure.html": f"{tu}/{vi}/10_publication_figure.html", + f"{ex}/{vi}/sensor_noise_level.html": f"{tu}/{pr}/50_artifact_correction_ssp.html", } diff --git a/examples/visualization/sensor_noise_level.py b/examples/visualization/sensor_noise_level.py deleted file mode 100644 index 8fb345058ab..00000000000 --- a/examples/visualization/sensor_noise_level.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -.. _ex-noise-level: - -====================================== -Show noise levels from empty room data -====================================== - -This shows how to use :meth:`mne.io.Raw.compute_psd` to examine noise levels -of systems. See :footcite:`KhanCohen2013` for an example. -""" -# Author: Eric Larson -# -# License: BSD-3-Clause - -# %% - -import mne - -data_path = mne.datasets.sample.data_path() - -raw_erm = mne.io.read_raw_fif( - data_path / "MEG" / "sample" / "ernoise_raw.fif", preload=True -) - -# %% -# We can plot the absolute noise levels: -raw_erm.compute_psd(tmax=10).plot( - average=True, - spatial_colors=False, - dB=False, - xscale="log", - picks="data", - exclude="bads", -) -# %% -# References -# ---------- -# -# .. 
footbibliography:: diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index f4d3a90eb0a..4c4aeb531ba 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -2839,7 +2839,8 @@ def add_sensors( self._units, sensor_colors=sensor_colors, ) - for item, actors in sensors_actors.items(): + # sensors_actors can still be None + for item, actors in (sensors_actors or {}).items(): for actor in actors: self._add_actor(item, actor) diff --git a/tutorials/io/30_reading_fnirs_data.py b/tutorials/io/30_reading_fnirs_data.py index b2208c85941..789450ceb79 100644 --- a/tutorials/io/30_reading_fnirs_data.py +++ b/tutorials/io/30_reading_fnirs_data.py @@ -73,7 +73,6 @@ Continuous Wave Devices *********************** - .. _import-nirx: NIRx (directory or hdr) diff --git a/tutorials/preprocessing/50_artifact_correction_ssp.py b/tutorials/preprocessing/50_artifact_correction_ssp.py index 5dbcac7cd4b..a1ea7135d8e 100644 --- a/tutorials/preprocessing/50_artifact_correction_ssp.py +++ b/tutorials/preprocessing/50_artifact_correction_ssp.py @@ -99,6 +99,9 @@ empty_room_raw.del_proj() # %% +# +# _ex-noise-level: +# # Visualizing the empty-room noise # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # @@ -106,7 +109,7 @@ # individual spectrum for each sensor, or an average (with confidence band) # across sensors: -spectrum = empty_room_raw.compute_psd() +spectrum = empty_room_raw.compute_psd(verbose="error") # ignore zero value warning for average in (False, True): spectrum.plot(average=average, dB=False, xscale="log", picks="data", exclude="bads") From 27d1c59777061a4a94c6a5fc03e6e08fc6c0cf9a Mon Sep 17 00:00:00 2001 From: Ivan Zubarev Date: Thu, 5 Oct 2023 19:42:29 +0300 Subject: [PATCH 27/37] #11608, buggfix and docstring update (#12066) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Eric Larson Co-authored-by: Daniel McCloy --- doc/changes/devel.rst | 1 + mne/channels/channels.py | 12 +++--------- mne/channels/tests/test_channels.py | 25 ++++++++++++++++++++----- mne/utils/docs.py | 20 +++++++++++++++----- 4 files changed, 39 insertions(+), 19 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index 84a48ef6e5c..ff753de08d4 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -44,6 +44,7 @@ Bugs - Fix bugs with :func:`mne.preprocessing.realign_raw` where the start of ``other`` was incorrectly cropped; and onsets and durations in ``other.annotations`` were left unsynced with the resampled data (:gh:`11950` by :newcontrib:`Qian Chu`) - Fix bug where ``encoding`` argument was ignored when reading annotations from an EDF file (:gh:`11958` by :newcontrib:`Andrew Gilbert`) - Mark tests ``test_adjacency_matches_ft`` and ``test_fetch_uncompressed_file`` as network tests (:gh:`12041` by :newcontrib:`Maksym Balatsko`) +- Fix bug with :func:`mne.channels.read_ch_adjacency` (:gh:`11608` by :newcontrib:`Ivan Zubarev`) - Fix bugs with saving splits for :class:`~mne.Epochs` (:gh:`11876` by `Dmitrii Altukhov`_) - Fix bug with multi-plot 3D rendering where only one plot was updated (:gh:`11896` by `Eric Larson`_) - Fix bug where subject birthdays were not correctly read by :func:`mne.io.read_raw_snirf` (:gh:`11912` by `Eric Larson`_) diff --git a/mne/channels/channels.py b/mne/channels/channels.py index d57610d257f..1dd4aa49d14 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -1340,8 +1340,7 @@ def read_ch_adjacency(fname, picks=None): You can retrieve the names of all built-in channel adjacencies via 
:func:`mne.channels.get_builtin_ch_adjacencies`. - %(picks_all)s - Picks must match the template. + %(picks_all_notypes)s Returns ------- @@ -1401,7 +1400,8 @@ def read_ch_adjacency(fname, picks=None): nb = loadmat(fname)["neighbours"] ch_names = _recursive_flatten(nb["label"], str) - picks = _picks_to_idx(len(ch_names), picks) + temp_info = create_info(ch_names, 1.0) + picks = _picks_to_idx(temp_info, picks, none="all") neighbors = [_recursive_flatten(c, str) for c in nb["neighblabel"].flatten()] assert len(ch_names) == len(neighbors) adjacency = _ch_neighbor_adjacency(ch_names, neighbors) @@ -1409,12 +1409,6 @@ def read_ch_adjacency(fname, picks=None): adjacency = adjacency[picks][:, picks] ch_names = [ch_names[p] for p in picks] - # make sure MEG channel names contain space after "MEG" - for idx, ch_name in enumerate(ch_names): - if ch_name.startswith("MEG") and not ch_name[3] == " ": - ch_name = ch_name.replace("MEG", "MEG ") - ch_names[idx] = ch_name - return adjacency, ch_names diff --git a/mne/channels/tests/test_channels.py b/mne/channels/tests/test_channels.py index 42695ae76bf..5679a7f21e2 100644 --- a/mne/channels/tests/test_channels.py +++ b/mne/channels/tests/test_channels.py @@ -243,6 +243,25 @@ def test_get_builtin_ch_adjacencies(): assert len(name_and_description) == 2 +@pytest.mark.parametrize("name", get_builtin_ch_adjacencies()) +@pytest.mark.parametrize("picks", ["pick-slice", "pick-arange", "pick-names"]) +def test_read_builtin_ch_adjacency_picks(name, picks): + """Test picking channel subsets when reading builtin adjacency matrices.""" + ch_adjacency, ch_names = read_ch_adjacency(name) + assert_equal(ch_adjacency.shape[0], len(ch_names)) + subset_names = ch_names[::2] + if picks == "pick-slice": + subset = slice(None, None, 2) + elif picks == "pick-arange": + subset = np.arange(0, len(ch_names), 2) + else: + assert picks == "pick-names" + subset = subset_names + + ch_subset_adjacency, ch_subset_names = read_ch_adjacency(name, subset) + assert_array_equal(ch_subset_names, subset_names) + + def test_read_ch_adjacency(tmp_path): """Test reading channel adjacency templates.""" a = partial(np.array, dtype=" Date: Thu, 5 Oct 2023 13:16:50 -0400 Subject: [PATCH 28/37] MAINT: Work around PySide 6.5.3 event loop error (#12076) --- mne/viz/_brain/tests/test_brain.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/mne/viz/_brain/tests/test_brain.py b/mne/viz/_brain/tests/test_brain.py index a4ad2490a1e..e9ea19c748d 100644 --- a/mne/viz/_brain/tests/test_brain.py +++ b/mne/viz/_brain/tests/test_brain.py @@ -738,11 +738,14 @@ def tiny(tmp_path): def test_brain_screenshot(renderer_interactive_pyvistaqt, tmp_path, brain_gc): """Test time viewer screenshot.""" # This is broken on Conda + GHA for some reason + from qtpy import API_NAME + if ( os.getenv("CONDA_PREFIX", "") != "" and os.getenv("GITHUB_ACTIONS", "") == "true" + or API_NAME.lower() == "pyside6" ): - pytest.skip("Test is unreliable on GitHub Actions conda runs") + pytest.skip("Test is unreliable on GitHub Actions conda runs and pyside6") tiny_brain, ratio = tiny(tmp_path) img_nv = tiny_brain.screenshot(time_viewer=False) want = (_TINY_SIZE[1] * ratio, _TINY_SIZE[0] * ratio, 3) @@ -1096,6 +1099,10 @@ def test_brain_traces(renderer_interactive_pyvistaqt, hemi, src, tmp_path, brain def test_brain_scraper(renderer_interactive_pyvistaqt, brain_gc, tmp_path): """Test a simple scraping example.""" pytest.importorskip("sphinx_gallery") + from qtpy import API_NAME + + if API_NAME.lower() == "pyside6": + 
pytest.skip("Error in event loop on PySidie6") stc = read_source_estimate(fname_stc, subject="sample") size = (600, 400) brain = stc.plot( From 6aca4ece3080e49b936f4a94096b4b074f0713d2 Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Thu, 5 Oct 2023 23:40:19 +0300 Subject: [PATCH 29/37] add Ivan to names.inc (#12081) --- doc/changes/names.inc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/changes/names.inc b/doc/changes/names.inc index be6ee47ec01..722f0c2dc0d 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -212,6 +212,8 @@ .. _Ilias Machairas: https://github.com/JungleHippo +.. _Ivan Zubarev: https://github.com/zubara + .. _Ivana Kojcic: https://github.com/ikojcic .. _Jaakko Leppakangas: https://github.com/jaeilepp From 23fa43c3df27428734aa23f7bca3911c52b29048 Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Fri, 6 Oct 2023 09:32:02 +0300 Subject: [PATCH 30/37] add unicode roundtrip for FIF (#12080) --- doc/changes/devel.rst | 1 + mne/_fiff/tag.py | 7 ++++++- mne/_fiff/write.py | 5 ++++- mne/tests/test_evoked.py | 6 ++++++ 4 files changed, 17 insertions(+), 2 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index ff753de08d4..8fc12edc619 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -65,6 +65,7 @@ Bugs - Fix parsing of eye-link :class:`~mne.Annotations` when ``apply_offsets=False`` is provided to :func:`~mne.io.read_raw_eyelink` (:gh:`12003` by `Mathieu Scheltienne`_) - Correctly prune channel-specific :class:`~mne.Annotations` when creating :class:`~mne.Epochs` without the channel(s) included in the channel specific annotations (:gh:`12010` by `Mathieu Scheltienne`_) - Fix :func:`~mne.viz.plot_volume_source_estimates` with :class:`~mne.VolSourceEstimate` which include a list of vertices (:gh:`12025` by `Mathieu Scheltienne`_) +- Add support for non-ASCII characters in Annotations, Evoked comments, etc when saving to FIFF format (:gh:`12080` by `Daniel McCloy`_) - Correctly handle passing ``"eyegaze"`` or ``"pupil"`` to :meth:`mne.io.Raw.pick` (:gh:`12019` by `Scott Huberty`_) API changes diff --git a/mne/_fiff/tag.py b/mne/_fiff/tag.py index 87ea6ca9640..f64d16e1ca6 100644 --- a/mne/_fiff/tag.py +++ b/mne/_fiff/tag.py @@ -4,7 +4,9 @@ # License: BSD-3-Clause from functools import partial +import html import struct +import re import numpy as np from scipy.sparse import csc_matrix, csr_matrix @@ -265,7 +267,10 @@ def _read_string(fid, tag, shape, rlims): """Read a string tag.""" # Always decode to ISO 8859-1 / latin1 (FIFF standard). 
d = _frombuffer_rows(fid, tag.size, dtype=">c", shape=shape, rlims=rlims) - return str(d.tobytes().decode("latin1", "ignore")) + string = str(d.tobytes().decode("latin1", "ignore")) + if re.search(r"&#[0-9a-fA-F]{6};", string): + string = html.unescape(string) + return string def _read_complex_float(fid, tag, shape, rlims): diff --git a/mne/_fiff/write.py b/mne/_fiff/write.py index c66e3245eb5..b8ed1d2b1d8 100644 --- a/mne/_fiff/write.py +++ b/mne/_fiff/write.py @@ -128,7 +128,10 @@ def write_julian(fid, kind, data): def write_string(fid, kind, data): """Write a string tag.""" - str_data = str(data).encode("latin1") + try: + str_data = str(data).encode("latin1") + except UnicodeEncodeError: + str_data = str(data).encode("latin1", errors="xmlcharrefreplace") data_size = len(str_data) # therefore compute size here my_dtype = ">a" # py2/3 compatible on writing -- don't ask me why if data_size > 0: diff --git a/mne/tests/test_evoked.py b/mne/tests/test_evoked.py index 7c45bab7a7e..040978c9ff9 100644 --- a/mne/tests/test_evoked.py +++ b/mne/tests/test_evoked.py @@ -263,6 +263,12 @@ def test_io_evoked(tmp_path): ave_complex = read_evokeds(fname_temp)[0] assert_allclose(ave.data, ave_complex.data.imag) + # test non-ascii comments (gh 11684) + aves1[0].comment = "🙃" + write_evokeds(tmp_path / "evoked-ave.fif", aves1, overwrite=True) + aves1_read = read_evokeds(tmp_path / "evoked-ave.fif")[0] + assert aves1_read.comment == aves1[0].comment + # test warnings on bad filenames fname2 = tmp_path / "test-bad-name.fif" with pytest.warns(RuntimeWarning, match="-ave.fif"): From 566fa0701f5c44324b330c8b21d8c8a434898e3e Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Fri, 6 Oct 2023 04:15:42 -0400 Subject: [PATCH 31/37] BUG: Fix bug with ch_name resolution (#12086) --- mne/channels/channels.py | 5 ++++- mne/channels/tests/test_channels.py | 15 +++++++++++---- .../40_cluster_1samp_time_freq.py | 7 +++---- .../75_cluster_ftest_spatiotemporal.py | 10 ++++------ 4 files changed, 22 insertions(+), 15 deletions(-) diff --git a/mne/channels/channels.py b/mne/channels/channels.py index 1dd4aa49d14..3ec53636a15 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -1556,7 +1556,10 @@ def find_ch_adjacency(info, ch_type): if conn_name is not None: logger.info(f"Reading adjacency matrix for {conn_name}.") - return read_ch_adjacency(conn_name) + adjacency, ch_names = read_ch_adjacency(conn_name) + if conn_name.startswith("neuromag") and info["ch_names"][0].startswith("MEG "): + ch_names = [ch_name.replace("MEG", "MEG ") for ch_name in ch_names] + return adjacency, ch_names logger.info( "Could not find a adjacency matrix for the data. " "Computing adjacency based on Delaunay triangulations." diff --git a/mne/channels/tests/test_channels.py b/mne/channels/tests/test_channels.py index 5679a7f21e2..1ba186b4218 100644 --- a/mne/channels/tests/test_channels.py +++ b/mne/channels/tests/test_channels.py @@ -465,7 +465,7 @@ def test_1020_selection(): @testing.requires_testing_data def test_find_ch_adjacency(): """Test computing the adjacency matrix.""" - raw = read_raw_fif(raw_fname, preload=True) + raw = read_raw_fif(raw_fname) sizes = {"mag": 828, "grad": 1700, "eeg": 384} nchans = {"mag": 102, "grad": 204, "eeg": 60} for ch_type in ["mag", "grad", "eeg"]: @@ -473,6 +473,13 @@ def test_find_ch_adjacency(): # Silly test for checking the number of neighbors. 
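# ---------------------------------------------------------------------------
# Illustrative aside (not part of this patch series): the non-ASCII FIFF
# round-trip added in gh-12080 above combines latin-1 encoding with XML
# character references. A standalone sketch of that encode/decode strategy;
# the helper names here are hypothetical, not MNE API:
import html
import re


def _encode_fiff_string(value):
    # latin-1 by default; characters outside latin-1 become references
    # such as "&#128579;" via xmlcharrefreplace
    try:
        return str(value).encode("latin1")
    except UnicodeEncodeError:
        return str(value).encode("latin1", errors="xmlcharrefreplace")


def _decode_fiff_string(raw_bytes):
    # decode as latin-1, then undo any character references on read
    string = raw_bytes.decode("latin1", "ignore")
    if re.search(r"&#[0-9a-fA-F]{6};", string):
        string = html.unescape(string)
    return string


assert _decode_fiff_string(_encode_fiff_string("🙃")) == "🙃"
# ---------------------------------------------------------------------------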
assert_equal(conn.getnnz(), sizes[ch_type]) assert_equal(len(ch_names), nchans[ch_type]) + kwargs = dict(exclude=()) + if ch_type in ("mag", "grad"): + kwargs["meg"] = ch_type + else: + kwargs[ch_type] = True + want_names = [raw.ch_names[pick] for pick in pick_types(raw.info, **kwargs)] + assert ch_names == want_names pytest.raises(ValueError, find_ch_adjacency, raw.info, None) # Test computing the conn matrix with gradiometers. @@ -506,7 +513,7 @@ def test_find_ch_adjacency(): def test_neuromag122_adjacency(): """Test computing the adjacency matrix of Neuromag122-Data.""" nm122_fname = testing_path / "misc" / "neuromag122_test_file-raw.fif" - raw = read_raw_fif(nm122_fname, preload=True) + raw = read_raw_fif(nm122_fname) conn, ch_names = find_ch_adjacency(raw.info, "grad") assert conn.getnnz() == 1564 assert len(ch_names) == 122 @@ -515,7 +522,7 @@ def test_neuromag122_adjacency(): def test_drop_channels(): """Test if dropping channels works with various arguments.""" - raw = read_raw_fif(raw_fname, preload=True).crop(0, 0.1) + raw = read_raw_fif(raw_fname).crop(0, 0.1) raw.drop_channels(["MEG 0111"]) # list argument raw.drop_channels("MEG 0112") # str argument raw.drop_channels({"MEG 0132", "MEG 0133"}) # set argument @@ -535,7 +542,7 @@ def test_drop_channels(): def test_pick_channels(): """Test if picking channels works with various arguments.""" - raw = read_raw_fif(raw_fname, preload=True).crop(0, 0.1) + raw = read_raw_fif(raw_fname).crop(0, 0.1) # selected correctly 3 channels raw.pick(["MEG 0113", "MEG 0112", "MEG 0111"]) diff --git a/tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py b/tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py index 7936e852141..a43fdfd46aa 100644 --- a/tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py +++ b/tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py @@ -5,10 +5,9 @@ Non-parametric 1 sample cluster statistic on single trial power =============================================================== -This script shows how to estimate significant clusters -in time-frequency power estimates. It uses a non-parametric -statistical procedure based on permutations and cluster -level statistics. +This script shows how to estimate significant clusters in time-frequency power +estimates. It uses a non-parametric statistical procedure based on permutations and +cluster level statistics. The procedure consists of: diff --git a/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py b/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py index 462a40ca433..db6505fbafe 100644 --- a/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py +++ b/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py @@ -5,12 +5,10 @@ Spatiotemporal permutation F-test on full sensor data ===================================================== -Tests for differential evoked responses in at least -one condition using a permutation clustering test. -The FieldTrip neighbor templates will be used to determine -the adjacency between sensors. This serves as a spatial prior -to the clustering. Spatiotemporal clusters will then -be visualized using custom matplotlib code. +Tests for differential evoked responses in at least one condition using a permutation +clustering test. The FieldTrip neighbor templates will be used to determine the +adjacency between sensors. This serves as a spatial prior to the clustering. +Spatiotemporal clusters will then be visualized using custom matplotlib code. 
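Illustrative aside (not part of the patch series): the FieldTrip neighbor templates
referred to above are the built-in adjacency matrices handled by the channels API
touched in these patches, and with the ``picks`` fix from gh-12066 a channel subset
can be selected when loading one. A minimal sketch, assuming ``"neuromag306mag"``
is among the bundled template names::

    import mne

    # list a few of the bundled FieldTrip neighbor templates
    print(mne.channels.get_builtin_ch_adjacencies()[:5])

    # load one template; adjacency is a sparse (n_channels, n_channels) matrix
    adjacency, ch_names = mne.channels.read_ch_adjacency("neuromag306mag")

    # restrict to a subset (picks may be indices or channel names)
    sub_adjacency, sub_names = mne.channels.read_ch_adjacency(
        "neuromag306mag", picks=ch_names[::2]
    )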
Here, the unit of observation is epochs from a specific study subject. However, the same logic applies when the unit observation is From 812b02ef0808bff97b32be54473de9bb986366db Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Fri, 6 Oct 2023 12:56:04 +0300 Subject: [PATCH 32/37] Cache avatars (#12077) --- doc/_static/js/contrib-avatars.js | 2 +- doc/_templates/homepage.html | 2 ++ doc/_templates/layout.html | 3 --- doc/conf.py | 1 + doc/sphinxext/_avatar_template.html | 7 ++++++ doc/sphinxext/contrib_avatars.py | 34 +++++++++++++++++++++++++++++ requirements_doc.txt | 1 + tools/circleci_bash_env.sh | 2 ++ 8 files changed, 48 insertions(+), 4 deletions(-) create mode 100644 doc/sphinxext/_avatar_template.html create mode 100644 doc/sphinxext/contrib_avatars.py diff --git a/doc/_static/js/contrib-avatars.js b/doc/_static/js/contrib-avatars.js index bc8257530e3..dde2e8d29f7 100644 --- a/doc/_static/js/contrib-avatars.js +++ b/doc/_static/js/contrib-avatars.js @@ -47,7 +47,7 @@ async function putAvatarsInPage() { } // finish outer.append(title, inner); - document.getElementById("institution-logos").after(outer); + document.body.append(outer); } putAvatarsInPage(); diff --git a/doc/_templates/homepage.html b/doc/_templates/homepage.html index 3b65c11e59c..023e92e4dbc 100644 --- a/doc/_templates/homepage.html +++ b/doc/_templates/homepage.html @@ -41,4 +41,6 @@ {% endfor %} + +{% include 'avatars.html' %} diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html index a016af6141f..6bb88d43ed1 100644 --- a/doc/_templates/layout.html +++ b/doc/_templates/layout.html @@ -17,7 +17,4 @@ {%- block scripts_end %} {{ super() }} - {% if pagename == 'index' %} - - {% endif %} {%- endblock %} diff --git a/doc/conf.py b/doc/conf.py index a514bddda39..1da1a81e870 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -103,6 +103,7 @@ "sphinxcontrib.bibtex", "sphinxcontrib.youtube", # homegrown + "contrib_avatars", "gen_commands", "gen_names", "gh_substitutions", diff --git a/doc/sphinxext/_avatar_template.html b/doc/sphinxext/_avatar_template.html new file mode 100644 index 00000000000..a0e086ee85e --- /dev/null +++ b/doc/sphinxext/_avatar_template.html @@ -0,0 +1,7 @@ + + + + + + + diff --git a/doc/sphinxext/contrib_avatars.py b/doc/sphinxext/contrib_avatars.py new file mode 100644 index 00000000000..e3bedc5e649 --- /dev/null +++ b/doc/sphinxext/contrib_avatars.py @@ -0,0 +1,34 @@ +from pathlib import Path + +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.support.ui import WebDriverWait +from selenium.common.exceptions import WebDriverException + + +def generate_contrib_avatars(app, config): + """Render a template webpage with avatars generated by JS and a GitHub API call.""" + root = Path(app.srcdir) + infile = root / "sphinxext" / "_avatar_template.html" + outfile = root / "_templates" / "avatars.html" + try: + options = webdriver.ChromeOptions() + options.add_argument("--headless=new") + driver = webdriver.Chrome(options=options) + except WebDriverException: + options = webdriver.FirefoxOptions() + options.add_argument("--headless=new") + driver = webdriver.Firefox(options=options) + driver.get(f"file://{infile}") + wait = WebDriverWait(driver, 20) + wait.until(lambda d: d.find_element(by=By.ID, value="contributor-avatars")) + body = driver.find_element(by=By.TAG_NAME, value="body").get_attribute("innerHTML") + with open(outfile, "w") as fid: + fid.write(body) + driver.quit() + + +def setup(app): + """Set up the Sphinx app.""" + 
app.connect("config-inited", generate_contrib_avatars) + return diff --git a/requirements_doc.txt b/requirements_doc.txt index a3cc904f979..c0b2bdce0a6 100644 --- a/requirements_doc.txt +++ b/requirements_doc.txt @@ -19,3 +19,4 @@ pytest graphviz pyzmq!=24.0.0 ipython!=8.7.0 +selenium diff --git a/tools/circleci_bash_env.sh b/tools/circleci_bash_env.sh index c63a6f0da1a..fb5e471c9fd 100755 --- a/tools/circleci_bash_env.sh +++ b/tools/circleci_bash_env.sh @@ -5,6 +5,8 @@ set -o pipefail ./tools/setup_xvfb.sh sudo apt install -qq graphviz optipng python3.10-venv python3-venv libxft2 ffmpeg +wget https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb +sudo apt install ./google-chrome-stable_current_amd64.deb python3.10 -m venv ~/python_env echo "set -e" >> $BASH_ENV echo "set -o pipefail" >> $BASH_ENV From 92f5dd8520a74a273872536365a144972a656e3c Mon Sep 17 00:00:00 2001 From: Santeri Ruuskanen <66060772+ruuskas@users.noreply.github.com> Date: Fri, 6 Oct 2023 13:24:01 +0300 Subject: [PATCH 33/37] BUG: Fix bug with mne browser backend (#12078) --- doc/changes/devel.rst | 1 + mne/utils/config.py | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index 8fc12edc619..eeb06c3d7be 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -67,6 +67,7 @@ Bugs - Fix :func:`~mne.viz.plot_volume_source_estimates` with :class:`~mne.VolSourceEstimate` which include a list of vertices (:gh:`12025` by `Mathieu Scheltienne`_) - Add support for non-ASCII characters in Annotations, Evoked comments, etc when saving to FIFF format (:gh:`12080` by `Daniel McCloy`_) - Correctly handle passing ``"eyegaze"`` or ``"pupil"`` to :meth:`mne.io.Raw.pick` (:gh:`12019` by `Scott Huberty`_) +- Fix bug with :func:`~mne.viz.plot_raw` where changing ``MNE_BROWSER_BACKEND`` via :func:`~mne.set_config` would have no effect within a Python session (:gh:`12078` by `Santeri Ruuskanen`_) API changes ~~~~~~~~~~~ diff --git a/mne/utils/config.py b/mne/utils/config.py index 2217703fd4f..47a26e8109d 100644 --- a/mne/utils/config.py +++ b/mne/utils/config.py @@ -381,6 +381,10 @@ def set_config(key, value, home_dir=None, set_env=True): config[key] = value if set_env: os.environ[key] = value + if key == "MNE_BROWSER_BACKEND": + from ..viz._figure import set_browser_backend + + set_browser_backend(value) # Write all values. This may fail if the default directory is not # writeable. 
From 5f7c9b90fdf89bf7b33ee75b74f79fdb1128d42a Mon Sep 17 00:00:00 2001 From: Santeri Ruuskanen <66060772+ruuskas@users.noreply.github.com> Date: Fri, 6 Oct 2023 14:22:06 +0300 Subject: [PATCH 34/37] DOC: Morlet wavelet length in tfr_morlet (#12073) Co-authored-by: Daniel McCloy Co-authored-by: Eric Larson --- doc/conf.py | 2 +- mne/time_frequency/multitaper.py | 3 +- mne/time_frequency/tfr.py | 18 +++++++----- mne/utils/docs.py | 50 +++++++++++++++++++++++--------- 4 files changed, 50 insertions(+), 23 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 1da1a81e870..d8c9f52ad6e 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -201,7 +201,7 @@ "path-like": ":term:`path-like`", "array-like": ":term:`array_like `", "Path": ":class:`python:pathlib.Path`", - "bool": ":ref:`python:typebool`", + "bool": ":ref:`bool `", # Matplotlib "colormap": ":ref:`colormap `", "color": ":doc:`color `", diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py index a304d50f6c1..8810926f817 100644 --- a/mne/time_frequency/multitaper.py +++ b/mne/time_frequency/multitaper.py @@ -534,7 +534,8 @@ def tfr_array_multitaper( Notes ----- - %(temporal-window_tfr_notes)s + %(temporal_window_tfr_intro)s + %(temporal_window_tfr_multitaper_notes)s %(time_bandwidth_tfr_notes)s .. versionadded:: 0.14.0 diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py index 8a687515dda..1a061b8b173 100644 --- a/mne/time_frequency/tfr.py +++ b/mne/time_frequency/tfr.py @@ -114,7 +114,7 @@ def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False): Notes ----- - %(morlet_notes)s + %(morlet_reference)s %(fwhm_morlet_notes)s References @@ -948,9 +948,9 @@ def tfr_morlet( Notes ----- - %(morlet_notes)s - %(temporal-window_tfr_notes)s - %(fwhm_morlet_notes)s + %(morlet_reference)s + %(temporal_window_tfr_intro)s + %(temporal_window_tfr_morlet_notes)s See :func:`mne.time_frequency.morlet` for more information about the Morlet wavelet. @@ -997,7 +997,7 @@ def tfr_array_morlet( Sampling frequency of the data. %(freqs_tfr)s %(n_cycles_tfr)s - zero_mean : bool | False + zero_mean : bool If True, make sure the wavelets have a mean of zero. default False. use_fft : bool Use the FFT for convolutions or not. default True. @@ -1039,8 +1039,9 @@ def tfr_array_morlet( Notes ----- - %(morlet_notes)s - %(temporal-window_tfr_notes)s + %(morlet_reference)s + %(temporal_window_tfr_intro)s + %(temporal_window_tfr_morlet_notes)s .. versionadded:: 0.14.0 @@ -1121,7 +1122,8 @@ def tfr_multitaper( Notes ----- - %(temporal-window_tfr_notes)s + %(temporal_window_tfr_intro)s + %(temporal_window_tfr_multitaper_notes)s %(time_bandwidth_tfr_notes)s .. versionadded:: 0.9.0 diff --git a/mne/utils/docs.py b/mne/utils/docs.py index b62b7f07fd1..f1e8369c5a5 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -1829,7 +1829,7 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): of the wavelet is determined by the ``sigma`` parameter, which gives the standard deviation of the wavelet's Gaussian envelope (our wavelets extend to ±5 standard deviations to ensure values very close to zero at the endpoints). -Some authors (e.g., :footcite:`Cohen2019`) recommend specifying and reporting +Some authors (e.g., :footcite:t:`Cohen2019`) recommend specifying and reporting wavelet duration in terms of the full-width half-maximum (FWHM) of the wavelet's Gaussian envelope. 
The FWHM is related to ``sigma`` by the following identity: :math:`\mathrm{FWHM} = \sigma \times 2 \sqrt{2 \ln{2}}` (or the @@ -1852,7 +1852,7 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): frequency in ``freqs``. If you want different FWHM values at each frequency, do the same computation with ``desired_fwhm`` as an array of the same shape as ``freqs``. -""" # noqa E501 +""" # %% # G @@ -2344,7 +2344,7 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): ] = """ label_tc : array | list (or generator) of array, shape (n_labels[, n_orient], n_times) Extracted time course for each label and source estimate. -""" # noqa: E501 +""" docdict[ "labels_eltc" @@ -2668,10 +2668,9 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): """ docdict[ - "morlet_notes" + "morlet_reference" ] = """ -The Morlet wavelets follow the formulation in -:footcite:`Tallon-BaudryEtAl1997`. +The Morlet wavelets follow the formulation in :footcite:t:`Tallon-BaudryEtAl1997`. """ docdict[ @@ -4178,7 +4177,7 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): .. versionadded:: 0.20 .. versionchanged:: 1.1 Added ``'eeglab'`` option. -""" # noqa E501 +""" docdict[ "split_naming" @@ -4438,8 +4437,8 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): """ docdict[ - "temporal-window_tfr_notes" -] = r""" + "temporal_window_tfr_intro" +] = """ In spectrotemporal analysis (as with traditional fourier methods), the temporal and spectral resolution are interrelated: longer temporal windows allow more precise frequency estimates; shorter temporal windows "smear" @@ -4457,10 +4456,34 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): smoothing increases with frequency.* Source: `FieldTrip tutorial: Time-frequency analysis using Hanning window, multitapers and wavelets `_. +""" # noqa: E501 + +docdict[ + "temporal_window_tfr_morlet_notes" +] = r""" +In MNE-Python, the length of the Morlet wavelet is affected by the arguments +``freqs`` and ``n_cycles``, which define the frequencies of interest +and the number of cycles, respectively. For the time-frequency representation, +the length of the wavelet is defined such that both tails of +the wavelet extend five standard deviations from the midpoint of its Gaussian +envelope and that there is a sample at time zero. + +The length of the wavelet is thus :math:`10\times\mathtt{sfreq}\cdot\sigma-1`, +which is equal to :math:`\frac{5}{\pi} \cdot \frac{\mathtt{n\_cycles} \cdot +\mathtt{sfreq}}{\mathtt{freqs}} - 1`, where +:math:`\sigma = \frac{\mathtt{n\_cycles}}{2\pi f}` corresponds to the standard +deviation of the wavelet's Gaussian envelope. Note that the length of the +wavelet must not exceed the length of your signal. + +For more information on the Morlet wavelet, see :func:`mne.time_frequency.morlet`. +""" -In MNE-Python, the temporal window length is defined by the arguments ``freqs`` -and ``n_cycles``, respectively defining the frequencies of interest and the -number of cycles: :math:`T = \frac{\mathtt{n\_cycles}}{\mathtt{freqs}}` +docdict[ + "temporal_window_tfr_multitaper_notes" +] = r""" +In MNE-Python, the multitaper temporal window length is defined by the arguments +``freqs`` and ``n_cycles``, respectively defining the frequencies of interest +and the number of cycles: :math:`T = \frac{\mathtt{n\_cycles}}{\mathtt{freqs}}` A fixed number of cycles for all frequencies will yield a temporal window which decreases with frequency. 
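Illustrative aside (not part of the patch series): the identities documented above
can be combined into a short conversion when a fixed wavelet duration is wanted.
Because :math:`\mathrm{FWHM} = \sigma \times 2 \sqrt{2 \ln{2}}` and
:math:`\sigma = \frac{\mathtt{n\_cycles}}{2\pi f}`, a sketch of going from a desired
FWHM to ``n_cycles`` is::

    import numpy as np

    freqs = np.arange(5.0, 100.0, 3.0)
    desired_fwhm = 0.4  # seconds; can also be an array with the same shape as freqs

    sigma = desired_fwhm / (2 * np.sqrt(2 * np.log(2)))  # FWHM -> standard deviation
    n_cycles = 2 * np.pi * freqs * sigma                 # from sigma = n_cycles / (2 pi f)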
For example, ``freqs=np.arange(1, 6, 2)`` and @@ -4468,7 +4491,8 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): To use a temporal window with fixed length, the number of cycles has to be defined based on the frequency. For example, ``freqs=np.arange(1, 6, 2)`` and -``n_cycles=freqs / 2`` yields ``T=array([0.5, 0.5, 0.5])``.""" # noqa: E501 +``n_cycles=freqs / 2`` yields ``T=array([0.5, 0.5, 0.5])``. +""" _theme = """\ theme : str | path-like From 647fdd309a6f4e386a246f522817f0ba3f616b6f Mon Sep 17 00:00:00 2001 From: Mainak Jas Date: Fri, 6 Oct 2023 10:20:42 -0400 Subject: [PATCH 35/37] [MRG] update codeowners (#12089) --- .github/CODEOWNERS | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8d17d366a06..0d333361914 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -33,7 +33,7 @@ *lcmv*.py @britta-wstnr # Channels -/mne/channels @agramfort @mscheltienne @dengemann +/mne/channels @agramfort @mscheltienne @dengemann @jasmainak # Core sensor-space classes /mne/epochs.py @drammock @agramfort @mscheltienne @dengemann @@ -45,13 +45,14 @@ # Decoding /mne/decoding/csp.py @cbrnr @agramfort @dengemann +/mne/decoding/*.py @jasmainak # fNIRS /mne/preprocessing/nirs @rob-luke *fnirs*.py @rob-luke # forward -/mne/forward/ @agramfort +/mne/forward/ @agramfort @jasmainak *forward*.py @agramfort # Intracranial @@ -69,6 +70,8 @@ /mne/io/nirx @rob-luke /mne/io/snirf @rob-luke /mne/export @sappelhoff @cbrnr +/mne/io/eeglab.py @jasmainak +/mne/io/eeglab/tests/test_eeglab.py @jasmainak # Minimum Norm /mne/minimum_norm @agramfort @@ -81,7 +84,7 @@ /mne/preprocessing/e*g.py @mscheltienne # Report -/mne/report @hoechenberger @dengemann +/mne/report @hoechenberger @dengemann @jasmainak # Simulation /mne/simulation/ @agramfort @@ -102,6 +105,9 @@ /tutorials/visualization @larsoner @wmvanvliet @dengemann /examples/visualization @larsoner @dengemann +# Datasets +/mne/datasets/brainstorm @jasmainak + ######################### # Project-level / other # ######################### From 37ae7e37354fddc5eced35aa96973e3f944c9780 Mon Sep 17 00:00:00 2001 From: Hamza Abdelhedi Date: Fri, 6 Oct 2023 10:43:57 -0400 Subject: [PATCH 36/37] Add raw stc (#12001) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Marijn van Vliet Co-authored-by: Daniel McCloy --- doc/changes/devel.rst | 1 + mne/source_estimate.py | 26 +++++++++-------- mne/tests/test_source_estimate.py | 46 +++++++++++++++++++++++++------ mne/utils/docs.py | 3 ++ 4 files changed, 57 insertions(+), 19 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index eeb06c3d7be..81e16e8658e 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -29,6 +29,7 @@ Enhancements - Added public :func:`mne.io.write_info` to complement :func:`mne.io.read_info` (:gh:`11918` by `Eric Larson`_) - Added option ``remove_dc`` to to :meth:`Raw.compute_psd() `, :meth:`Epochs.compute_psd() `, and :meth:`Evoked.compute_psd() `, to allow skipping DC removal when computing Welch or multitaper spectra (:gh:`11769` by `Nikolai Chapochnikov`_) - Add the possibility to provide a float between 0 and 1 as ``n_grad``, ``n_mag`` and ``n_eeg`` in `~mne.compute_proj_raw`, `~mne.compute_proj_epochs` and `~mne.compute_proj_evoked` to select the number of vectors based on the cumulative explained variance (:gh:`11919` by `Mathieu Scheltienne`_) +- Add extracting all time courses in a label using 
:func:`mne.extract_label_time_course` without applying an aggregation function (like ``mean``) (:gh:`12001` by `Hamza Abdelhedi`_) - Added support for Artinis fNIRS data files to :func:`mne.io.read_raw_snirf` (:gh:`11926` by `Robert Luke`_) - Add helpful error messages when using methods on empty :class:`mne.Epochs`-objects (:gh:`11306` by `Martin Schulz`_) - Add support for passing a :class:`python:dict` as ``sensor_color`` to specify per-channel-type colors in :func:`mne.viz.plot_alignment` (:gh:`12067` by `Eric Larson`_) diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 3f0674210ca..211d109222c 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -3240,6 +3240,7 @@ def _pca_flip(flip, data): "mean_flip": lambda flip, data: np.mean(flip * data, axis=0), "max": lambda flip, data: np.max(np.abs(data), axis=0), "pca_flip": _pca_flip, + None: lambda flip, data: data, # Return Identity: Preserves all vertices. } @@ -3494,7 +3495,7 @@ def _volume_labels(src, labels, mri_resolution): def _get_default_label_modes(): - return sorted(_label_funcs.keys()) + ["auto"] + return sorted(_label_funcs.keys(), key=lambda x: (x is None, x)) + ["auto"] def _get_allowed_label_modes(stc): @@ -3572,7 +3573,12 @@ def _gen_extract_label_time_course( ) # do the extraction - label_tc = np.zeros((n_labels,) + stc.data.shape[1:], dtype=stc.data.dtype) + if mode is None: + # prepopulate an empty list for easy array-like index-based assignment + label_tc = [None] * max(len(label_vertidx), len(src_flip)) + else: + # For other modes, initialize the label_tc array + label_tc = np.zeros((n_labels,) + stc.data.shape[1:], dtype=stc.data.dtype) for i, (vertidx, flip) in enumerate(zip(label_vertidx, src_flip)): if vertidx is not None: if isinstance(vertidx, sparse.csr_matrix): @@ -3585,15 +3591,13 @@ def _gen_extract_label_time_course( this_data = stc.data[vertidx] label_tc[i] = func(flip, this_data) - # extract label time series for the vol src space (only mean supported) - offset = nvert[:-n_mean].sum() # effectively :2 or :0 - for i, nv in enumerate(nvert[2:]): - if nv != 0: - v2 = offset + nv - label_tc[n_mode + i] = np.mean(stc.data[offset:v2], axis=0) - offset = v2 - - # this is a generator! 
+ if mode is not None: + offset = nvert[:-n_mean].sum() # effectively :2 or :0 + for i, nv in enumerate(nvert[2:]): + if nv != 0: + v2 = offset + nv + label_tc[n_mode + i] = np.mean(stc.data[offset:v2], axis=0) + offset = v2 yield label_tc diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index 5e0373f718e..9b78113127c 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -678,12 +678,24 @@ def test_extract_label_time_course(kind, vector): label_tcs = dict(mean=np.arange(n_labels)[:, None] * np.ones((n_labels, n_times))) label_tcs["max"] = label_tcs["mean"] + label_tcs[None] = label_tcs["mean"] # compute the mean with sign flip label_tcs["mean_flip"] = np.zeros_like(label_tcs["mean"]) for i, label in enumerate(labels): label_tcs["mean_flip"][i] = i * np.mean(label_sign_flip(label, src[:2])) + # compute pca_flip + label_flip = [] + for i, label in enumerate(labels): + this_flip = i * label_sign_flip(label, src[:2]) + label_flip.append(this_flip) + # compute pca_flip + label_tcs["pca_flip"] = np.zeros_like(label_tcs["mean"]) + for i, (label, flip) in enumerate(zip(labels, label_flip)): + sign = np.sign(np.dot(np.full((flip.shape[0]), i), flip)) + label_tcs["pca_flip"][i] = sign * label_tcs["mean"][i] + # generate some stc's with known data stcs = list() pad = (((0, 0), (2, 0), (0, 0)), "constant") @@ -734,7 +746,7 @@ def test_extract_label_time_course(kind, vector): assert_array_equal(arr[1:], vol_means_t) # test the different modes - modes = ["mean", "mean_flip", "pca_flip", "max", "auto"] + modes = ["mean", "mean_flip", "pca_flip", "max", "auto", None] for mode in modes: if vector and mode not in ("mean", "max", "auto"): @@ -748,18 +760,36 @@ def test_extract_label_time_course(kind, vector): ] assert len(label_tc) == n_stcs assert len(label_tc_method) == n_stcs - for tc1, tc2 in zip(label_tc, label_tc_method): - assert tc1.shape == (n_labels + len(vol_means),) + end_shape - assert tc2.shape == (n_labels + len(vol_means),) + end_shape - assert_allclose(tc1, tc2, rtol=1e-8, atol=1e-16) + for j, (tc1, tc2) in enumerate(zip(label_tc, label_tc_method)): + if mode is None: + assert all(arr.shape[1] == tc1[0].shape[1] for arr in tc1) + assert all(arr.shape[1] == tc2[0].shape[1] for arr in tc2) + assert (len(tc1), tc1[0].shape[1]) == (n_labels,) + end_shape + assert (len(tc2), tc2[0].shape[1]) == (n_labels,) + end_shape + for arr1, arr2 in zip(tc1, tc2): # list of arrays + assert_allclose(arr1, arr2, rtol=1e-8, atol=1e-16) + else: + assert tc1.shape == (n_labels + len(vol_means),) + end_shape + assert tc2.shape == (n_labels + len(vol_means),) + end_shape + assert_allclose(tc1, tc2, rtol=1e-8, atol=1e-16) if mode == "auto": use_mode = "mean" if vector else "mean_flip" else: use_mode = mode - # XXX we don't check pca_flip, probably should someday... 
- if use_mode in ("mean", "max", "mean_flip"): + if mode == "pca_flip": + for arr1, arr2 in zip(tc1, label_tcs[use_mode]): + assert_array_almost_equal(arr1, arr2) + elif use_mode is None: + for arr1, arr2 in zip( + tc1[:n_labels], label_tcs[use_mode] + ): # list of arrays + assert_allclose( + arr1, np.tile(arr2, (arr1.shape[0], 1)), rtol=1e-8, atol=1e-16 + ) + elif use_mode in ("mean", "max", "mean_flip"): assert_array_almost_equal(tc1[:n_labels], label_tcs[use_mode]) - assert_array_almost_equal(tc1[n_labels:], vol_means_t) + if mode is not None: + assert_array_almost_equal(tc1[n_labels:], vol_means_t) # test label with very few vertices (check SVD conditionals) label = Label(vertices=src[0]["vertno"][:2], hemi="lh") diff --git a/mne/utils/docs.py b/mne/utils/docs.py index f1e8369c5a5..ef311f30a71 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -1203,6 +1203,9 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): - ``'auto'`` (default) Uses ``'mean_flip'`` when a standard source estimate is applied, and ``'mean'`` when a vector source estimate is supplied. +- ``None`` + No aggregation is performed, and an array of shape ``(n_vertices, n_times)`` is + returned. .. versionadded:: 0.21 Support for ``'auto'``, vector, and volume source estimates. From fdaeb86206d334242bcbcfd09460c970b33e4dcd Mon Sep 17 00:00:00 2001 From: Mathieu Scheltienne Date: Sat, 7 Oct 2023 05:21:33 +0200 Subject: [PATCH 37/37] Use constrained layout in matplotlib visualization (#12050) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Eric Larson --- README.rst | 2 +- doc/changes/devel.rst | 1 + doc/conf.py | 1 - examples/decoding/decoding_rsa_sgskip.py | 6 +- examples/decoding/decoding_spoc_CMC.py | 3 +- ...decoding_time_generalization_conditions.py | 2 +- examples/decoding/decoding_xdawn_eeg.py | 9 +- examples/decoding/receptive_field_mtrf.py | 16 +- examples/inverse/label_source_activations.py | 6 +- .../inverse/mixed_source_space_inverse.py | 3 +- examples/inverse/source_space_snr.py | 3 +- examples/preprocessing/eeg_bridging.py | 7 +- examples/preprocessing/eeg_csd.py | 3 +- .../preprocessing/eog_artifact_histogram.py | 3 +- examples/preprocessing/eog_regression.py | 7 +- examples/preprocessing/shift_evoked.py | 3 - examples/simulation/plot_stc_metrics.py | 6 +- .../source_label_time_frequency.py | 25 +-- .../source_power_spectrum_opm.py | 1 - .../time_frequency_simulated.py | 17 +- examples/visualization/3d_to_2d.py | 3 +- examples/visualization/evoked_topomap.py | 4 +- mne/conftest.py | 38 +++-- mne/preprocessing/eyetracking/calibration.py | 2 +- mne/preprocessing/ica.py | 1 - mne/report/report.py | 30 +--- mne/time_frequency/spectrum.py | 1 - mne/time_frequency/tfr.py | 38 +++-- mne/viz/_3d.py | 26 +-- mne/viz/__init__.pyi | 2 - mne/viz/_dipole.py | 4 +- mne/viz/_figure.py | 12 +- mne/viz/_mpl_figure.py | 29 +++- mne/viz/_proj.py | 2 +- mne/viz/backends/_abstract.py | 15 +- mne/viz/backends/tests/test_utils.py | 3 + mne/viz/circle.py | 2 +- mne/viz/epochs.py | 14 +- mne/viz/evoked.py | 75 ++++----- mne/viz/ica.py | 24 +-- mne/viz/misc.py | 51 +++--- mne/viz/tests/test_epochs.py | 9 +- mne/viz/tests/test_evoked.py | 4 +- mne/viz/tests/test_topomap.py | 6 +- mne/viz/topo.py | 6 +- mne/viz/topomap.py | 106 +++++------- mne/viz/utils.py | 159 ++---------------- requirements.txt | 2 +- requirements_base.txt | 2 +- tools/github_actions_env_vars.sh | 2 +- .../epochs/60_make_fixed_length_epochs.py | 7 +- 
.../forward/50_background_freesurfer_mne.py | 3 +- tutorials/intro/70_report.py | 2 +- tutorials/inverse/20_dipole_fit.py | 2 +- tutorials/inverse/60_visualize_stc.py | 3 +- .../inverse/80_brainstorm_phantom_elekta.py | 2 +- tutorials/machine-learning/30_strf.py | 40 ++--- .../preprocessing/25_background_filtering.py | 10 +- .../preprocessing/30_filtering_resampling.py | 3 - .../50_artifact_correction_ssp.py | 5 +- .../preprocessing/60_maxwell_filtering_sss.py | 5 +- .../preprocessing/70_fnirs_processing.py | 15 +- tutorials/preprocessing/80_opm_processing.py | 12 +- tutorials/raw/20_event_arrays.py | 1 - tutorials/simulation/80_dics.py | 3 +- .../stats-sensor-space/10_background_stats.py | 17 +- .../40_cluster_1samp_time_freq.py | 17 +- .../50_cluster_between_time_freq.py | 3 +- .../70_cluster_rmANOVA_time_freq.py | 10 +- .../75_cluster_ftest_spatiotemporal.py | 13 +- .../time-freq/20_sensors_time_frequency.py | 2 +- 71 files changed, 351 insertions(+), 620 deletions(-) diff --git a/README.rst b/README.rst index c601e318b51..a3d35deb76a 100644 --- a/README.rst +++ b/README.rst @@ -96,7 +96,7 @@ The minimum required dependencies to run MNE-Python are: - Python >= 3.8 - NumPy >= 1.21.2 - SciPy >= 1.7.1 -- Matplotlib >= 3.4.3 +- Matplotlib >= 3.5.0 - pooch >= 1.5 - tqdm - Jinja2 diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index 81e16e8658e..b46c2a6fc60 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -37,6 +37,7 @@ Enhancements - Add :class:`~mne.time_frequency.EpochsSpectrumArray` and :class:`~mne.time_frequency.SpectrumArray` to support creating power spectra from :class:`NumPy array ` data (:gh:`11803` by `Alex Rockhill`_) - Add support for writing forward solutions to HDF5 and convenience function :meth:`mne.Forward.save` (:gh:`12036` by `Eric Larson`_) - Refactored internals of :func:`mne.read_annotations` (:gh:`11964` by `Paul Roujansky`_) +- By default MNE-Python creates matplotlib figures with ``layout='constrained'`` rather than the default ``layout='tight'`` (:gh:`12050` by `Mathieu Scheltienne`_ and `Eric Larson`_) - Enhance :func:`~mne.viz.plot_evoked_field` with a GUI that has controls for time, colormap, and contour lines (:gh:`11942` by `Marijn van Vliet`_) - Add :class:`mne.viz.ui_events.UIEvent` linking for interactive colorbars, allowing users to link figures and change the colormap and limits interactively. 
This supports :func:`~mne.viz.plot_evoked_topomap`, :func:`~mne.viz.plot_ica_components`, :func:`~mne.viz.plot_tfr_topomap`, :func:`~mne.viz.plot_projs_topomap`, :meth:`~mne.Evoked.plot_image`, and :meth:`~mne.Epochs.plot_image` (:gh:`12057` by `Santeri Ruuskanen`_) diff --git a/doc/conf.py b/doc/conf.py index d8c9f52ad6e..b8086500640 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1291,7 +1291,6 @@ def reset_warnings(gallery_conf, fname): warnings.filterwarnings("default", module="sphinx") # allow these warnings, but don't show them for key in ( - "The module matplotlib.tight_layout is deprecated", # nilearn "invalid version and will not be supported", # pyxdf "distutils Version classes are deprecated", # seaborn and neo "`np.object` is a deprecated alias for the builtin `object`", # pyxdf diff --git a/examples/decoding/decoding_rsa_sgskip.py b/examples/decoding/decoding_rsa_sgskip.py index 7cc6dbfbb01..3cc8467deb3 100644 --- a/examples/decoding/decoding_rsa_sgskip.py +++ b/examples/decoding/decoding_rsa_sgskip.py @@ -150,7 +150,7 @@ ############################################################################## # Plot labels = [""] * 5 + ["face"] + [""] * 11 + ["bodypart"] + [""] * 6 -fig, ax = plt.subplots(1) +fig, ax = plt.subplots(1, layout="constrained") im = ax.matshow(confusion, cmap="RdBu_r", clim=[0.3, 0.7]) ax.set_yticks(range(len(classes))) ax.set_yticklabels(labels) @@ -159,14 +159,13 @@ ax.axhline(11.5, color="k") ax.axvline(11.5, color="k") plt.colorbar(im) -plt.tight_layout() plt.show() ############################################################################## # Confusion matrix related to mental representations have been historically # summarized with dimensionality reduction using multi-dimensional scaling [1]. # See how the face samples cluster together. 
-fig, ax = plt.subplots(1) +fig, ax = plt.subplots(1, layout="constrained") mds = MDS(2, random_state=0, dissimilarity="precomputed") chance = 0.5 summary = mds.fit_transform(chance - confusion) @@ -186,7 +185,6 @@ ) ax.axis("off") ax.legend(loc="lower right", scatterpoints=1, ncol=2) -plt.tight_layout() plt.show() ############################################################################## diff --git a/examples/decoding/decoding_spoc_CMC.py b/examples/decoding/decoding_spoc_CMC.py index d73e9af9bbc..4e689d338d5 100644 --- a/examples/decoding/decoding_spoc_CMC.py +++ b/examples/decoding/decoding_spoc_CMC.py @@ -68,7 +68,7 @@ y_preds = cross_val_predict(clf, X, y, cv=cv) # Plot the True EMG power and the EMG power predicted from MEG data -fig, ax = plt.subplots(1, 1, figsize=[10, 4]) +fig, ax = plt.subplots(1, 1, figsize=[10, 4], layout="constrained") times = raw.times[meg_epochs.events[:, 0] - raw.first_samp] ax.plot(times, y_preds, color="b", label="Predicted EMG") ax.plot(times, y, color="r", label="True EMG") @@ -76,7 +76,6 @@ ax.set_ylabel("EMG Power") ax.set_title("SPoC MEG Predictions") plt.legend() -mne.viz.tight_layout() plt.show() ############################################################################## diff --git a/examples/decoding/decoding_time_generalization_conditions.py b/examples/decoding/decoding_time_generalization_conditions.py index 08ca0d9c0c3..a018ebbe75b 100644 --- a/examples/decoding/decoding_time_generalization_conditions.py +++ b/examples/decoding/decoding_time_generalization_conditions.py @@ -88,7 +88,7 @@ # %% # Plot -fig, ax = plt.subplots(constrained_layout=True) +fig, ax = plt.subplots(layout="constrained") im = ax.matshow( scores, vmin=0, diff --git a/examples/decoding/decoding_xdawn_eeg.py b/examples/decoding/decoding_xdawn_eeg.py index 3bdff716228..e7fac8c52e6 100644 --- a/examples/decoding/decoding_xdawn_eeg.py +++ b/examples/decoding/decoding_xdawn_eeg.py @@ -99,14 +99,13 @@ cm_normalized = cm.astype(float) / cm.sum(axis=1)[:, np.newaxis] # Plot confusion matrix -fig, ax = plt.subplots(1) +fig, ax = plt.subplots(1, layout="constrained") im = ax.imshow(cm_normalized, interpolation="nearest", cmap=plt.cm.Blues) ax.set(title="Normalized Confusion matrix") fig.colorbar(im) tick_marks = np.arange(len(target_names)) plt.xticks(tick_marks, target_names, rotation=45) plt.yticks(tick_marks, target_names) -fig.tight_layout() ax.set(ylabel="True label", xlabel="Predicted label") # %% @@ -114,7 +113,10 @@ # cross-validation fold) can be used for visualization. 
fig, axes = plt.subplots( - nrows=len(event_id), ncols=n_filter, figsize=(n_filter, len(event_id) * 2) + nrows=len(event_id), + ncols=n_filter, + figsize=(n_filter, len(event_id) * 2), + layout="constrained", ) fitted_xdawn = clf.steps[0][1] info = create_info(epochs.ch_names, 1, epochs.get_channel_types()) @@ -131,7 +133,6 @@ show=False, ) axes[ii, 0].set(ylabel=cur_class) -fig.tight_layout(h_pad=1.0, w_pad=1.0, pad=0.1) # %% # References diff --git a/examples/decoding/receptive_field_mtrf.py b/examples/decoding/receptive_field_mtrf.py index 0d24d5ebfa1..e927cd3cf25 100644 --- a/examples/decoding/receptive_field_mtrf.py +++ b/examples/decoding/receptive_field_mtrf.py @@ -67,12 +67,11 @@ n_channels = len(raw.ch_names) # Plot a sample of brain and stimulus activity -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") lns = ax.plot(scale(raw[:, :800][0].T), color="k", alpha=0.1) ln1 = ax.plot(scale(speech[0, :800]), color="r", lw=2) ax.legend([lns[0], ln1[0]], ["EEG", "Speech Envelope"], frameon=False) ax.set(title="Sample activity", xlabel="Time (s)") -mne.viz.tight_layout() # %% # Create and fit a receptive field model @@ -117,12 +116,11 @@ mean_scores = scores.mean(axis=0) # Plot mean prediction scores across all channels -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") ix_chs = np.arange(n_channels) ax.plot(ix_chs, mean_scores) ax.axhline(0, ls="--", color="r") ax.set(title="Mean prediction score", xlabel="Channel", ylabel="Score ($r$)") -mne.viz.tight_layout() # %% # Investigate model coefficients @@ -134,7 +132,7 @@ # Print mean coefficients across all time delays / channels (see Fig 1) time_plot = 0.180 # For highlighting a specific time. -fig, ax = plt.subplots(figsize=(4, 8)) +fig, ax = plt.subplots(figsize=(4, 8), layout="constrained") max_coef = mean_coefs.max() ax.pcolormesh( times, @@ -155,16 +153,14 @@ xticks=np.arange(tmin, tmax + 0.2, 0.2), ) plt.setp(ax.get_xticklabels(), rotation=45) -mne.viz.tight_layout() # Make a topographic map of coefficients for a given delay (see Fig 2C) ix_plot = np.argmin(np.abs(time_plot - times)) -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") mne.viz.plot_topomap( mean_coefs[:, ix_plot], pos=info, axes=ax, show=False, vlim=(-max_coef, max_coef) ) ax.set(title="Topomap of model coefficients\nfor delay %s" % time_plot) -mne.viz.tight_layout() # %% # Create and fit a stimulus reconstruction model @@ -240,7 +236,7 @@ y_pred = sr.predict(Y[test]) time = np.linspace(0, 2.0, 5 * int(sfreq)) -fig, ax = plt.subplots(figsize=(8, 4)) +fig, ax = plt.subplots(figsize=(8, 4), layout="constrained") ax.plot( time, speech[test][sr.valid_samples_][: int(5 * sfreq)], color="grey", lw=2, ls="--" ) @@ -248,7 +244,6 @@ ax.legend([lns[0], ln1[0]], ["Envelope", "Reconstruction"], frameon=False) ax.set(title="Stimulus reconstruction") ax.set_xlabel("Time (s)") -mne.viz.tight_layout() # %% # Investigate model coefficients @@ -292,7 +287,6 @@ title="Inverse-transformed coefficients\nbetween delays %s and %s" % (time_plot[0], time_plot[1]) ) -mne.viz.tight_layout() # %% # References diff --git a/examples/inverse/label_source_activations.py b/examples/inverse/label_source_activations.py index 599fff4c2f8..035533b4b9a 100644 --- a/examples/inverse/label_source_activations.py +++ b/examples/inverse/label_source_activations.py @@ -62,7 +62,7 @@ # View source activations # ----------------------- -fig, ax = plt.subplots(1) +fig, ax = plt.subplots(1, layout="constrained") t = 1e3 * stc_label.times ax.plot(t, 
stc_label.data.T, "k", linewidth=0.5, alpha=0.5) pe = [ @@ -81,7 +81,6 @@ xlim=xlim, ylim=ylim, ) -mne.viz.tight_layout() # %% # Using vector solutions @@ -92,7 +91,7 @@ pick_ori = "vector" stc_vec = apply_inverse(evoked, inverse_operator, lambda2, method, pick_ori=pick_ori) data = stc_vec.extract_label_time_course(label, src) -fig, ax = plt.subplots(1) +fig, ax = plt.subplots(1, layout="constrained") stc_vec_label = stc_vec.in_label(label) colors = ["#EE6677", "#228833", "#4477AA"] for ii, name in enumerate("XYZ"): @@ -117,4 +116,3 @@ xlim=xlim, ylim=ylim, ) -mne.viz.tight_layout() diff --git a/examples/inverse/mixed_source_space_inverse.py b/examples/inverse/mixed_source_space_inverse.py index 9baac7da379..f069b5e89ac 100644 --- a/examples/inverse/mixed_source_space_inverse.py +++ b/examples/inverse/mixed_source_space_inverse.py @@ -194,9 +194,8 @@ ) # plot the times series of 2 labels -fig, axes = plt.subplots(1) +fig, axes = plt.subplots(1, layout="constrained") axes.plot(1e3 * stc.times, label_ts[0][0, :], "k", label="bankssts-lh") axes.plot(1e3 * stc.times, label_ts[0][-1, :].T, "r", label="Brain-stem") axes.set(xlabel="Time (ms)", ylabel="MNE current (nAm)") axes.legend() -mne.viz.tight_layout() diff --git a/examples/inverse/source_space_snr.py b/examples/inverse/source_space_snr.py index 12d081f5c61..c7077d091e5 100644 --- a/examples/inverse/source_space_snr.py +++ b/examples/inverse/source_space_snr.py @@ -51,10 +51,9 @@ # Plot an average SNR across source points over time: ave = np.mean(snr_stc.data, axis=0) -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") ax.plot(evoked.times, ave) ax.set(xlabel="Time (s)", ylabel="SNR MEG-EEG") -fig.tight_layout() # Find time point of maximum SNR maxidx = np.argmax(ave) diff --git a/examples/preprocessing/eeg_bridging.py b/examples/preprocessing/eeg_bridging.py index d95ac709513..30cdde8502b 100644 --- a/examples/preprocessing/eeg_bridging.py +++ b/examples/preprocessing/eeg_bridging.py @@ -88,7 +88,7 @@ bridged_idx, ed_matrix = ed_data[6] -fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4)) +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), layout="constrained") fig.suptitle("Subject 6 Electrical Distance Matrix") # take median across epochs, only use upper triangular, lower is NaNs @@ -110,8 +110,6 @@ ax.set_xlabel("Channel Index") ax.set_ylabel("Channel Index") -fig.tight_layout() - # %% # Examine the Distribution of Electrical Distances # ------------------------------------------------ @@ -208,7 +206,7 @@ # reflect neural or at least anatomical differences as well (i.e. the # distance from the sensors to the brain). -fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4)) +fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), layout="constrained") fig.suptitle("Electrical Distance Distribution for EEGBCI Subjects") for ax in (ax1, ax2): ax.set_ylabel("Count") @@ -229,7 +227,6 @@ ax1.axvspan(0, 30, color="r", alpha=0.5) ax2.legend(loc=(1.04, 0)) -fig.subplots_adjust(right=0.725, bottom=0.15, wspace=0.4) # %% # For the group of subjects, let's look at their electrical distances diff --git a/examples/preprocessing/eeg_csd.py b/examples/preprocessing/eeg_csd.py index dffe94e3f1e..892f856e75e 100644 --- a/examples/preprocessing/eeg_csd.py +++ b/examples/preprocessing/eeg_csd.py @@ -78,8 +78,7 @@ # CSD has parameters ``stiffness`` and ``lambda2`` affecting smoothing and # spline flexibility, respectively. 
Let's see how they affect the solution: -fig, ax = plt.subplots(4, 4) -fig.subplots_adjust(hspace=0.5) +fig, ax = plt.subplots(4, 4, layout="constrained") fig.set_size_inches(10, 10) for i, lambda2 in enumerate([0, 1e-7, 1e-5, 1e-3]): for j, m in enumerate([5, 4, 3, 2]): diff --git a/examples/preprocessing/eog_artifact_histogram.py b/examples/preprocessing/eog_artifact_histogram.py index 5aa209228d7..2d51370b571 100644 --- a/examples/preprocessing/eog_artifact_histogram.py +++ b/examples/preprocessing/eog_artifact_histogram.py @@ -50,7 +50,6 @@ # %% # Plot EOG artifact distribution -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") ax.stem(1e3 * epochs.times, data) ax.set(xlabel="Times (ms)", ylabel="Blink counts (from %s trials)" % len(epochs)) -fig.tight_layout() diff --git a/examples/preprocessing/eog_regression.py b/examples/preprocessing/eog_regression.py index 6c88cb01d9a..2123974dde4 100644 --- a/examples/preprocessing/eog_regression.py +++ b/examples/preprocessing/eog_regression.py @@ -69,10 +69,9 @@ epochs_after = mne.Epochs(raw_clean, events, event_id, tmin, tmax, baseline=(tmin, 0)) evoked_after = epochs_after.average() -fig, ax = plt.subplots(nrows=3, ncols=2, figsize=(10, 7), sharex=True, sharey="row") +fig, ax = plt.subplots( + nrows=3, ncols=2, figsize=(10, 7), sharex=True, sharey="row", layout="constrained" +) evoked_before.plot(axes=ax[:, 0], spatial_colors=True) evoked_after.plot(axes=ax[:, 1], spatial_colors=True) -fig.subplots_adjust( - top=0.905, bottom=0.09, left=0.08, right=0.975, hspace=0.325, wspace=0.145 -) fig.suptitle("Before --> After") diff --git a/examples/preprocessing/shift_evoked.py b/examples/preprocessing/shift_evoked.py index 3cd70715ac8..c16becc679c 100644 --- a/examples/preprocessing/shift_evoked.py +++ b/examples/preprocessing/shift_evoked.py @@ -14,7 +14,6 @@ import matplotlib.pyplot as plt import mne -from mne.viz import tight_layout from mne.datasets import sample print(__doc__) @@ -60,5 +59,3 @@ titles=dict(grad="Absolute shift: 500 ms"), time_unit="s", ) - -tight_layout() diff --git a/examples/simulation/plot_stc_metrics.py b/examples/simulation/plot_stc_metrics.py index 750dcab0c21..105c66d7e12 100644 --- a/examples/simulation/plot_stc_metrics.py +++ b/examples/simulation/plot_stc_metrics.py @@ -234,7 +234,7 @@ ] # Plot the results -f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex="col", constrained_layout=True) +f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, sharex="col", layout="constrained") for ax, (title, results) in zip([ax1, ax2, ax3, ax4], region_results.items()): ax.plot(thresholds, results, ".-") ax.set(title=title, ylabel="score", xlabel="Threshold", xticks=thresholds) @@ -243,7 +243,7 @@ ax1.ticklabel_format(axis="y", style="sci", scilimits=(0, 1)) # tweak RLE # Cosine score with respect to time -f, ax1 = plt.subplots(constrained_layout=True) +f, ax1 = plt.subplots(layout="constrained") ax1.plot(stc_true_region.times, cosine_score(stc_true_region, stc_est_region)) ax1.set(title="Cosine score", xlabel="Time", ylabel="Score") @@ -277,6 +277,6 @@ # Plot the results for name, results in dipole_results.items(): - f, ax1 = plt.subplots(constrained_layout=True) + f, ax1 = plt.subplots(layout="constrained") ax1.plot(thresholds, 100 * np.array(results), ".-") ax1.set(title=name, ylabel="Error (cm)", xlabel="Threshold", xticks=thresholds) diff --git a/examples/time_frequency/source_label_time_frequency.py b/examples/time_frequency/source_label_time_frequency.py index da3af06e4dc..2e7cc4d3592 100644 --- 
a/examples/time_frequency/source_label_time_frequency.py +++ b/examples/time_frequency/source_label_time_frequency.py @@ -76,8 +76,7 @@ # subtract the evoked response in order to exclude evoked activity epochs_induced = epochs.copy().subtract_evoked() -plt.close("all") - +fig, axes = plt.subplots(2, 2, layout="constrained") for ii, (this_epochs, title) in enumerate( zip([epochs, epochs_induced], ["evoked + induced", "induced only"]) ): @@ -99,9 +98,8 @@ ########################################################################## # View time-frequency plots - plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43) - plt.subplot(2, 2, 2 * ii + 1) - plt.imshow( + ax = axes[ii, 0] + ax.imshow( 20 * power, extent=[times[0], times[-1], freqs[0], freqs[-1]], aspect="auto", @@ -110,13 +108,10 @@ vmax=30.0, cmap="RdBu_r", ) - plt.xlabel("Time (s)") - plt.ylabel("Frequency (Hz)") - plt.title("Power (%s)" % title) - plt.colorbar() + ax.set(xlabel="Time (s)", ylabel="Frequency (Hz)", title=f"Power ({title})") - plt.subplot(2, 2, 2 * ii + 2) - plt.imshow( + ax = axes[ii, 1] + ax.imshow( itc, extent=[times[0], times[-1], freqs[0], freqs[-1]], aspect="auto", @@ -125,9 +120,5 @@ vmax=0.7, cmap="RdBu_r", ) - plt.xlabel("Time (s)") - plt.ylabel("Frequency (Hz)") - plt.title("ITC (%s)" % title) - plt.colorbar() - -plt.show() + ax.set(xlabel="Time (s)", ylabel="Frequency (Hz)", title=f"ITC ({title})") + fig.colorbar(ax.images[0], ax=axes[ii]) diff --git a/examples/time_frequency/source_power_spectrum_opm.py b/examples/time_frequency/source_power_spectrum_opm.py index 14fcfa7039f..ce2ad03f607 100644 --- a/examples/time_frequency/source_power_spectrum_opm.py +++ b/examples/time_frequency/source_power_spectrum_opm.py @@ -84,7 +84,6 @@ .plot(picks="data", exclude="bads") ) fig.suptitle(titles[kind]) - fig.subplots_adjust(0.1, 0.1, 0.95, 0.85) ############################################################################## # Alignment and forward diff --git a/examples/time_frequency/time_frequency_simulated.py b/examples/time_frequency/time_frequency_simulated.py index 46747b6ae69..c6f00a9da32 100644 --- a/examples/time_frequency/time_frequency_simulated.py +++ b/examples/time_frequency/time_frequency_simulated.py @@ -100,7 +100,7 @@ freqs = np.arange(5.0, 100.0, 3.0) vmin, vmax = -3.0, 3.0 # Define our color limits. -fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True) +fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True, layout="constrained") for n_cycles, time_bandwidth, ax, title in zip( [freqs / 2, freqs, freqs / 2], # number of cycles [2.0, 4.0, 8.0], # time bandwidth @@ -130,7 +130,6 @@ show=False, colorbar=False, ) -plt.tight_layout() ############################################################################## # Stockwell (S) transform @@ -143,7 +142,7 @@ # we control the spectral / temporal resolution by specifying different widths # of the gaussian window using the ``width`` parameter. 
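# A minimal, self-contained sketch of the layout migration these example-script
# hunks apply: request Matplotlib's constrained-layout engine at figure creation
# instead of calling plt.tight_layout() / fig.subplots_adjust() afterwards.
# Assumes Matplotlib >= 3.5 (where the ``layout`` keyword exists); the data below
# are made up for illustration and are not part of the patch.
import matplotlib.pyplot as plt
import numpy as np

t = np.linspace(0.0, 1.0, 200)
fig, axs = plt.subplots(1, 3, figsize=(9, 3), sharey=True, layout="constrained")
for k, ax in enumerate(axs, start=1):
    ax.plot(t, np.sin(2 * np.pi * k * t))
    ax.set(title=f"{k} Hz", xlabel="Time (s)")
axs[0].set(ylabel="Amplitude (AU)")
# No tight_layout() call needed: the engine keeps labels and titles from
# overlapping and also makes room for colorbars added later via fig.colorbar().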
-fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True) +fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True, layout="constrained") fmin, fmax = freqs[[0, -1]] for width, ax in zip((0.2, 0.7, 3.0), axs): power = tfr_stockwell(epochs, fmin=fmin, fmax=fmax, width=width) @@ -151,7 +150,6 @@ [0], baseline=(0.0, 0.1), mode="mean", axes=ax, show=False, colorbar=False ) ax.set_title("Sim: Using S transform, width = {:0.1f}".format(width)) -plt.tight_layout() # %% # Morlet Wavelets @@ -162,7 +160,7 @@ # temporal resolution with the ``n_cycles`` parameter, which defines the # number of cycles to include in the window. -fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True) +fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True, layout="constrained") all_n_cycles = [1, 3, freqs / 2.0] for n_cycles, ax in zip(all_n_cycles, axs): power = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, return_itc=False) @@ -178,7 +176,6 @@ ) n_cycles = "scaled by freqs" if not isinstance(n_cycles, int) else n_cycles ax.set_title(f"Sim: Using Morlet wavelet, n_cycles = {n_cycles}") -plt.tight_layout() # %% # Narrow-bandpass Filter and Hilbert Transform @@ -189,7 +186,7 @@ # important so that you isolate only one oscillation of interest, generally # the width of this filter is recommended to be about 2 Hz. -fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True) +fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True, layout="constrained") bandwidths = [1.0, 2.0, 4.0] for bandwidth, ax in zip(bandwidths, axs): data = np.zeros((len(ch_names), freqs.size, epochs.times.size), dtype=complex) @@ -233,7 +230,6 @@ f"bandwidth = {bandwidth}, " f"transition bandwidth = {4 * bandwidth}" ) -plt.tight_layout() # %% # Calculating a TFR without averaging over epochs @@ -277,12 +273,9 @@ ) # Baseline the output rescale(power, epochs.times, (0.0, 0.1), mode="mean", copy=False) -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") x, y = centers_to_edges(epochs.times * 1000, freqs) mesh = ax.pcolormesh(x, y, power[0], cmap="RdBu_r", vmin=vmin, vmax=vmax) ax.set_title("TFR calculated on a numpy array") ax.set(ylim=freqs[[0, -1]], xlabel="Time (ms)") fig.colorbar(mesh) -plt.tight_layout() - -plt.show() diff --git a/examples/visualization/3d_to_2d.py b/examples/visualization/3d_to_2d.py index 590cc9df639..966e97f76ac 100644 --- a/examples/visualization/3d_to_2d.py +++ b/examples/visualization/3d_to_2d.py @@ -129,8 +129,7 @@ lt = mne.channels.read_layout(layout_path / layout_name, scale=False) x = lt.pos[:, 0] * float(im.shape[1]) y = (1 - lt.pos[:, 1]) * float(im.shape[0]) # Flip the y-position -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") ax.imshow(im) ax.scatter(x, y, s=80, color="r") -fig.tight_layout() ax.set_axis_off() diff --git a/examples/visualization/evoked_topomap.py b/examples/visualization/evoked_topomap.py index 1497e91bda8..dfd6be7f0f3 100644 --- a/examples/visualization/evoked_topomap.py +++ b/examples/visualization/evoked_topomap.py @@ -94,7 +94,7 @@ # and ``'head'`` otherwise. 
Here we show each option: extrapolations = ["local", "head", "box"] -fig, axes = plt.subplots(figsize=(7.5, 4.5), nrows=2, ncols=3) +fig, axes = plt.subplots(figsize=(7.5, 4.5), nrows=2, ncols=3, layout="constrained") # Here we look at EEG channels, and use a custom head sphere to get all the # sensors to be well within the drawn head surface @@ -111,7 +111,6 @@ sphere=(0.0, 0.0, 0.0, 0.09), ) ax.set_title("%s %s" % (ch_type.upper(), extr), fontsize=14) -fig.tight_layout() # %% # More advanced usage @@ -123,7 +122,6 @@ fig = evoked.plot_topomap( 0.1, ch_type="mag", show_names=True, colorbar=False, size=6, res=128 ) -fig.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88) fig.suptitle("Auditory response") # %% diff --git a/mne/conftest.py b/mne/conftest.py index c1e6b36a93b..a0eeaf18dfb 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -33,7 +33,6 @@ Bunch, _check_qt_version, _TempDir, - check_version, ) # data from sample dataset @@ -84,6 +83,7 @@ def pytest_configure(config): "slowtest", "ultraslowtest", "pgtest", + "pvtest", "allow_unclosed", "allow_unclosed_pyside2", ): @@ -104,6 +104,13 @@ def pytest_configure(config): if os.getenv("PYTEST_QT_API") is None and os.getenv("QT_API") is not None: os.environ["PYTEST_QT_API"] = os.environ["QT_API"] + # suppress: + # Debugger warning: It seems that frozen modules are being used, which may + # make the debugger miss breakpoints. Please pass -Xfrozen_modules=off + # to python to disable frozen modules. + if os.getenv("PYDEVD_DISABLE_FILE_VALIDATION") is None: + os.environ["PYDEVD_DISABLE_FILE_VALIDATION"] = "1" + # https://numba.readthedocs.io/en/latest/reference/deprecation.html#deprecation-of-old-style-numba-captured-errors # noqa: E501 if "NUMBA_CAPTURED_ERRORS" not in os.environ: os.environ["NUMBA_CAPTURED_ERRORS"] = "new_style" @@ -514,8 +521,9 @@ def pg_backend(request, garbage_collect): import mne_qt_browser mne_qt_browser._browser_instances.clear() - if check_version("mne_qt_browser", min_version="0.4"): - _assert_no_instances(MNEQtBrowser, f"Closure of {request.node.name}") + if not _test_passed(request): + return + _assert_no_instances(MNEQtBrowser, f"Closure of {request.node.name}") @pytest.fixture( @@ -541,35 +549,35 @@ def browser_backend(request, garbage_collect, monkeypatch): mne_qt_browser._browser_instances.clear() -@pytest.fixture(params=["pyvistaqt"]) +@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) def renderer(request, options_3d, garbage_collect): """Yield the 3D backends.""" with _use_backend(request.param, interactive=False) as renderer: yield renderer -@pytest.fixture(params=["pyvistaqt"]) +@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) def renderer_pyvistaqt(request, options_3d, garbage_collect): """Yield the PyVista backend.""" with _use_backend(request.param, interactive=False) as renderer: yield renderer -@pytest.fixture(params=["notebook"]) +@pytest.fixture(params=[pytest.param("notebook", marks=pytest.mark.pvtest)]) def renderer_notebook(request, options_3d): """Yield the 3D notebook renderer.""" with _use_backend(request.param, interactive=False) as renderer: yield renderer -@pytest.fixture(params=["pyvistaqt"]) +@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) def renderer_interactive_pyvistaqt(request, options_3d, qt_windows_closed): """Yield the interactive PyVista backend.""" with _use_backend(request.param, interactive=True) as renderer: yield renderer -@pytest.fixture(params=["pyvistaqt"]) 
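# Illustrative sketch (not from the patch): the fixture change at this step wraps
# each backend name in pytest.param(...) so the parametrization carries the new
# ``pvtest`` mark and can be selected or skipped with ``pytest -m pvtest`` or
# ``-m "not pvtest"``. The fixture/test names below are hypothetical; the mark
# itself is registered in pytest_configure, as the conftest hunk above does.
import pytest

@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)])
def backend_name(request):
    """Yield the parametrized backend name."""
    return request.param

def test_backend_name(backend_name):
    assert backend_name == "pyvistaqt"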
+@pytest.fixture(params=[pytest.param("pyvistaqt", marks=pytest.mark.pvtest)]) def renderer_interactive(request, options_3d): """Yield the interactive 3D backends.""" with _use_backend(request.param, interactive=True) as renderer: @@ -872,6 +880,14 @@ def protect_config(): yield +def _test_passed(request): + try: + outcome = request.node.harvest_rep_call + except Exception: + outcome = "passed" + return outcome == "passed" + + @pytest.fixture() def brain_gc(request): """Ensure that brain can be properly garbage collected.""" @@ -897,11 +913,7 @@ def brain_gc(request): yield close_func() # no need to warn if the test itself failed, pytest-harvest helps us here - try: - outcome = request.node.harvest_rep_call - except Exception: - outcome = "failed" - if outcome != "passed": + if not _test_passed(request): return _assert_no_instances(Brain, "after") # Check VTK diff --git a/mne/preprocessing/eyetracking/calibration.py b/mne/preprocessing/eyetracking/calibration.py index 962299f3a84..1891ebacb30 100644 --- a/mne/preprocessing/eyetracking/calibration.py +++ b/mne/preprocessing/eyetracking/calibration.py @@ -147,7 +147,7 @@ def plot(self, show_offsets=True, axes=None, show=True): ax = axes fig = ax.get_figure() else: # create new figure and axes - fig, ax = plt.subplots(constrained_layout=True) + fig, ax = plt.subplots(layout="constrained") px, py = self["positions"].T gaze_x, gaze_y = self["gaze"].T diff --git a/mne/preprocessing/ica.py b/mne/preprocessing/ica.py index 15c1d286d6e..fdb7d920267 100644 --- a/mne/preprocessing/ica.py +++ b/mne/preprocessing/ica.py @@ -3366,7 +3366,6 @@ def corrmap( template=True, sphere=sphere, ) - template_fig.subplots_adjust(top=0.8) template_fig.canvas.draw() # first run: use user-selected map diff --git a/mne/report/report.py b/mne/report/report.py index 89154d3de76..faf12a79bd6 100644 --- a/mne/report/report.py +++ b/mne/report/report.py @@ -78,7 +78,7 @@ ) from ..viz._brain.view import views_dicts from ..viz.misc import _plot_mri_contours, _get_bem_plotting_surfaces -from ..viz.utils import _ndarray_to_fig, tight_layout +from ..viz.utils import _ndarray_to_fig from ..viz._scraper import _mne_qt_browser_screenshot from ..forward import read_forward_solution, Forward from ..epochs import read_epochs, BaseEpochs @@ -431,11 +431,6 @@ def _fig_to_img(fig, *, image_format="png", own_figure=True): # matplotlib modifies the passed dict, which is a bug mpl_kwargs["pil_kwargs"] = pil_kwargs.copy() with warnings.catch_warnings(): - warnings.filterwarnings( - action="ignore", - message=".*Axes that are not compatible with tight_layout.*", - category=UserWarning, - ) fig.savefig(output, format=image_format, dpi=dpi, **mpl_kwargs) if own_figure: @@ -1648,7 +1643,6 @@ def _add_ica_overlay(self, *, ica, inst, image_format, section, tags, replace): fig = ica.plot_overlay(inst=inst_, show=False, on_baseline="reapply") del inst_ - tight_layout(fig=fig) _constrain_fig_resolution(fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES) self._add_figure( fig=fig, @@ -1770,9 +1764,6 @@ def _add_ica_components(self, *, ica, picks, image_format, section, tags, replac if not isinstance(figs, list): figs = [figs] - for fig in figs: - tight_layout(fig=fig) - title = "ICA component topographies" if len(figs) == 1: fig = figs[0] @@ -3241,7 +3232,6 @@ def _add_raw( init_kwargs.setdefault("fmax", fmax) plot_kwargs.setdefault("show", False) fig = raw.compute_psd(**init_kwargs).plot(**plot_kwargs) - tight_layout(fig=fig) _constrain_fig_resolution(fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES) 
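# Illustrative sketch (not from the patch): the new conftest helper
# ``_test_passed`` reads ``request.node.harvest_rep_call``, an attribute provided
# by the pytest-harvest plugin. Without that plugin, the standard pytest recipe
# below exposes the same "did the test pass?" information to fixtures; the names
# ``rep_call`` and ``skip_checks_on_failure`` are illustrative only.
import pytest

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # attach each phase's report to the test item (item.rep_setup, item.rep_call, ...)
    outcome = yield
    rep = outcome.get_result()
    setattr(item, "rep_" + rep.when, rep)

@pytest.fixture
def skip_checks_on_failure(request):
    yield  # run the test body first
    rep = getattr(request.node, "rep_call", None)
    if rep is None or not rep.passed:
        return  # test failed (or report missing): skip extra teardown assertions
    # ... expensive post-test sanity checks would go here ...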
self._add_figure( fig=fig, @@ -3323,7 +3313,6 @@ def _add_projs( # hard to see how (6, 4) could work in all number-of-projs by # number-of-channel-types conditions... fig.set_size_inches((6, 4)) - tight_layout(fig=fig) _constrain_fig_resolution(fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES) self._add_figure( fig=fig, @@ -3488,6 +3477,7 @@ def _plot_one_evoked_topomap_timepoint( len(ch_types) * 2, gridspec_kw={"width_ratios": [8, 0.5] * len(ch_types)}, figsize=(2.5 * len(ch_types), 2), + layout="constrained", ) _constrain_fig_resolution(fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES) ch_type_ax_map = dict( @@ -3508,8 +3498,6 @@ def _plot_one_evoked_topomap_timepoint( ) ch_type_ax_map[ch_type][0].set_title(ch_type) - tight_layout(fig=fig) - with BytesIO() as buff: fig.savefig(buff, format="png", pad_inches=0) plt.close(fig) @@ -3616,7 +3604,7 @@ def _add_evoked_gfp( import matplotlib.pyplot as plt - fig, ax = plt.subplots(len(ch_types), 1, sharex=True) + fig, ax = plt.subplots(len(ch_types), 1, sharex=True, layout="constrained") if len(ch_types) == 1: ax = [ax] for idx, ch_type in enumerate(ch_types): @@ -3636,7 +3624,6 @@ def _add_evoked_gfp( if idx < len(ch_types) - 1: ax[idx].set_xlabel(None) - tight_layout(fig=fig) _constrain_fig_resolution(fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES) title = "Global field power" self._add_figure( @@ -3655,7 +3642,6 @@ def _add_evoked_whitened( ): """Render whitened evoked.""" fig = evoked.plot_white(noise_cov=noise_cov, show=False) - tight_layout(fig=fig) _constrain_fig_resolution(fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES) title = "Whitened" @@ -4003,7 +3989,6 @@ def _add_epochs( fig = epochs.plot_drop_log( subject=self.subject, ignore=drop_log_ignore, show=False ) - tight_layout(fig=fig) _constrain_fig_resolution( fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES ) @@ -4179,18 +4164,17 @@ def _add_stc( if backend_is_3d: brain.set_time(t) - fig, ax = plt.subplots(figsize=(4.5, 4.5)) + fig, ax = plt.subplots(figsize=(4.5, 4.5), layout="constrained") ax.imshow(brain.screenshot(time_viewer=True, mode="rgb")) ax.axis("off") - tight_layout(fig=fig) _constrain_fig_resolution( fig, max_width=stc_plot_kwargs["size"][0], max_res=MAX_IMG_RES ) figs.append(fig) plt.close(fig) else: - fig_lh = plt.figure() - fig_rh = plt.figure() + fig_lh = plt.figure(layout="constrained") + fig_rh = plt.figure(layout="constrained") brain_lh = stc.plot( views="lat", @@ -4210,8 +4194,6 @@ def _add_stc( backend="matplotlib", figure=fig_rh, ) - tight_layout(fig=fig_lh) # TODO is this necessary? - tight_layout(fig=fig_rh) # TODO is this necessary? 
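# Illustrative sketch (not from the patch): the plot_joint hunks further down
# (mne/time_frequency/tfr.py and mne/viz/evoked.py) replace the private
# _connection_line helper with matplotlib's ConnectionPatch so the connector
# lines between the time-series axes and the topomap axes survive constrained
# layout. A standalone version of that pattern, with made-up data:
import matplotlib.pyplot as plt
from matplotlib.patches import ConnectionPatch

fig, (ax_ts, ax_map) = plt.subplots(1, 2, figsize=(6, 3), layout="constrained")
ax_ts.plot([0.0, 0.1, 0.2, 0.3], [0.0, 1.0, -1.0, 0.5])
ax_map.set(xticks=[], yticks=[], title="topomap placeholder")
con = ConnectionPatch(
    xyA=(0.1, 1.0),   # data point in the time-series axes
    xyB=(0.5, 0.0),   # bottom-center of the "topomap" axes
    coordsA="data",
    coordsB="axes fraction",
    axesA=ax_ts,
    axesB=ax_map,
    color="grey",
    linewidth=1.5,
    alpha=0.66,
    clip_on=False,
)
fig.add_artist(con)  # add at the figure level so the artist can span both axes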
_constrain_fig_resolution( fig_lh, max_width=stc_plot_kwargs["size"][0], diff --git a/mne/time_frequency/spectrum.py b/mne/time_frequency/spectrum.py index 52ca167ee6c..1fc2c6ce2bd 100644 --- a/mne/time_frequency/spectrum.py +++ b/mne/time_frequency/spectrum.py @@ -742,7 +742,6 @@ def plot( sphere=sphere, xlabels_list=xlabels_list, ) - fig.subplots_adjust(hspace=0.3) plt_show(show, fig) return fig diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py index 1a061b8b173..83445a64690 100644 --- a/mne/time_frequency/tfr.py +++ b/mne/time_frequency/tfr.py @@ -70,7 +70,6 @@ figure_nobar, plt_show, _setup_cmap, - _connection_line, _prepare_joint_axes, _setup_vmin_vmax, _set_title_multiple_electrodes, @@ -141,7 +140,7 @@ def morlet(sfreq, freqs, n_cycles=7.0, sigma=None, zero_mean=False): s = w * sfreq / (2 * freq * np.pi) # from SciPy docs wavelet_sp = sp_morlet(M, s, w) * np.sqrt(2) # match our normalization - _, ax = plt.subplots(constrained_layout=True) + _, ax = plt.subplots(layout="constrained") colors = { ('MNE', 'real'): '#66CCEE', ('SciPy', 'real'): '#4477AA', @@ -1732,7 +1731,7 @@ def _plot( elif isinstance(axes, plt.Axes): figs_and_axes = [(ax.get_figure(), ax) for ax in [axes]] elif axes is None: - figs = [plt.figure() for i in range(n_picks)] + figs = [plt.figure(layout="constrained") for i in range(n_picks)] figs_and_axes = [(fig, fig.add_subplot(111)) for fig in figs] else: raise ValueError("axes must be None, plt.Axes, or list " "of plt.Axes.") @@ -1921,7 +1920,7 @@ def plot_joint( .. versionadded:: 0.16.0 """ # noqa: E501 - import matplotlib.pyplot as plt + from matplotlib.patches import ConnectionPatch ##################################### # Handle channels (picks and types) # @@ -2007,7 +2006,7 @@ def plot_joint( # Image plot # ############## - fig, tf_ax, map_ax, cbar_ax = _prepare_joint_axes(n_timefreqs) + fig, tf_ax, map_ax = _prepare_joint_axes(n_timefreqs) cmap = _setup_cmap(cmap) @@ -2162,28 +2161,32 @@ def plot_joint( ############# # Finish up # ############# - if colorbar: from matplotlib import ticker - cbar = plt.colorbar(ax.images[0], cax=cbar_ax) + cbar = fig.colorbar(ax.images[0]) if locator is None: locator = ticker.MaxNLocator(nbins=5) cbar.locator = locator cbar.update_ticks() - plt.subplots_adjust( - left=0.12, right=0.925, bottom=0.14, top=1.0 if title is not None else 1.2 - ) - # draw the connection lines between time series and topoplots - lines = [ - _connection_line( - time_, fig, tf_ax, map_ax_, y=freq_, y_source_transform="transData" + for (time_, freq_), map_ax_ in zip(timefreqs_array, map_ax): + con = ConnectionPatch( + xyA=[time_, freq_], + xyB=[0.5, 0], + coordsA="data", + coordsB="axes fraction", + axesA=tf_ax, + axesB=map_ax_, + color="grey", + linestyle="-", + linewidth=1.5, + alpha=0.66, + zorder=1, + clip_on=False, ) - for (time_, freq_), map_ax_ in zip(timefreqs_array, map_ax) - ] - fig.lines.extend(lines) + fig.add_artist(con) plt_show(show) return fig @@ -2289,7 +2292,6 @@ def _onselect( axes=ax, ) ax.set_title(ch_type) - fig.tight_layout() @verbose def plot_topo( diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index ce99f2e6352..680d52022b5 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -88,7 +88,6 @@ _get_color_list, _get_cmap, plt_show, - tight_layout, figure_nobar, _check_time_unit, ) @@ -314,7 +313,9 @@ def plot_head_positions( from mpl_toolkits.mplot3d.art3d import Line3DCollection from mpl_toolkits.mplot3d import Axes3D # noqa: F401, analysis:ignore - fig, ax = plt.subplots(1, subplot_kw=dict(projection="3d")) + fig, ax = 
plt.subplots( + 1, subplot_kw=dict(projection="3d"), layout="constrained" + ) # First plot the trajectory as a colormap: # http://matplotlib.org/examples/pylab_examples/multicolored_line.html @@ -374,7 +375,6 @@ def plot_head_positions( ax.set(xlabel="x", ylabel="y", zlabel="z", xlim=xlim, ylim=ylim, zlim=zlim) _set_aspect_equal(ax) ax.view_init(30, 45) - tight_layout(fig=fig) plt_show(show) return fig @@ -1901,7 +1901,7 @@ def _key_pressed_slider(event, params): time_viewer.slider.set_val(this_time) -def _smooth_plot(this_time, params): +def _smooth_plot(this_time, params, *, draw=True): """Smooth source estimate data and plot with mpl.""" from ..morph import _hemi_morph @@ -1957,7 +1957,8 @@ def _smooth_plot(this_time, params): _set_aspect_equal(ax) ax.axis("off") ax.set(xlim=[-80, 80], ylim=(-80, 80), zlim=[-80, 80]) - ax.figure.canvas.draw() + if draw: + ax.figure.canvas.draw() def _plot_mpl_stc( @@ -2022,7 +2023,8 @@ def _plot_mpl_stc( del transparent, mapdata time_label, times = _handle_time(time_label, time_unit, stc.times) - fig = plt.figure(figsize=(6, 6)) if figure is None else figure + # don't use constrained layout because Axes3D does not play well with it + fig = plt.figure(figsize=(6, 6), layout=None) if figure is None else figure try: ax = Axes3D(fig, auto_add_to_figure=False) except Exception: # old mpl @@ -2072,7 +2074,7 @@ def _plot_mpl_stc( time_label=time_label, time_unit=time_unit, ) - _smooth_plot(initial_time, params) + _smooth_plot(initial_time, params, draw=False) ax.view_init(**kwargs[hemi][views]) @@ -2100,7 +2102,6 @@ def _plot_mpl_stc( callback_key = partial(_key_pressed_slider, params=params) time_viewer.canvas.mpl_connect("key_press_event", callback_key) - time_viewer.subplots_adjust(left=0.12, bottom=0.05, right=0.75, top=0.95) fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0) # add colorbar @@ -2932,7 +2933,7 @@ def _onclick(event, params, verbose=None): del ijk # Plot initial figure - fig, (axes, ax_time) = plt.subplots(2) + fig, (axes, ax_time) = plt.subplots(2, layout="constrained") axes.set(xticks=[], yticks=[]) marker = "o" if len(stc.times) == 1 else None ydata = stc.data[loc_idx] @@ -2943,7 +2944,6 @@ def _onclick(event, params, verbose=None): vert_legend = ax_time.legend([h], [""], title="Vertex") _update_vertlabel(loc_idx) lx = ax_time.axvline(stc.times[time_idx], color="g") - fig.tight_layout() allow_pos_lims = mode != "glass_brain" mapdata = _process_clim(clim, colormap, transparent, stc.data, allow_pos_lims) @@ -3390,7 +3390,7 @@ def plot_sparse_source_estimates( ) # Show time courses - fig = plt.figure(fig_number) + fig = plt.figure(fig_number, layout="constrained") fig.clf() ax = fig.add_subplot(111) @@ -3757,7 +3757,9 @@ def _plot_dipole_mri_orthoview( dims = len(data) # Symmetric size assumed. 
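# Illustrative sketch (not from the patch): the mne/viz/_3d.py hunks above build
# their 3D axes with subplot_kw=dict(projection="3d") plus constrained layout
# rather than adjusting margins afterwards. Minimal standalone setup, with
# random points standing in for head positions (Matplotlib >= 3.5 assumed):
import matplotlib.pyplot as plt
import numpy as np

rng = np.random.default_rng(0)
pts = rng.standard_normal((50, 3))
fig, ax = plt.subplots(1, subplot_kw=dict(projection="3d"), layout="constrained")
ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], s=10, color="k")
ax.set(xlabel="x", ylabel="y", zlabel="z")
ax.view_init(30, 45)  # same default viewing angle used by plot_head_positions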
dd = dims // 2 if ax is None: - fig, ax = plt.subplots(1, subplot_kw=dict(projection="3d")) + fig, ax = plt.subplots( + 1, subplot_kw=dict(projection="3d"), layout="constrained" + ) else: _validate_type(ax, Axes3D, "ax", "Axes3D", extra='when mode is "orthoview"') fig = ax.get_figure() diff --git a/mne/viz/__init__.pyi b/mne/viz/__init__.pyi index e73226b6909..b709ebc2a05 100644 --- a/mne/viz/__init__.pyi +++ b/mne/viz/__init__.pyi @@ -82,7 +82,6 @@ __all__ = [ "set_3d_view", "set_browser_backend", "snapshot_brain_montage", - "tight_layout", "ui_events", "use_3d_backend", "use_browser_backend", @@ -149,7 +148,6 @@ from .topomap import ( plot_regression_weights, ) from .utils import ( - tight_layout, mne_analyze_colormap, compare_fiff, ClickableImage, diff --git a/mne/viz/_dipole.py b/mne/viz/_dipole.py index 64ab5774ba4..24fc4735f3c 100644 --- a/mne/viz/_dipole.py +++ b/mne/viz/_dipole.py @@ -53,9 +53,7 @@ def _plot_dipole_mri_outlines( _validate_type(surf, (str, None), "surf") _check_option("surf", surf, ("white", "pial", None)) if ax is None: - _, ax = plt.subplots( - 1, 3, figsize=(7, 2.5), squeeze=True, constrained_layout=True - ) + _, ax = plt.subplots(1, 3, figsize=(7, 2.5), squeeze=True, layout="constrained") _validate_if_list_of_axes(ax, 3, name="ax") dipoles = _check_concat_dipoles(dipoles) color = "r" if color is None else color diff --git a/mne/viz/_figure.py b/mne/viz/_figure.py index 82359f585ed..738bf838ce3 100644 --- a/mne/viz/_figure.py +++ b/mne/viz/_figure.py @@ -535,7 +535,7 @@ def _create_epoch_image_fig(self, pick): title = f"Epochs image ({ch_name})" fig = self._new_child_figure(figsize=(6, 4), fig_name=None, window_title=title) fig.suptitle = title - gs = GridSpec(nrows=3, ncols=10) + gs = GridSpec(nrows=3, ncols=10, figure=fig) fig.add_subplot(gs[:2, :9]) fig.add_subplot(gs[2, :9]) fig.add_subplot(gs[:2, 9]) @@ -580,16 +580,6 @@ def _create_epoch_histogram(self): ax.plot((reject, reject), (0, ax.get_ylim()[1]), color="r") # finalize fig.suptitle(title, y=0.99) - if hasattr(fig, "_inch_to_rel"): - kwargs = dict( - bottom=fig._inch_to_rel(0.5, horiz=False), - top=1 - fig._inch_to_rel(0.5, horiz=False), - left=fig._inch_to_rel(0.75), - right=1 - fig._inch_to_rel(0.25), - ) - else: - kwargs = dict() - fig.subplots_adjust(hspace=0.7, **kwargs) self.mne.fig_histogram = fig return fig diff --git a/mne/viz/_mpl_figure.py b/mne/viz/_mpl_figure.py index 2974df90958..c313bfe1edf 100644 --- a/mne/viz/_mpl_figure.py +++ b/mne/viz/_mpl_figure.py @@ -118,7 +118,7 @@ def __init__(self, **kwargs): for key in [k for k in kwargs if not hasattr(self.mne, k)]: setattr(self.mne, key, kwargs[key]) - def _close(self, event): + def _close(self, event=None): """Handle close events.""" logger.debug(f"Closing {self!r}") # remove references from parent fig to child fig @@ -886,9 +886,15 @@ def _create_ch_context_fig(self, idx): fig = super()._create_ch_context_fig(idx) plt_show(fig=fig) - def _new_child_figure(self, fig_name, **kwargs): + def _new_child_figure(self, fig_name, *, layout=None, **kwargs): """Instantiate a new MNE dialog figure (with event listeners).""" - fig = _figure(toolbar=False, parent_fig=self, fig_name=fig_name, **kwargs) + fig = _figure( + toolbar=False, + parent_fig=self, + fig_name=fig_name, + layout=layout, + **kwargs, + ) fig._add_default_callbacks() self.mne.child_figs.append(fig) if isinstance(fig_name, str): @@ -2324,8 +2330,8 @@ def _get_scale_bar_texts(self): class MNELineFigure(MNEFigure): """Interactive figure for non-scrolling line plots.""" - def 
__init__(self, inst, n_axes, figsize, **kwargs): - super().__init__(figsize=figsize, inst=inst, **kwargs) + def __init__(self, inst, n_axes, figsize, *, layout=None, **kwargs): + super().__init__(figsize=figsize, inst=inst, layout=layout, **kwargs) # AXES: default margins (inches) l_margin = 0.8 @@ -2372,6 +2378,8 @@ def _figure(toolbar=True, FigureClass=MNEFigure, **kwargs): from matplotlib import rc_context title = kwargs.pop("window_title", None) # extract title before init + if "layout" not in kwargs: + kwargs["layout"] = "constrained" rc = dict() if toolbar else dict(toolbar="none") with rc_context(rc=rc): fig = plt.figure(FigureClass=FigureClass, **kwargs) @@ -2379,6 +2387,14 @@ def _figure(toolbar=True, FigureClass=MNEFigure, **kwargs): fig.mne.backend = BACKEND if title is not None: _set_window_title(fig, title) + # TODO: for some reason for topomaps->_prepare_trellis the layout=constrained does + # not work the first time (maybe toolbar=False?) + if kwargs.get("layout") == "constrained": + if hasattr(fig, "set_layout_engine"): # 3.6+ + fig.set_layout_engine("constrained") + else: + fig.set_constrained_layout(True) + # add event callbacks fig._add_default_callbacks() return fig @@ -2409,6 +2425,7 @@ def _line_figure(inst, axes=None, picks=None, **kwargs): FigureClass=MNELineFigure, figsize=figsize, n_axes=n_axes, + layout=None, **kwargs, ) fig.mne.fig_size_px = fig._get_size_px() # can't do in __init__ @@ -2483,7 +2500,7 @@ def _init_browser(**kwargs): """Instantiate a new MNE browse-style figure.""" from mne.io import BaseRaw - fig = _figure(toolbar=False, FigureClass=MNEBrowseFigure, **kwargs) + fig = _figure(toolbar=False, FigureClass=MNEBrowseFigure, layout=None, **kwargs) # splash is ignored (maybe we could do it for mpl if we get_backend() and # check if it's Qt... 
but seems overkill) diff --git a/mne/viz/_proj.py b/mne/viz/_proj.py index 5a40df7dc03..0f8f02a3089 100644 --- a/mne/viz/_proj.py +++ b/mne/viz/_proj.py @@ -102,7 +102,7 @@ def plot_projs_joint( n_row = len(ch_types) shape = (n_row, n_col) fig = plt.figure( - figsize=(n_col * 1.1 + 0.5, n_row * 1.8 + 0.5), constrained_layout=True + figsize=(n_col * 1.1 + 0.5, n_row * 1.8 + 0.5), layout="constrained" ) ri = 0 # pick some sufficiently distinct colors (6 per proj type, e.g., ECG, diff --git a/mne/viz/backends/_abstract.py b/mne/viz/backends/_abstract.py index c2c3e08eb2b..e924e7deae9 100644 --- a/mne/viz/backends/_abstract.py +++ b/mne/viz/backends/_abstract.py @@ -7,10 +7,8 @@ # License: Simplified BSD from abc import ABC, abstractmethod, abstractclassmethod -from contextlib import nullcontext import warnings -from ..utils import tight_layout from ..ui_events import publish, TimeChange @@ -1333,19 +1331,10 @@ def _mpl_initialize(): class _AbstractMplCanvas(ABC): def __init__(self, width, height, dpi): """Initialize the MplCanvas.""" - from matplotlib import rc_context from matplotlib.figure import Figure - # prefer constrained layout here but live with tight_layout otherwise - context = nullcontext self._extra_events = ("resize",) - try: - context = rc_context({"figure.constrained_layout.use": True}) - self._extra_events = () - except KeyError: - pass - with context: - self.fig = Figure(figsize=(width, height), dpi=dpi) + self.fig = Figure(figsize=(width, height), dpi=dpi, layout="constrained") self.axes = self.fig.add_subplot(111) self.axes.set(xlabel="Time (s)", ylabel="Activation (AU)") self.manager = None @@ -1408,7 +1397,7 @@ def clear(self): def on_resize(self, event): """Handle resize events.""" - tight_layout(fig=self.axes.figure) + pass class _AbstractBrainMplCanvas(_AbstractMplCanvas): diff --git a/mne/viz/backends/tests/test_utils.py b/mne/viz/backends/tests/test_utils.py index 3bec2aafcc9..cfa0c65535f 100644 --- a/mne/viz/backends/tests/test_utils.py +++ b/mne/viz/backends/tests/test_utils.py @@ -7,6 +7,7 @@ from colorsys import rgb_to_hls from contextlib import nullcontext +import platform import numpy as np import pytest @@ -79,6 +80,8 @@ def test_theme_colors(pg_backend, theme, monkeypatch, tmp_path): return # we could add a ton of conditionals below, but KISS is_dark = _qt_is_dark(fig) # on Darwin these checks get complicated, so don't bother for now + if platform.system() == "Darwin": + pytest.skip("Problems on macOS") if theme == "dark": assert is_dark, theme elif theme == "light": diff --git a/mne/viz/circle.py b/mne/viz/circle.py index af160141741..983eef69c5c 100644 --- a/mne/viz/circle.py +++ b/mne/viz/circle.py @@ -212,7 +212,7 @@ def _plot_connectivity_circle( # Use a polar axes if ax is None: - fig = plt.figure(figsize=(8, 8), facecolor=facecolor) + fig = plt.figure(figsize=(8, 8), facecolor=facecolor, layout="constrained") ax = fig.add_subplot(polar=True) else: fig = ax.figure diff --git a/mne/viz/epochs.py b/mne/viz/epochs.py index d173c80a45b..7bd1785ada9 100644 --- a/mne/viz/epochs.py +++ b/mne/viz/epochs.py @@ -13,7 +13,6 @@ from collections import Counter from copy import deepcopy -import warnings import numpy as np from scipy.ndimage import gaussian_filter1d @@ -31,7 +30,6 @@ _VALID_CHANNEL_TYPES, ) from .utils import ( - tight_layout, _setup_vmin_vmax, plt_show, _check_cov, @@ -453,7 +451,7 @@ def _validate_fig_and_axes(fig, axes, group_by, evoked, colorbar, clear=False): rowspan = 2 if evoked else 3 shape = (3, 10) for this_group in group_by: - this_fig = 
figure() + this_fig = figure(layout="constrained") _set_window_title(this_fig, this_group) subplot2grid(shape, (0, 0), colspan=colspan, rowspan=rowspan, fig=this_fig) if evoked: @@ -602,8 +600,6 @@ def _plot_epochs_image( tmax = epochs.times[-1] ax_im = ax["image"] - fig = ax_im.get_figure() - # draw the image cmap = _setup_cmap(cmap, norm=norm) n_epochs = len(image) @@ -664,13 +660,10 @@ def _plot_epochs_image( ax_im.CB = DraggableColorbar( this_colorbar, im, kind="epochs_image", ch_type=unit ) - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore") - tight_layout(fig=fig) # finish plt_show(show) - return fig + return ax_im.get_figure() def plot_drop_log( @@ -733,7 +726,7 @@ def plot_drop_log( ch_names = np.array(list(scores.keys())) counts = np.array(list(scores.values())) # init figure, handle easy case (no drops) - fig, ax = plt.subplots() + fig, ax = plt.subplots(layout="constrained") title = f"{absolute} of {n_epochs_before_drop} epochs removed " f"({percent:.1f}%)" if subject is not None: title = f"{subject}: {title}" @@ -755,7 +748,6 @@ def plot_drop_log( ) ax.set_ylabel("% of epochs removed") ax.grid(axis="y") - tight_layout(pad=1, fig=fig) plt_show(show) return fig diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py index 687203cad49..5886bb26db3 100644 --- a/mne/viz/evoked.py +++ b/mne/viz/evoked.py @@ -30,7 +30,6 @@ from ..defaults import _handle_default from .utils import ( _draw_proj_checkbox, - tight_layout, _check_delayed_ssp, plt_show, _process_times, @@ -41,7 +40,6 @@ _make_combine_callable, _validate_if_list_of_axes, _triage_rank_sss, - _connection_line, _get_color_list, _setup_ax_spines, _setup_plot_projector, @@ -165,7 +163,11 @@ def _line_plot_onselect( minidx = np.abs(times - xmin).argmin() maxidx = np.abs(times - xmax).argmin() fig, axarr = plt.subplots( - 1, len(ch_types), squeeze=False, figsize=(3 * len(ch_types), 3) + 1, + len(ch_types), + squeeze=False, + figsize=(3 * len(ch_types), 3), + layout="constrained", ) for idx, ch_type in enumerate(ch_types): @@ -211,7 +213,6 @@ def _line_plot_onselect( unit = "Hz" if psd else time_unit fig.suptitle("Average over %.2f%s - %.2f%s" % (xmin, unit, xmax, unit), y=0.1) - tight_layout(pad=2.0, fig=fig) plt_show() if text is not None: text.set_visible(False) @@ -332,7 +333,7 @@ def _plot_evoked( if axes is None: axes = dict() for sel in group_by: - plt.figure() + plt.figure(layout="constrained") axes[sel] = plt.axes() if not isinstance(axes, dict): raise ValueError( @@ -458,8 +459,7 @@ def _plot_evoked( fig = None if axes is None: - fig, axes = plt.subplots(len(ch_types_used), 1) - fig.subplots_adjust(left=0.125, bottom=0.1, right=0.975, top=0.92, hspace=0.63) + fig, axes = plt.subplots(len(ch_types_used), 1, layout="constrained") if isinstance(axes, plt.Axes): axes = [axes] fig.set_size_inches(6.4, 2 + len(axes)) @@ -738,6 +738,7 @@ def _plot_lines( else: y_offset = this_ylim[0] this_gfp += y_offset + ax.autoscale(False) ax.fill_between( times, y_offset, @@ -1628,7 +1629,7 @@ def whitened_gfp(x, rank=None): sharex=True, sharey=False, figsize=(8.8, 2.2 * n_rows), - constrained_layout=True, + layout="constrained", ) else: axes = np.array(axes) @@ -1772,7 +1773,7 @@ def plot_snr_estimate(evoked, inv, show=True, axes=None, verbose=None): snr, snr_est = estimate_snr(evoked, inv) _validate_type(axes, (None, plt.Axes)) if axes is None: - _, ax = plt.subplots(1, 1) + _, ax = plt.subplots(1, 1, layout="constrained") else: ax = axes del axes @@ -1858,7 +1859,7 @@ def plot_evoked_joint( ----- .. 
versionadded:: 0.12.0 """ - import matplotlib.pyplot as plt + from matplotlib.patches import ConnectionPatch if ts_args is not None and not isinstance(ts_args, dict): raise TypeError("ts_args must be dict or None, got type %s" % (type(ts_args),)) @@ -1955,9 +1956,8 @@ def plot_evoked_joint( # prepare axes for topomap if not got_axes: - fig, ts_ax, map_ax, cbar_ax = _prepare_joint_axes( - len(times_sec), figsize=(8.0, 4.2) - ) + fig, ts_ax, map_ax = _prepare_joint_axes(len(times_sec), figsize=(8.0, 4.2)) + cbar_ax = None else: ts_ax = ts_args["axes"] del ts_args["axes"] @@ -1995,20 +1995,10 @@ def plot_evoked_joint( old_title = ts_ax.get_title() ts_ax.set_title("") - # XXX BUG destroys ax -> fig assignment if title & axes are passed if title is not None: - title_ax = fig.add_subplot(4, 3, 2) if title == "": title = old_title - title_ax.text( - 0.5, - 0.5, - title, - transform=title_ax.transAxes, - horizontalalignment="center", - verticalalignment="center", - ) - title_ax.axis("off") + fig.suptitle(title) # topomap contours = topomap_args.get("contours", 6) @@ -2034,8 +2024,8 @@ def plot_evoked_joint( if topomap_args.get("colorbar", True): from matplotlib import ticker - cbar_ax.grid(False) # auto-removal deprecated as of 2021/10/05 - cbar = plt.colorbar(map_ax[0].images[0], cax=cbar_ax) + cbar = fig.colorbar(map_ax[0].images[0], ax=map_ax, cax=cbar_ax) + cbar.ax.grid(False) # auto-removal deprecated as of 2021/10/05 if isinstance(contours, (list, np.ndarray)): cbar.set_ticks(contours) else: @@ -2044,19 +2034,24 @@ def plot_evoked_joint( cbar.locator = locator cbar.update_ticks() - if not got_axes: - plt.subplots_adjust( - left=0.1, right=0.93, bottom=0.14, top=1.0 if title is not None else 1.2 - ) - # connection lines # draw the connection lines between time series and topoplots - lines = [ - _connection_line(timepoint, fig, ts_ax, map_ax_) - for timepoint, map_ax_ in zip(times_ts, map_ax) - ] - for line in lines: - fig.lines.append(line) + for timepoint, map_ax_ in zip(times_ts, map_ax): + con = ConnectionPatch( + xyA=[timepoint, ts_ax.get_ylim()[1]], + xyB=[0.5, 0], + coordsA="data", + coordsB="axes fraction", + axesA=ts_ax, + axesB=map_ax_, + color="grey", + linestyle="-", + linewidth=1.5, + alpha=0.66, + zorder=1, + clip_on=False, + ) + fig.add_artist(con) # mark times in time series plot for timepoint in times_ts: @@ -2941,7 +2936,9 @@ def plot_compare_evokeds( axes = ["topo"] * len(ch_types) else: if axes is None: - axes = (plt.subplots(figsize=(8, 6))[1] for _ in ch_types) + axes = ( + plt.subplots(figsize=(8, 6), layout="constrained")[1] for _ in ch_types + ) elif isinstance(axes, plt.Axes): axes = [axes] _validate_if_list_of_axes(axes, obligatory_len=len(ch_types)) @@ -3015,7 +3012,7 @@ def plot_compare_evokeds( from .topo import iter_topography from ..channels.layout import find_layout - fig = plt.figure(figsize=(18, 14)) + fig = plt.figure(figsize=(18, 14), layout=None) # Not "constrained" for topo def click_func( ax_, diff --git a/mne/viz/ica.py b/mne/viz/ica.py index a414775b635..d80ed9aec65 100644 --- a/mne/viz/ica.py +++ b/mne/viz/ica.py @@ -14,7 +14,6 @@ from scipy.stats import gaussian_kde from .utils import ( - tight_layout, _make_event_color_dict, _get_cmap, plt_show, @@ -767,7 +766,7 @@ def _plot_ica_sources_evoked(evoked, picks, exclude, title, show, ica, labels=No if title is None: title = "Reconstructed latent sources, time-locked" - fig, axes = plt.subplots(1) + fig, axes = plt.subplots(1, layout="constrained") ax = axes axes = [axes] times = evoked.times * 1e3 @@ 
-852,7 +851,6 @@ def _plot_ica_sources_evoked(evoked, picks, exclude, title, show, ica, labels=No ax.set(title=title, xlim=times[[0, -1]], xlabel="Time (ms)", ylabel="(NA)") if len(exclude) > 0: plt.legend(loc="best") - tight_layout(fig=fig) texts.append( ax.text( @@ -959,7 +957,9 @@ def plot_ica_scores( if figsize is None: figsize = (6.4 * n_cols, 2.7 * n_rows) - fig, axes = plt.subplots(n_rows, n_cols, figsize=figsize, sharex=True, sharey=True) + fig, axes = plt.subplots( + n_rows, n_cols, figsize=figsize, sharex=True, sharey=True, layout="constrained" + ) if isinstance(axes, np.ndarray): axes = axes.flatten() @@ -1012,11 +1012,6 @@ def plot_ica_scores( ax.set_title("(%s)" % label) ax.set_xlabel("ICA components") ax.set_xlim(-0.6, len(this_scores) - 0.4) - - tight_layout(fig=fig) - - adjust_top = 0.8 if len(fig.axes) == 1 else 0.9 - fig.subplots_adjust(top=adjust_top) fig.canvas.draw() plt_show(show) return fig @@ -1159,13 +1154,13 @@ def _plot_ica_overlay_raw(*, raw, raw_cln, picks, start, stop, title, show): ch_types = raw.get_channel_types(picks=picks, unique=True) for ch_type in ch_types: if ch_type in ("mag", "grad"): - fig, ax = plt.subplots(3, 1, sharex=True, constrained_layout=True) + fig, ax = plt.subplots(3, 1, sharex=True, layout="constrained") elif ch_type == "eeg" and not _has_eeg_average_ref_proj( raw.info, check_active=True ): - fig, ax = plt.subplots(3, 1, sharex=True, constrained_layout=True) + fig, ax = plt.subplots(3, 1, sharex=True, layout="constrained") else: - fig, ax = plt.subplots(2, 1, sharex=True, constrained_layout=True) + fig, ax = plt.subplots(2, 1, sharex=True, layout="constrained") fig.suptitle(title) # select sensors and retrieve data array @@ -1236,7 +1231,7 @@ def _plot_ica_overlay_evoked(evoked, evoked_cln, title, show): if len(ch_types_used) != len(ch_types_used_cln): raise ValueError("Raw and clean evokeds must match. 
Found different channels.") - fig, axes = plt.subplots(n_rows, 1) + fig, axes = plt.subplots(n_rows, 1, layout="constrained") if title is None: title = "Average signal before (red) and after (black) ICA" fig.suptitle(title) @@ -1248,9 +1243,6 @@ def _plot_ica_overlay_evoked(evoked, evoked_cln, title, show): line.set_color("r") fig.canvas.draw() evoked_cln.plot(axes=axes, show=False, time_unit="s", spatial_colors=False) - tight_layout(fig=fig) - - fig.subplots_adjust(top=0.90) fig.canvas.draw() plt_show(show) return fig diff --git a/mne/viz/misc.py b/mne/viz/misc.py index d2c1a4242dc..c903244f9ff 100644 --- a/mne/viz/misc.py +++ b/mne/viz/misc.py @@ -50,7 +50,6 @@ ) from ..filter import estimate_ringing_samples from .utils import ( - tight_layout, _get_color_list, _prepare_trellis, plt_show, @@ -172,7 +171,11 @@ def plot_cov( C = np.sqrt((C * C.conj()).real) fig_cov, axes = plt.subplots( - 1, len(idx_names), squeeze=False, figsize=(3.8 * len(idx_names), 3.7) + 1, + len(idx_names), + squeeze=False, + figsize=(3.8 * len(idx_names), 3.7), + layout="constrained", ) for k, (idx, name, _, _, _) in enumerate(idx_names): vlim = np.max(np.abs(C[idx][:, idx])) @@ -192,13 +195,14 @@ def plot_cov( cax.grid(False) # avoid mpl warning about auto-removal plt.colorbar(im, cax=cax, format="%.0e") - fig_cov.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.2, 0.26) - tight_layout(fig=fig_cov) - fig_svd = None if show_svd: fig_svd, axes = plt.subplots( - 1, len(idx_names), squeeze=False, figsize=(3.8 * len(idx_names), 3.7) + 1, + len(idx_names), + squeeze=False, + figsize=(3.8 * len(idx_names), 3.7), + layout="constrained", ) for k, (idx, name, unit, scaling, key) in enumerate(idx_names): this_C = C[idx][:, idx] @@ -233,10 +237,8 @@ def plot_cov( title=name, xlim=[0, len(s) - 1], ) - tight_layout(fig=fig_svd) plt_show(show) - return fig_cov, fig_svd @@ -321,7 +323,7 @@ def plot_source_spectrogram( time_grid, freq_grid = np.meshgrid(time_bounds, freq_bounds) # Plotting the results - fig = plt.figure(figsize=(9, 6)) + fig = plt.figure(figsize=(9, 6), layout="constrained") plt.pcolor(time_grid, freq_grid, source_power[:, source_index, :], cmap="Reds") ax = plt.gca() @@ -344,7 +346,6 @@ def plot_source_spectrogram( plt.grid(True, ls="-") if colorbar: plt.colorbar() - tight_layout(fig=fig) # Covering frequency gaps with horizontal bars for lower_bound, upper_bound in gap_bounds: @@ -481,6 +482,8 @@ def _plot_mri_contours( if slices_as_subplots: ax = axs[ai] else: + # No need for constrained layout here because we make our axes fill the + # entire figure fig = _figure_agg(figsize=figsize, dpi=dpi, facecolor="k") ax = fig.add_axes([0, 0, 1, 1], frame_on=False, facecolor="k") @@ -588,9 +591,6 @@ def _plot_mri_contours( figs.append(fig) if slices_as_subplots: - fig.subplots_adjust( - left=0.0, bottom=0.0, right=1.0, top=1.0, wspace=0.0, hspace=0.0 - ) plt_show(show, fig=fig) return fig else: @@ -848,7 +848,7 @@ def plot_events( fig = None if axes is None: - fig = plt.figure() + fig = plt.figure(layout="constrained") ax = axes if axes else plt.gca() unique_events_id = np.array(unique_events_id) @@ -948,7 +948,7 @@ def plot_dipole_amplitudes(dipoles, colors=None, show=True): if colors is None: colors = cycle(_get_color_list()) - fig, ax = plt.subplots(1, 1) + fig, ax = plt.subplots(1, 1, layout="constrained") xlim = [np.inf, -np.inf] for dip, color in zip(dipoles, colors): ax.plot(dip.times, dip.amplitude * 1e9, color=color, linewidth=1.5) @@ -1191,7 +1191,7 @@ def plot_filter( fig = None if axes is None: - fig, axes = 
plt.subplots(len(plot), 1) + fig, axes = plt.subplots(len(plot), 1, layout="constrained") if isinstance(axes, plt.Axes): axes = [axes] elif isinstance(axes, np.ndarray): @@ -1263,7 +1263,6 @@ def plot_filter( ) adjust_axes(axes) - tight_layout() plt_show(show) return fig @@ -1357,7 +1356,7 @@ def plot_ideal_filter( my_gain.append(gain[ii]) my_gain = 10 * np.log10(np.maximum(my_gain, 10 ** (alim[0] / 10.0))) if axes is None: - axes = plt.subplots(1)[1] + axes = plt.subplots(1, layout="constrained")[1] for transition in transitions: axes.axvspan(*transition, color=color, alpha=0.1) axes.plot( @@ -1378,7 +1377,6 @@ def plot_ideal_filter( if title: axes.set(title=title) adjust_axes(axes) - tight_layout() plt_show(show) return axes.figure @@ -1508,7 +1506,11 @@ def plot_csd( continue fig, axes = plt.subplots( - n_rows, n_cols, squeeze=False, figsize=(2 * n_cols + 1, 2.2 * n_rows) + n_rows, + n_cols, + squeeze=False, + figsize=(2 * n_cols + 1, 2.2 * n_rows), + layout="constrained", ) csd_mats = [] @@ -1535,8 +1537,6 @@ def plot_csd( ax.set_title("%.1f Hz." % freq) plt.suptitle(title) - plt.subplots_adjust(top=0.8) - if colorbar: cb = plt.colorbar(im, ax=[a for ax_ in axes for a in ax_]) if mode == "csd": @@ -1580,9 +1580,7 @@ def plot_chpi_snr(snr_dict, axes=None): ----- If you supply a list of existing `~matplotlib.axes.Axes`, then the figure legend will not be drawn automatically. If you still want it, running - ``fig.legend(loc='right', title='cHPI frequencies')`` will recreate it, - though you may also need to manually adjust the margin to make room for it - (e.g., using ``fig.subplots_adjust(right=0.8)``). + ``fig.legend(loc='right', title='cHPI frequencies')`` will recreate it. .. versionadded:: 0.24 """ @@ -1593,7 +1591,7 @@ def plot_chpi_snr(snr_dict, axes=None): full_names = dict(mag="magnetometers", grad="gradiometers") axes_was_none = axes is None if axes_was_none: - fig, axes = plt.subplots(len(valid_keys), 1, sharex=True) + fig, axes = plt.subplots(len(valid_keys), 1, sharex=True, layout="constrained") else: fig = axes[0].get_figure() if len(axes) != len(valid_keys): @@ -1627,6 +1625,5 @@ def plot_chpi_snr(snr_dict, axes=None): if axes_was_none: ax.set(xlabel="Time (s)") fig.align_ylabels() - fig.subplots_adjust(left=0.1, right=0.825, bottom=0.075, top=0.95, hspace=0.7) fig.legend(loc="right", title="cHPI frequencies") return fig diff --git a/mne/viz/tests/test_epochs.py b/mne/viz/tests/test_epochs.py index 711afdea480..bfe5d07eebf 100644 --- a/mne/viz/tests/test_epochs.py +++ b/mne/viz/tests/test_epochs.py @@ -272,14 +272,7 @@ def test_plot_epochs_nodata(browser_backend): @pytest.mark.slowtest def test_plot_epochs_image(epochs): - """Test plotting of epochs image. - - Note that some of these tests that should pass are triggering MPL - UserWarnings about tight_layout not being applied ("tight_layout cannot - make axes width small enough to accommodate all axes decorations"). Calling - `plt.close('all')` just before the offending test seems to prevent this - warning, though it's unclear why. 
- """ + """Test plotting of epochs image.""" figs = epochs.plot_image() assert len(figs) == 2 # one fig per ch_type (test data has mag, grad) assert len(plt.get_fignums()) == 2 diff --git a/mne/viz/tests/test_evoked.py b/mne/viz/tests/test_evoked.py index ce67febd0a9..644b2fb4e3e 100644 --- a/mne/viz/tests/test_evoked.py +++ b/mne/viz/tests/test_evoked.py @@ -231,7 +231,7 @@ def test_plot_evoked(): def test_constrained_layout(): """Test that we handle constrained layouts correctly.""" - fig, ax = plt.subplots(1, 1, constrained_layout=True) + fig, ax = plt.subplots(1, 1, layout="constrained") assert fig.get_constrained_layout() evoked = mne.read_evokeds(evoked_fname)[0] evoked.pick(evoked.ch_names[:2]) @@ -612,7 +612,7 @@ def test_plot_ctf(): fig = plt.figure() # create custom axes for topomaps, colorbar and the timeseries - gs = gridspec.GridSpec(3, 7, hspace=0.5, top=0.8) + gs = gridspec.GridSpec(3, 7, hspace=0.5, top=0.8, figure=fig) topo_axes = [ fig.add_subplot(gs[0, idx * 2 : (idx + 1) * 2]) for idx in range(len(times)) ] diff --git a/mne/viz/tests/test_topomap.py b/mne/viz/tests/test_topomap.py index 4f95f586d98..e20b1987dd1 100644 --- a/mne/viz/tests/test_topomap.py +++ b/mne/viz/tests/test_topomap.py @@ -75,8 +75,8 @@ fast_test = dict(res=8, contours=0, sensors=False) -@pytest.mark.parametrize("constrained_layout", (False, True)) -def test_plot_topomap_interactive(constrained_layout): +@pytest.mark.parametrize("layout", (None, "constrained")) +def test_plot_topomap_interactive(layout): """Test interactive topomap projection plotting.""" evoked = read_evokeds(evoked_fname, baseline=(None, 0))[0] evoked.pick(picks="mag") @@ -86,7 +86,7 @@ def test_plot_topomap_interactive(constrained_layout): evoked.add_proj(compute_proj_evoked(evoked, n_mag=1)) plt.close("all") - fig, ax = plt.subplots(constrained_layout=constrained_layout) + fig, ax = plt.subplots(layout=layout) canvas = fig.canvas kwargs = dict( diff --git a/mne/viz/topo.py b/mne/viz/topo.py index 683c22d9a6a..5a832c954a3 100644 --- a/mne/viz/topo.py +++ b/mne/viz/topo.py @@ -145,7 +145,8 @@ def _iter_topography( from ..channels.layout import find_layout if fig is None: - fig = plt.figure() + # Don't use constrained layout because we place axes manually + fig = plt.figure(layout=None) def format_coord_unified(x, y, pos=None, ch_names=None): """Update status bar with channel name under cursor.""" @@ -296,7 +297,8 @@ def _plot_topo( ) if axes is None: - fig = plt.figure() + # Don't use constrained layout because we place axes manually + fig = plt.figure(layout=None) axes = plt.axes([0.015, 0.025, 0.97, 0.95]) axes.set_facecolor(fig_facecolor) else: diff --git a/mne/viz/topomap.py b/mne/viz/topomap.py index d47ec145e07..a90400c6421 100644 --- a/mne/viz/topomap.py +++ b/mne/viz/topomap.py @@ -54,7 +54,6 @@ ) from ..utils.spectrum import _split_psd_kwargs from .utils import ( - tight_layout, _setup_vmin_vmax, _prepare_trellis, _check_delayed_ssp, @@ -301,8 +300,8 @@ def _add_colorbar( ax, im, cmap, + *, side="right", - pad=0.05, title=None, format=None, size="5%", @@ -310,14 +309,10 @@ def _add_colorbar( ch_type=None, ): """Add a colorbar to an axis.""" - import matplotlib.pyplot as plt - from mpl_toolkits.axes_grid1 import make_axes_locatable - - divider = make_axes_locatable(ax) - cax = divider.append_axes(side, size=size, pad=pad) - cbar = plt.colorbar(im, cax=cax, format=format) + cbar = ax.figure.colorbar(im, format=format) if cmap is not None and cmap[1]: ax.CB = DraggableColorbar(cbar, im, kind, ch_type) + cax = cbar.ax if title 
is not None: cax.set_title(title, y=1.05, fontsize=10) return cbar, cax @@ -450,7 +445,6 @@ def plot_projs_topomap( ) with warnings.catch_warnings(record=True): warnings.simplefilter("ignore") - tight_layout(fig=fig) plt_show(show) return fig @@ -1020,7 +1014,7 @@ def plot_topomap( from matplotlib.colors import Normalize if axes is None: - _, axes = plt.subplots(figsize=(size, size)) + _, axes = plt.subplots(figsize=(size, size), layout="constrained") sphere = _check_sphere(sphere, pos if isinstance(pos, Info) else None) _validate_type(cnorm, (Normalize, None), "cnorm") if cnorm is not None and (vlim[0] is not None or vlim[1] is not None): @@ -1379,9 +1373,6 @@ def _plot_topomap( size="x-small", ) - if not axes.figure.get_constrained_layout(): - axes.figure.subplots_adjust(top=0.95) - if onselect is not None: lim = axes.dataLim x0, y0, width, height = lim.x0, lim.y0, lim.width, lim.height @@ -1475,7 +1466,6 @@ def _plot_ica_topomap( axes, im, cmap, - pad=0.05, title="AU", format="%3.2f", kind="ica_topomap", @@ -1716,7 +1706,6 @@ def plot_ica_components( cmap, title="AU", side="right", - pad=0.05, format=cbar_fmt, kind="ica_comp_topomap", ch_type=ch_type, @@ -1725,9 +1714,6 @@ def plot_ica_components( cbar.set_ticks(_vlim) _hide_frame(ax) del pos - if not user_passed_axes: - tight_layout(fig=fig) - fig.subplots_adjust(top=0.88, bottom=0.0) fig.canvas.draw() # add title selection interactivity @@ -1934,7 +1920,11 @@ def plot_tfr_topomap( vlim = _setup_vmin_vmax(data, *vlim, norm) cmap = _setup_cmap(cmap, norm=norm) - axes = plt.subplots(figsize=(size, size))[1] if axes is None else axes + axes = ( + plt.subplots(figsize=(size, size), layout="constrained")[1] + if axes is None + else axes + ) fig = axes.figure _hide_frame(axes) @@ -2204,18 +2194,17 @@ def plot_evoked_topomap( if interactive: height_ratios = [5, 1] nrows = 2 - ncols = want_axes - width = size * ncols + ncols = n_times + width = size * want_axes height = size + max(0, 0.1 * (4 - size)) fig = figure_nobar(figsize=(width * 1.5, height * 1.5)) - g_kwargs = {"left": 0.2, "right": 0.8, "bottom": 0.05, "top": 0.9} - gs = GridSpec(nrows, ncols, height_ratios=height_ratios, **g_kwargs) + gs = GridSpec(nrows, ncols, height_ratios=height_ratios, figure=fig) axes = [] for ax_idx in range(n_times): axes.append(plt.subplot(gs[0, ax_idx])) elif axes is None: fig, axes, ncols, nrows = _prepare_trellis( - n_times, ncols=ncols, nrows=nrows, colorbar=colorbar, size=size + n_times, ncols=ncols, nrows=nrows, size=size ) else: nrows, ncols = None, None # Deactivate ncols when axes were passed @@ -2227,13 +2216,7 @@ def plot_evoked_topomap( f"You must provide {want_axes} axes (one for " f"each time{cbar_err}), got {len(axes)}." 
) - # figure margins - if not fig.get_constrained_layout(): - side_margin = plt.rcParams["figure.subplot.wspace"] / (2 * want_axes) - top_margin = max(0.05, 0.2 / size) - fig.subplots_adjust( - left=side_margin, right=1 - side_margin, bottom=0, top=1 - top_margin - ) + del want_axes # find first index that's >= (to rounding error) to each time point time_idx = [ np.where( @@ -2336,12 +2319,10 @@ def plot_evoked_topomap( images, contours_ = [], [] # loop over times for average_idx, (time, this_average) in enumerate(zip(times, average)): - adjust_for_cbar = colorbar and ncols is not None and average_idx >= ncols - 1 - ax_idx = average_idx + 1 if adjust_for_cbar else average_idx tp, cn, interp = _plot_topomap( data[:, average_idx], pos, - axes=axes[ax_idx], + axes=axes[average_idx], mask=mask_[:, average_idx] if mask is not None else None, vmin=_vlim[0], vmax=_vlim[1], @@ -2362,13 +2343,13 @@ def plot_evoked_topomap( to_time = time_format % (tmax_ * scaling_time) axes_title = f"{from_time} – {to_time}" del from_time, to_time, tmin_, tmax_ - axes[ax_idx].set_title(axes_title) + axes[average_idx].set_title(axes_title) if interactive: # Add a slider to the figure and start publishing and subscribing to time_change # events. kwargs.update(vlim=_vlim) - axes.append(plt.subplot(gs[1, :-1])) + axes.append(fig.add_subplot(gs[1])) slider = Slider( axes[-1], "Time", @@ -2412,19 +2393,15 @@ def _slider_changed(val): ) if colorbar: - if interactive: - cax = plt.subplot(gs[0, -1]) - _resize_cbar(cax, ncols, size) - elif nrows is None or ncols is None: + if nrows is None or ncols is None: # axes were given by the user, so don't resize the colorbar cax = axes[-1] - else: # use the entire last column - cax = axes[ncols - 1] - _resize_cbar(cax, ncols, size) + else: # use the default behavior + cax = None + cbar = fig.colorbar(images[-1], ax=axes, cax=cax, format=cbar_fmt, shrink=0.6) if unit is not None: - cax.set_title(unit) - cbar = fig.colorbar(images[-1], ax=cax, cax=cax, format=cbar_fmt) + cbar.ax.set_title(unit) if cn is not None: cbar.set_ticks(contours) cbar.ax.tick_params(labelsize=7) @@ -2578,9 +2555,7 @@ def _plot_topomap_multi_cbar( ) if colorbar: - cbar, cax = _add_colorbar( - ax, im, cmap, pad=0.25, title=None, size="10%", format=cbar_fmt - ) + cbar, cax = _add_colorbar(ax, im, cmap, title=None, size="10%", format=cbar_fmt) cbar.set_ticks(_vlim) if unit is not None: cbar.ax.set_ylabel(unit, fontsize=8) @@ -2857,7 +2832,9 @@ def plot_psds_topomap( _validate_if_list_of_axes(axes, n_axes) fig = axes[0].figure else: - fig, axes = plt.subplots(1, n_axes, figsize=(2 * n_axes, 1.5)) + fig, axes = plt.subplots( + 1, n_axes, figsize=(2 * n_axes, 1.5), layout="constrained" + ) if n_axes == 1: axes = [axes] # loop over subplots/frequency bands @@ -2892,7 +2869,6 @@ def plot_psds_topomap( ) if not user_passed_axes: - tight_layout(fig=fig) fig.canvas.draw() plt_show(show) return fig @@ -2923,9 +2899,10 @@ def plot_layout(layout, picks=None, show_axes=False, show=True): """ import matplotlib.pyplot as plt - fig = plt.figure(figsize=(max(plt.rcParams["figure.figsize"]),) * 2) + fig = plt.figure( + figsize=(max(plt.rcParams["figure.figsize"]),) * 2, layout="constrained" + ) ax = fig.add_subplot(111) - fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=None, hspace=None) ax.set(xticks=[], yticks=[], aspect="equal") outlines = dict(border=([0, 1, 1, 0, 0], [0, 0, 1, 1, 0])) _draw_outlines(ax, outlines) @@ -2945,7 +2922,6 @@ def plot_layout(layout, picks=None, show_axes=False, show=True): x1, x2, y1, y2 
= p[0], p[0] + p[2], p[1], p[1] + p[3] ax.plot([x1, x1, x2, x2, x1], [y1, y2, y2, y1, y1], color="k") ax.axis("off") - tight_layout(fig=fig, pad=0, w_pad=0, h_pad=0) plt_show(show) return fig @@ -3163,7 +3139,6 @@ def _init_anim( outlines_ = _draw_outlines(ax, outlines) params.update({"patch": patch_, "outlines": outlines_}) - tight_layout(fig=ax.figure) return tuple(items) + cont_collections @@ -3306,7 +3281,7 @@ def _topomap_animation( norm = np.min(data) >= 0 vmin, vmax = _setup_vmin_vmax(data, vmin, vmax, norm) - fig = plt.figure(figsize=(6, 5)) + fig = plt.figure(figsize=(6, 5), layout="constrained") shape = (8, 12) colspan = shape[1] - 1 rowspan = shape[0] - bool(butterfly) @@ -3491,8 +3466,6 @@ def _plot_corrmap( border=border, ) _hide_frame(ax) - tight_layout(fig=fig) - fig.subplots_adjust(top=0.8) fig.canvas.draw() plt_show(show) return fig @@ -3652,7 +3625,7 @@ def plot_arrowmap( ) outlines = _make_head_outlines(sphere, pos, outlines, clip_origin) if axes is None: - fig, axes = plt.subplots() + fig, axes = plt.subplots(layout="constrained") else: fig = axes.figure plot_topomap( @@ -3679,11 +3652,7 @@ def plot_arrowmap( dx, dy = _trigradient(x, y, data) dxx = dy.data dyy = -dx.data - axes.quiver(x, y, dxx, dyy, scale=scale, color="k", lw=1, clip_on=False) - axes.figure.canvas.draw_idle() - with warnings.catch_warnings(record=True): - warnings.simplefilter("ignore") - tight_layout(fig=fig) + axes.quiver(x, y, dxx, dyy, scale=scale, color="k", lw=1) plt_show(show) return fig @@ -3735,7 +3704,7 @@ def plot_bridged_electrodes( topomap_args.setdefault("contours", False) sphere = topomap_args.get("sphere", _check_sphere(None)) if "axes" not in topomap_args: - fig, ax = plt.subplots() + fig, ax = plt.subplots(layout="constrained") topomap_args["axes"] = ax else: fig = None @@ -4075,7 +4044,11 @@ def plot_regression_weights( axes_was_none = axes is None if axes_was_none: fig, axes = plt.subplots( - nrows, ncols, squeeze=False, figsize=(ncols * 2, nrows * 1.5 + 1) + nrows, + ncols, + squeeze=False, + figsize=(ncols * 2, nrows * 1.5 + 1), + layout="constrained", ) axes = axes.T.ravel() else: @@ -4143,8 +4116,5 @@ def plot_regression_weights( ) if axes_was_none: fig.suptitle(title) - fig.subplots_adjust( - top=0.88, bottom=0.06, left=0.025, right=0.911, hspace=0.2, wspace=0.5 - ) plt_show(show) return fig diff --git a/mne/viz/utils.py b/mne/viz/utils.py index 78f05ee9109..08d4e69ec48 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -21,7 +21,6 @@ import sys import tempfile import traceback -import warnings import webbrowser from decorator import decorator @@ -203,63 +202,6 @@ def _show_browser(show=True, block=True, fig=None, **kwargs): _qt_app_exec(QApplication.instance()) -def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None): - """Adjust subplot parameters to give specified padding. - - .. note:: For plotting please use this function instead of - ``plt.tight_layout``. - - Parameters - ---------- - pad : float - Padding between the figure edge and the edges of subplots, as a - fraction of the font-size. - h_pad : float - Padding height between edges of adjacent subplots. - Defaults to ``pad_inches``. - w_pad : float - Padding width between edges of adjacent subplots. - Defaults to ``pad_inches``. - fig : instance of Figure - Figure to apply changes to. - - Notes - ----- - This will not force constrained_layout=False if the figure was created - with that method. 
- """ - _validate_type(pad, "numeric", "pad") - import matplotlib.pyplot as plt - - fig = plt.gcf() if fig is None else fig - - fig.canvas.draw() - constrained = fig.get_constrained_layout() - kwargs = dict(pad=pad, h_pad=h_pad, w_pad=w_pad) - if constrained: - return # no-op - try: # see https://github.com/matplotlib/matplotlib/issues/2654 - with warnings.catch_warnings(record=True) as ws: - fig.tight_layout(**kwargs) - except Exception: - try: - with warnings.catch_warnings(record=True) as ws: - if hasattr(fig, "set_layout_engine"): - fig.set_layout_engine("tight", **kwargs) - else: - fig.set_tight_layout(kwargs) - except Exception: - warn( - 'Matplotlib function "tight_layout" is not supported.' - " Skipping subplot adjustment." - ) - return - for w in ws: - w_msg = str(w.message) if hasattr(w, "message") else w.get_message() - if not w_msg.startswith("This figure includes Axes"): - warn(w_msg, w.category, "matplotlib") - - def _check_delayed_ssp(container): """Handle interactive SSP selection.""" if container.proj is True or all(p["active"] for p in container.info["projs"]): @@ -489,7 +431,6 @@ def _prepare_trellis( ncols, nrows="auto", title=False, - colorbar=False, size=1.3, sharex=False, sharey=False, @@ -517,22 +458,13 @@ def _prepare_trellis( "figure.".format(n_cells, nrows, ncols) ) - if colorbar: - ncols += 1 width = size * ncols height = (size + max(0, 0.1 * (4 - size))) * nrows + bool(title) * 0.5 - height_ratios = None fig = _figure(toolbar=False, figsize=(width * 1.5, 0.25 + height * 1.5)) - gs = GridSpec(nrows, ncols, figure=fig, height_ratios=height_ratios) + gs = GridSpec(nrows, ncols, figure=fig) axes = [] - if colorbar: - # exclude last axis of each row except top row, which is for colorbar - exclude = set(range(2 * ncols - 1, nrows * ncols, ncols)) - ax_idxs = sorted(set(range(nrows * ncols)) - exclude)[: n_cells + 1] - else: - ax_idxs = range(n_cells) - for ax_idx in ax_idxs: + for ax_idx in range(n_cells): subplot_kw = dict() if ax_idx > 0: if sharex: @@ -560,7 +492,8 @@ def _draw_proj_checkbox(event, params, draw_current_state=True): width = max([4.0, max([len(p["desc"]) for p in projs]) / 6.0 + 0.5]) height = (len(projs) + 1) / 6.0 + 1.5 - fig_proj = figure_nobar(figsize=(width, height)) + # We manually place everything here so avoid constrained layouts + fig_proj = figure_nobar(figsize=(width, height), layout=None) _set_window_title(fig_proj, "SSP projection vectors") offset = 1.0 / 6.0 / height params["fig_proj"] = fig_proj # necessary for proper toggling @@ -707,6 +640,8 @@ def figure_nobar(*args, **kwargs): old_val = rcParams["toolbar"] try: rcParams["toolbar"] = "none" + if "layout" not in kwargs: + kwargs["layout"] = "constrained" fig = plt.figure(*args, **kwargs) # remove button press catchers (for toolbar) cbs = list(fig.canvas.callbacks.callbacks["key_press_event"].keys()) @@ -1319,7 +1254,10 @@ def _plot_sensors( if kind == "3d": subplot_kw.update(projection="3d") fig, ax = plt.subplots( - 1, figsize=(max(rcParams["figure.figsize"]),) * 2, subplot_kw=subplot_kw + 1, + figsize=(max(rcParams["figure.figsize"]),) * 2, + subplot_kw=subplot_kw, + layout="constrained", ) else: fig = ax.get_figure() @@ -1367,8 +1305,6 @@ def _plot_sensors( # Equal aspect for 3D looks bad, so only use for 2D ax.set(aspect="equal") - if axes_was_none: # we'll show the plot title as the window title - fig.subplots_adjust(left=0, bottom=0, right=1, top=1) ax.axis("off") # remove border around figure del sphere @@ -1393,14 +1329,6 @@ def _plot_sensors( connect_picker = kind == 
"select" # make sure no names go off the edge of the canvas xmin, ymin, xmax, ymax = fig.get_window_extent().bounds - renderer = fig.canvas.get_renderer() - extents = [x.get_window_extent(renderer=renderer) for x in ax.texts] - xmaxs = np.array([x.max[0] for x in extents]) - bad_xmax_ixs = np.nonzero(xmaxs > xmax)[0] - if len(bad_xmax_ixs): - needed_space = (xmaxs[bad_xmax_ixs] - xmax).max() / xmax - fig.subplots_adjust(right=1 - 1.1 * needed_space) - if connect_picker: picker = partial( _onpick_sensor, @@ -1530,38 +1458,14 @@ def _setup_cmap(cmap, n_axes=1, norm=False): def _prepare_joint_axes(n_maps, figsize=None): - """Prepare axes for topomaps and colorbar in joint plot figure. - - Parameters - ---------- - n_maps: int - Number of topomaps to include in the figure - figsize: tuple - Figure size, see plt.figsize - - Returns - ------- - fig : matplotlib.figure.Figure - Figure with initialized axes - main_ax: matplotlib.axes._subplots.AxesSubplot - Axes in which to put the main plot - map_ax: list - List of axes for each topomap - cbar_ax: matplotlib.axes._subplots.AxesSubplot - Axes for colorbar next to topomaps - """ import matplotlib.pyplot as plt + from matplotlib.gridspec import GridSpec - fig = plt.figure(figsize=figsize) - main_ax = fig.add_subplot(212) - ts = n_maps + 2 - map_ax = [plt.subplot(4, ts, x + 2 + ts) for x in range(n_maps)] - # Position topomap subplots on the second row, starting on the - # second column - cbar_ax = plt.subplot(4, 5 * (ts + 1), 10 * (ts + 1)) - # Position colorbar at the very end of a more finely divided - # second row of subplots - return fig, main_ax, map_ax, cbar_ax + fig = plt.figure(figsize=figsize, layout="constrained") + gs = GridSpec(2, n_maps, height_ratios=[1, 2], figure=fig) + map_ax = [fig.add_subplot(gs[0, x]) for x in range(n_maps)] # first row + main_ax = fig.add_subplot(gs[1, :]) # second row + return fig, main_ax, map_ax class DraggableColorbar: @@ -1908,37 +1812,6 @@ def _merge_annotations(start, stop, description, annotations, current=()): annotations.append(onset, duration, description) -def _connection_line(x, fig, sourceax, targetax, y=1.0, y_source_transform="transAxes"): - """Connect source and target plots with a line. - - Connect source and target plots with a line, such as time series - (source) and topolots (target). Primarily used for plot_joint - functions. - """ - from matplotlib.lines import Line2D - - trans_fig = fig.transFigure - trans_fig_inv = fig.transFigure.inverted() - - xt, yt = trans_fig_inv.transform(targetax.transAxes.transform([0.5, 0.0])) - xs, _ = trans_fig_inv.transform(sourceax.transData.transform([x, 0.0])) - _, ys = trans_fig_inv.transform( - getattr(sourceax, y_source_transform).transform([0.0, y]) - ) - - return Line2D( - (xt, xs), - (yt, ys), - transform=trans_fig, - color="grey", - linestyle="-", - linewidth=1.5, - alpha=0.66, - zorder=1, - clip_on=False, - ) - - class DraggableLine: """Custom matplotlib line for moving around by drag and drop. 
diff --git a/requirements.txt b/requirements.txt index 39ae2c37815..90944200247 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ # requirements for full MNE-Python functionality (other than raw/epochs export) numpy>=1.15.4 scipy>=1.7.1 -matplotlib>=3.4.3 +matplotlib>=3.5.0 tqdm pooch>=1.5 decorator diff --git a/requirements_base.txt b/requirements_base.txt index 551156522c3..2e6ba6e6c80 100644 --- a/requirements_base.txt +++ b/requirements_base.txt @@ -1,7 +1,7 @@ # requirements for basic MNE-Python functionality numpy>=1.21.2 scipy>=1.7.1 -matplotlib>=3.4.3 +matplotlib>=3.5.0 tqdm pooch>=1.5 decorator diff --git a/tools/github_actions_env_vars.sh b/tools/github_actions_env_vars.sh index a0ab494a9db..ba1cac712a5 100755 --- a/tools/github_actions_env_vars.sh +++ b/tools/github_actions_env_vars.sh @@ -4,7 +4,7 @@ set -eo pipefail -x # old and minimal use conda if [[ "$MNE_CI_KIND" == "old" ]]; then echo "Setting conda env vars for old" - echo "CONDA_DEPENDENCIES=numpy=1.21.2 scipy=1.7.1 matplotlib=3.4.3 pandas=1.3.2 scikit-learn=1.0" >> $GITHUB_ENV + echo "CONDA_DEPENDENCIES=numpy=1.21.2 scipy=1.7.1 matplotlib=3.5.0 pandas=1.3.2 scikit-learn=1.0" >> $GITHUB_ENV echo "MNE_IGNORE_WARNINGS_IN_TESTS=true" >> $GITHUB_ENV echo "MNE_SKIP_NETWORK_TESTS=1" >> $GITHUB_ENV elif [[ "$MNE_CI_KIND" == "minimal" ]]; then diff --git a/tutorials/epochs/60_make_fixed_length_epochs.py b/tutorials/epochs/60_make_fixed_length_epochs.py index a3186ca25c2..9a6eace0ab9 100644 --- a/tutorials/epochs/60_make_fixed_length_epochs.py +++ b/tutorials/epochs/60_make_fixed_length_epochs.py @@ -113,13 +113,10 @@ color_lims = np.percentile(np.array(corr_matrices), [5, 95]) titles = ["First 30 Seconds", "Last 30 Seconds"] -fig, axes = plt.subplots(nrows=1, ncols=2) +fig, axes = plt.subplots(nrows=1, ncols=2, layout="constrained") fig.suptitle("Correlation Matrices from First 30 Seconds and Last 30 Seconds") for ci, corr_matrix in enumerate(corr_matrices): ax = axes[ci] mpbl = ax.imshow(corr_matrix, clim=color_lims) ax.set_xlabel(titles[ci]) -fig.subplots_adjust(right=0.8) -cax = fig.add_axes([0.85, 0.2, 0.025, 0.6]) -cbar = fig.colorbar(ax.images[0], cax=cax) -cbar.set_label("Correlation Coefficient") +cbar = fig.colorbar(ax.images[0], label="Correlation Coefficient") diff --git a/tutorials/forward/50_background_freesurfer_mne.py b/tutorials/forward/50_background_freesurfer_mne.py index 5efcc07d0d1..0150088de83 100644 --- a/tutorials/forward/50_background_freesurfer_mne.py +++ b/tutorials/forward/50_background_freesurfer_mne.py @@ -124,7 +124,7 @@ def imshow_mri(data, img, vox, xyz, suptitle): """Show an MRI slice with a voxel annotated.""" i, j, k = vox - fig, ax = plt.subplots(1, figsize=(6, 6)) + fig, ax = plt.subplots(1, figsize=(6, 6), layout="constrained") codes = nibabel.orientations.aff2axcodes(img.affine) # Figure out the title based on the code of this axis ori_slice = dict( @@ -157,7 +157,6 @@ def imshow_mri(data, img, vox, xyz, suptitle): title=f"{title} view: i={i} ({ori_names[codes[0]]}+)", ) fig.suptitle(suptitle) - fig.subplots_adjust(0.1, 0.1, 0.95, 0.85) return fig diff --git a/tutorials/intro/70_report.py b/tutorials/intro/70_report.py index b23c8852694..12f04772ce8 100644 --- a/tutorials/intro/70_report.py +++ b/tutorials/intro/70_report.py @@ -463,7 +463,7 @@ fig_array_rotated = fig_array_rotated.clip(min=0, max=1) # Create the figure - fig, ax = plt.subplots(figsize=(3, 3), constrained_layout=True) + fig, ax = plt.subplots(figsize=(3, 3), layout="constrained") 
ax.imshow(fig_array_rotated) ax.set_axis_off() diff --git a/tutorials/inverse/20_dipole_fit.py b/tutorials/inverse/20_dipole_fit.py index 89cf81af671..c81c16f3252 100644 --- a/tutorials/inverse/20_dipole_fit.py +++ b/tutorials/inverse/20_dipole_fit.py @@ -100,6 +100,7 @@ ncols=4, figsize=[10.0, 3.4], gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1], top=0.85), + layout="constrained", ) vmin, vmax = -400, 400 # make sure each plot has same colour range @@ -119,7 +120,6 @@ "at {:.0f} ms".format(best_time * 1000.0), fontsize=16, ) -fig.tight_layout() # %% # Estimate the time course of a single dipole with fixed position and diff --git a/tutorials/inverse/60_visualize_stc.py b/tutorials/inverse/60_visualize_stc.py index 01bd0c28a84..3be86643c61 100644 --- a/tutorials/inverse/60_visualize_stc.py +++ b/tutorials/inverse/60_visualize_stc.py @@ -156,7 +156,7 @@ label_tc = stc.extract_label_time_course(fname_aseg, src=src) lidx, tidx = np.unravel_index(np.argmax(label_tc), label_tc.shape) -fig, ax = plt.subplots(1) +fig, ax = plt.subplots(1, layout="constrained") ax.plot(stc.times, label_tc.T, "k", lw=1.0, alpha=0.5) xy = np.array([stc.times[tidx], label_tc[lidx, tidx]]) xytext = xy + [0.01, 1] @@ -164,7 +164,6 @@ ax.set(xlim=stc.times[[0, -1]], xlabel="Time (s)", ylabel="Activation") for key in ("right", "top"): ax.spines[key].set_visible(False) -fig.tight_layout() # %% # We can plot several labels with the most activation in their time course diff --git a/tutorials/inverse/80_brainstorm_phantom_elekta.py b/tutorials/inverse/80_brainstorm_phantom_elekta.py index cca2c3470af..95a2a8e8f59 100644 --- a/tutorials/inverse/80_brainstorm_phantom_elekta.py +++ b/tutorials/inverse/80_brainstorm_phantom_elekta.py @@ -144,7 +144,7 @@ actual_amp = 100.0 # nAm fig, (ax1, ax2, ax3) = plt.subplots( - nrows=3, ncols=1, figsize=(6, 7), constrained_layout=True + nrows=3, ncols=1, figsize=(6, 7), layout="constrained" ) diffs = 1000 * np.sqrt(np.sum((dip.pos - actual_pos) ** 2, axis=-1)) diff --git a/tutorials/machine-learning/30_strf.py b/tutorials/machine-learning/30_strf.py index af0db4d1d20..9cc53a7a2da 100644 --- a/tutorials/machine-learning/30_strf.py +++ b/tutorials/machine-learning/30_strf.py @@ -86,12 +86,10 @@ shading="gouraud", ) -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") ax.pcolormesh(delays_sec, freqs, weights, **kwargs) ax.set(title="Simulated STRF", xlabel="Time Lags (s)", ylabel="Frequency (Hz)") plt.setp(ax.get_xticklabels(), rotation=45) -plt.autoscale(tight=True) -mne.viz.tight_layout() # %% # Simulate a neural response @@ -147,7 +145,7 @@ X_plt = scale(np.hstack(X[:2]).T).T y_plt = scale(np.hstack(y[:2])) time = np.arange(X_plt.shape[-1]) / sfreq -_, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 6), sharex=True) +_, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 6), sharex=True, layout="constrained") ax1.pcolormesh(time, freqs, X_plt, vmin=0, vmax=4, cmap="Reds", shading="gouraud") ax1.set_title("Input auditory features") ax1.set(ylim=[freqs.min(), freqs.max()], ylabel="Frequency (Hz)") @@ -158,7 +156,6 @@ xlabel="Time (s)", ylabel="Activity (a.u.)", ) -mne.viz.tight_layout() # %% @@ -197,14 +194,19 @@ best_pred = best_mod.predict(X_test)[:, 0] # Plot the original STRF, and the one that we recovered with modeling. 
-_, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3), sharey=True, sharex=True) +_, (ax1, ax2) = plt.subplots( + 1, + 2, + figsize=(6, 3), + sharey=True, + sharex=True, + layout="constrained", +) ax1.pcolormesh(delays_sec, freqs, weights, **kwargs) ax2.pcolormesh(times, rf.feature_names, coefs, **kwargs) ax1.set_title("Original STRF") ax2.set_title("Best Reconstructed STRF") plt.setp([iax.get_xticklabels() for iax in [ax1, ax2]], rotation=45) -plt.autoscale(tight=True) -mne.viz.tight_layout() # Plot the actual response and the predicted response on a held out stimulus time_pred = np.arange(best_pred.shape[0]) / sfreq @@ -213,8 +215,6 @@ ax.plot(time_pred, best_pred, color="r", lw=1) ax.set(title="Original and predicted activity", xlabel="Time (s)") ax.legend(["Original", "Predicted"]) -plt.autoscale(tight=True) -mne.viz.tight_layout() # %% @@ -229,7 +229,7 @@ # in :footcite:`TheunissenEtAl2001,WillmoreSmyth2003,HoldgrafEtAl2016`. # Plot model score for each ridge parameter -fig = plt.figure(figsize=(10, 4)) +fig = plt.figure(figsize=(10, 4), layout="constrained") ax = plt.subplot2grid([2, len(alphas)], [1, 0], 1, len(alphas)) ax.plot(np.arange(len(alphas)), scores, marker="o", color="r") ax.annotate( @@ -244,7 +244,6 @@ ylabel="Score ($R^2$)", xlim=[-0.4, len(alphas) - 0.6], ) -mne.viz.tight_layout() # Plot the STRF of each ridge parameter for ii, (rf, i_alpha) in enumerate(zip(models, alphas)): @@ -252,9 +251,7 @@ ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs) plt.xticks([], []) plt.yticks([], []) - plt.autoscale(tight=True) fig.suptitle("Model coefficients / scores for many ridge parameters", y=1) -mne.viz.tight_layout() # %% # Using different regularization types @@ -308,7 +305,7 @@ # This matches the "true" receptive field structure and results in a better # model fit. -fig = plt.figure(figsize=(10, 6)) +fig = plt.figure(figsize=(10, 6), layout="constrained") ax = plt.subplot2grid([3, len(alphas)], [2, 0], 1, len(alphas)) ax.plot(np.arange(len(alphas)), scores_lap, marker="o", color="r") ax.plot(np.arange(len(alphas)), scores, marker="o", color="0.5", ls=":") @@ -330,7 +327,6 @@ ylabel="Score ($R^2$)", xlim=[-0.4, len(alphas) - 0.6], ) -mne.viz.tight_layout() # Plot the STRF of each ridge parameter xlim = times[[0, -1]] @@ -346,13 +342,19 @@ if ii == 0: ax.set(ylabel="Ridge") fig.suptitle("Model coefficients / scores for laplacian regularization", y=1) -mne.viz.tight_layout() # %% # Plot the original STRF, and the one that we recovered with modeling. 
rf = models[ix_best_alpha] rf_lap = models_lap[ix_best_alpha_lap] -_, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3), sharey=True, sharex=True) +_, (ax1, ax2, ax3) = plt.subplots( + 1, + 3, + figsize=(9, 3), + sharey=True, + sharex=True, + layout="constrained", +) ax1.pcolormesh(delays_sec, freqs, weights, **kwargs) ax2.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs) ax3.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs) @@ -360,8 +362,6 @@ ax2.set_title("Best Ridge STRF") ax3.set_title("Best Laplacian STRF") plt.setp([iax.get_xticklabels() for iax in [ax1, ax2, ax3]], rotation=45) -plt.autoscale(tight=True) -mne.viz.tight_layout() # %% # References diff --git a/tutorials/preprocessing/25_background_filtering.py b/tutorials/preprocessing/25_background_filtering.py index a5ec433ac7c..09e5db8173e 100644 --- a/tutorials/preprocessing/25_background_filtering.py +++ b/tutorials/preprocessing/25_background_filtering.py @@ -478,7 +478,7 @@ # and the time-domain ringing is thus more pronounced for the steep-slope, # long-duration filter than the shorter, shallower-slope filter: -axes = plt.subplots(1, 2)[1] +axes = plt.subplots(1, 2, layout="constrained")[1] def plot_signal(x, offset): @@ -524,7 +524,6 @@ def plot_signal(x, offset): for text in axes[0].get_yticklabels(): text.set(rotation=45, size=8) axes[1].set(xlim=flim, ylim=(-60, 10), xlabel="Frequency (Hz)", ylabel="Magnitude (dB)") -mne.viz.tight_layout() plt.show() # %% @@ -665,7 +664,7 @@ def plot_signal(x, offset): # Now let's look at how our shallow and steep Butterworth IIR filters # perform on our Morlet signal from before: -axes = plt.subplots(1, 2)[1] +axes = plt.subplots(1, 2, layout="constrained")[1] yticks = np.arange(4) / -30.0 yticklabels = ["Original", "Noisy", "Butterworth-2", "Butterworth-8"] plot_signal(x_orig, offset=yticks[0]) @@ -684,7 +683,6 @@ def plot_signal(x, offset): text.set(rotation=45, size=8) axes[1].set(xlim=flim, ylim=(-60, 10), xlabel="Frequency (Hz)", ylabel="Magnitude (dB)") mne.viz.adjust_axes(axes) -mne.viz.tight_layout() plt.show() # %% @@ -793,7 +791,6 @@ def plot_signal(x, offset): ) mne.viz.adjust_axes(axes) -mne.viz.tight_layout() plt.show() # %% @@ -832,7 +829,7 @@ def plot_signal(x, offset): def baseline_plot(x): - all_axes = plt.subplots(3, 2)[1] + all_axes = plt.subplots(3, 2, layout="constrained")[1] for ri, (axes, freq) in enumerate(zip(all_axes, [0.1, 0.3, 0.5])): for ci, ax in enumerate(axes): if ci == 0: @@ -849,7 +846,6 @@ def baseline_plot(x): ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel) ax.set_ylabel("%0.1f Hz" % freq, rotation=0, horizontalalignment="right") mne.viz.adjust_axes(axes) - mne.viz.tight_layout() plt.suptitle(title) plt.show() diff --git a/tutorials/preprocessing/30_filtering_resampling.py b/tutorials/preprocessing/30_filtering_resampling.py index 32854096194..53b1f550fcc 100644 --- a/tutorials/preprocessing/30_filtering_resampling.py +++ b/tutorials/preprocessing/30_filtering_resampling.py @@ -156,7 +156,6 @@ def add_arrows(axes): raw_notch = raw.copy().notch_filter(freqs=freqs, picks=meg_picks) for title, data in zip(["Un", "Notch "], [raw, raw_notch]): fig = data.compute_psd(fmax=250).plot(average=True, picks="data", exclude="bads") - fig.subplots_adjust(top=0.85) fig.suptitle("{}filtered".format(title), size="xx-large", weight="bold") add_arrows(fig.axes[:2]) @@ -176,7 +175,6 @@ def add_arrows(axes): ) for title, data in zip(["Un", "spectrum_fit "], [raw, raw_notch_fit]): fig = data.compute_psd(fmax=250).plot(average=True, 
picks="data", exclude="bads") - fig.subplots_adjust(top=0.85) fig.suptitle("{}filtered".format(title), size="xx-large", weight="bold") add_arrows(fig.axes[:2]) @@ -212,7 +210,6 @@ def add_arrows(axes): for data, title in zip([raw, raw_downsampled], ["Original", "Downsampled"]): fig = data.compute_psd().plot(average=True, picks="data", exclude="bads") - fig.subplots_adjust(top=0.9) fig.suptitle(title) plt.setp(fig.axes, xlim=(0, 300)) diff --git a/tutorials/preprocessing/50_artifact_correction_ssp.py b/tutorials/preprocessing/50_artifact_correction_ssp.py index a1ea7135d8e..55d18b276a6 100644 --- a/tutorials/preprocessing/50_artifact_correction_ssp.py +++ b/tutorials/preprocessing/50_artifact_correction_ssp.py @@ -498,7 +498,9 @@ evoked_eeg = epochs.average().pick("eeg") evoked_eeg.del_proj().add_proj(ecg_projs).add_proj(eog_projs) -fig, axes = plt.subplots(1, 3, figsize=(8, 3), sharex=True, sharey=True) +fig, axes = plt.subplots( + 1, 3, figsize=(8, 3), sharex=True, sharey=True, layout="constrained" +) for pi, proj in enumerate((False, True, "reconstruct")): ax = axes[pi] evoked_eeg.plot(proj=proj, axes=ax, spatial_colors=True) @@ -512,7 +514,6 @@ ax.yaxis.set_tick_params(labelbottom=True) for text in list(ax.texts): text.remove() -mne.viz.tight_layout() # %% # Note that here the bias in the EEG and magnetometer channels is reduced by diff --git a/tutorials/preprocessing/60_maxwell_filtering_sss.py b/tutorials/preprocessing/60_maxwell_filtering_sss.py index 191eabf2b45..a3659b1f765 100644 --- a/tutorials/preprocessing/60_maxwell_filtering_sss.py +++ b/tutorials/preprocessing/60_maxwell_filtering_sss.py @@ -163,7 +163,7 @@ ) # First, plot the "raw" scores. -fig, ax = plt.subplots(1, 2, figsize=(12, 8)) +fig, ax = plt.subplots(1, 2, figsize=(12, 8), layout="constrained") fig.suptitle( f"Automated noisy channel detection: {ch_type}", fontsize=16, fontweight="bold" ) @@ -188,9 +188,6 @@ ] ax[1].set_title("Scores > Limit", fontweight="bold") -# The figure title should not overlap with the subplots. -fig.tight_layout(rect=[0, 0.03, 1, 0.95]) - # %% # # .. note:: You can use the very same code as above to produce figures for diff --git a/tutorials/preprocessing/70_fnirs_processing.py b/tutorials/preprocessing/70_fnirs_processing.py index 1dd30c628ab..886d99fc618 100644 --- a/tutorials/preprocessing/70_fnirs_processing.py +++ b/tutorials/preprocessing/70_fnirs_processing.py @@ -110,7 +110,7 @@ # coupling index. sci = mne.preprocessing.nirs.scalp_coupling_index(raw_od) -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") ax.hist(sci) ax.set(xlabel="Scalp Coupling Index", ylabel="Count", xlim=[0, 1]) @@ -157,7 +157,6 @@ for when, _raw in dict(Before=raw_haemo_unfiltered, After=raw_haemo).items(): fig = _raw.compute_psd().plot(average=True, picks="data", exclude="bads") fig.suptitle(f"{when} filtering", weight="bold", size="x-large") - fig.subplots_adjust(top=0.88) # %% # Extract epochs @@ -172,7 +171,6 @@ events, event_dict = mne.events_from_annotations(raw_haemo) fig = mne.viz.plot_events(events, event_id=event_dict, sfreq=raw_haemo.info["sfreq"]) -fig.subplots_adjust(right=0.7) # make room for the legend # %% @@ -238,7 +236,7 @@ # pairs that we selected. All the channels in this data are located over the # motor cortex, and all channels show a similar pattern in the data. 
-fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6)) +fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(15, 6), layout="constrained") clims = dict(hbo=[-20, 20], hbr=[-20, 20]) epochs["Control"].average().plot_image(axes=axes[:, 0], clim=clims) epochs["Tapping"].average().plot_image(axes=axes[:, 1], clim=clims) @@ -308,7 +306,11 @@ # And we can plot the comparison at a single time point for two conditions. fig, axes = plt.subplots( - nrows=2, ncols=4, figsize=(9, 5), gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1]) + nrows=2, + ncols=4, + figsize=(9, 5), + gridspec_kw=dict(width_ratios=[1, 1, 1, 0.1]), + layout="constrained", ) vlim = (-8, 8) ts = 9.0 @@ -341,13 +343,12 @@ for column, condition in enumerate(["Tapping Left", "Tapping Right", "Left-Right"]): for row, chroma in enumerate(["HbO", "HbR"]): axes[row, column].set_title("{}: {}".format(chroma, condition)) -fig.tight_layout() # %% # Lastly, we can also look at the individual waveforms to see what is # driving the topographic plot above. -fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4)) +fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(6, 4), layout="constrained") mne.viz.plot_evoked_topo( epochs["Left"].average(picks="hbo"), color="b", axes=axes, legend=False ) diff --git a/tutorials/preprocessing/80_opm_processing.py b/tutorials/preprocessing/80_opm_processing.py index 7c76499fd36..a8d30c12abd 100644 --- a/tutorials/preprocessing/80_opm_processing.py +++ b/tutorials/preprocessing/80_opm_processing.py @@ -57,7 +57,7 @@ data_ds, time_ds = raw[picks[::5], :stop] data_ds, time_ds = data_ds[:, ::step] * amp_scale, time_ds[::step] -fig, ax = plt.subplots(constrained_layout=True) +fig, ax = plt.subplots(layout="constrained") plot_kwargs = dict(lw=1, alpha=0.5) ax.plot(time_ds, data_ds.T - np.mean(data_ds, axis=1), **plot_kwargs) ax.grid(True) @@ -111,7 +111,7 @@ data_ds, _ = raw[picks[::5], :stop] data_ds = data_ds[:, ::step] * amp_scale -fig, ax = plt.subplots(constrained_layout=True) +fig, ax = plt.subplots(layout="constrained") ax.plot(time_ds, data_ds.T - np.mean(data_ds, axis=1), **plot_kwargs) ax.grid(True, ls=":") ax.set(title="After reference regression", **set_kwargs) @@ -139,7 +139,7 @@ data_ds, _ = raw[picks[::5], :stop] data_ds = data_ds[:, ::step] * amp_scale -fig, ax = plt.subplots(constrained_layout=True) +fig, ax = plt.subplots(layout="constrained") ax.plot(time_ds, data_ds.T - np.mean(data_ds, axis=1), **plot_kwargs) ax.grid(True, ls=":") ax.set(title="After HFC", **set_kwargs) @@ -168,7 +168,7 @@ shielding = 10 * np.log10(psd_pre[:] / psd_post_reg[:]) -fig, ax = plt.subplots(constrained_layout=True) +fig, ax = plt.subplots(layout="constrained") ax.plot(psd_post_reg.freqs, shielding.T, **plot_kwargs) ax.grid(True, ls=":") ax.set(xticks=psd_post_reg.freqs) @@ -182,7 +182,7 @@ shielding = 10 * np.log10(psd_pre[:] / psd_post_hfc[:]) -fig, ax = plt.subplots(constrained_layout=True) +fig, ax = plt.subplots(layout="constrained") ax.plot(psd_post_hfc.freqs, shielding.T, **plot_kwargs) ax.grid(True, ls=":") ax.set(xticks=psd_post_hfc.freqs) @@ -215,7 +215,7 @@ # plot data_ds, _ = raw[picks[::5], :stop] data_ds = data_ds[:, ::step] * amp_scale -fig, ax = plt.subplots(constrained_layout=True) +fig, ax = plt.subplots(layout="constrained") plot_kwargs = dict(lw=1, alpha=0.5) ax.plot(time_ds, data_ds.T - np.mean(data_ds, axis=1), **plot_kwargs) ax.grid(True) diff --git a/tutorials/raw/20_event_arrays.py b/tutorials/raw/20_event_arrays.py index 6fedcfe0ade..e13b1f361a7 100644 --- a/tutorials/raw/20_event_arrays.py 
+++ b/tutorials/raw/20_event_arrays.py @@ -158,7 +158,6 @@ fig = mne.viz.plot_events( events, sfreq=raw.info["sfreq"], first_samp=raw.first_samp, event_id=event_dict ) -fig.subplots_adjust(right=0.7) # make room for legend # %% # Plotting events and raw data together diff --git a/tutorials/simulation/80_dics.py b/tutorials/simulation/80_dics.py index b8efcad9319..951671df1e4 100644 --- a/tutorials/simulation/80_dics.py +++ b/tutorials/simulation/80_dics.py @@ -99,7 +99,7 @@ def coh_signal_gen(): signal1 = coh_signal_gen() signal2 = coh_signal_gen() -fig, axes = plt.subplots(2, 2, figsize=(8, 4)) +fig, axes = plt.subplots(2, 2, figsize=(8, 4), layout="constrained") # Plot the timeseries ax = axes[0][0] @@ -133,7 +133,6 @@ def coh_signal_gen(): ylabel="Coherence", title="Coherence between the timeseries", ) -fig.tight_layout() # %% # Now we put the signals at two locations on the cortex. We construct a diff --git a/tutorials/stats-sensor-space/10_background_stats.py b/tutorials/stats-sensor-space/10_background_stats.py index 066ab249121..412715b3042 100644 --- a/tutorials/stats-sensor-space/10_background_stats.py +++ b/tutorials/stats-sensor-space/10_background_stats.py @@ -76,7 +76,7 @@ # %% # The data averaged over all subjects looks like this: -fig, ax = plt.subplots() +fig, ax = plt.subplots(layout="constrained") ax.imshow(X.mean(0), cmap="inferno") ax.set(xticks=[], yticks=[], title="Data averaged over subjects") @@ -121,7 +121,7 @@ def plot_t_p(t, p, title, mcc, axes=None): if axes is None: - fig = plt.figure(figsize=(6, 3)) + fig = plt.figure(figsize=(6, 3), layout="constrained") axes = [fig.add_subplot(121, projection="3d"), fig.add_subplot(122)] show = True else: @@ -150,7 +150,7 @@ def plot_t_p(t, p, title, mcc, axes=None): xticks=[], yticks=[], zticks=[], xlim=[0, width - 1], ylim=[0, width - 1] ) axes[0].view_init(30, 15) - cbar = plt.colorbar( + cbar = axes[0].figure.colorbar( ax=axes[0], shrink=0.75, orientation="horizontal", @@ -172,7 +172,7 @@ def plot_t_p(t, p, title, mcc, axes=None): use_p, cmap="inferno", vmin=p_lims[0], vmax=p_lims[1], interpolation="nearest" ) axes[1].set(xticks=[], yticks=[]) - cbar = plt.colorbar( + cbar = axes[1].figure.colorbar( ax=axes[1], shrink=0.75, orientation="horizontal", @@ -188,8 +188,6 @@ def plot_t_p(t, p, title, mcc, axes=None): text = fig.suptitle(title) if mcc: text.set_weight("bold") - plt.subplots_adjust(0, 0.05, 1, 0.9, wspace=0, hspace=0) - mne.viz.utils.plt_show() plot_t_p(ts[-1], ps[-1], titles[-1], mccs[-1]) @@ -286,7 +284,7 @@ def plot_t_p(t, p, title, mcc, axes=None): N = np.arange(1, 80) alpha = 0.05 p_type_I = 1 - (1 - alpha) ** N -fig, ax = plt.subplots(figsize=(4, 3)) +fig, ax = plt.subplots(figsize=(4, 3), layout="constrained") ax.scatter(N, p_type_I, 3) ax.set( xlim=N[[0, -1]], @@ -295,7 +293,6 @@ def plot_t_p(t, p, title, mcc, axes=None): ylabel="Probability of at least\none type I error", ) ax.grid(True) -fig.tight_layout() fig.show() # %% @@ -612,7 +609,7 @@ def plot_t_p(t, p, title, mcc, axes=None): # and the bottom shows p-values for various statistical tests, with the ones # with proper control over FWER or FDR with bold titles. 
-fig = plt.figure(facecolor="w", figsize=(14, 3)) +fig = plt.figure(facecolor="w", figsize=(14, 3), layout="constrained") assert len(ts) == len(titles) == len(ps) for ii in range(len(ts)): ax = [ @@ -620,8 +617,6 @@ def plot_t_p(t, p, title, mcc, axes=None): fig.add_subplot(2, 10, 11 + ii), ] plot_t_p(ts[ii], ps[ii], titles[ii], mccs[ii], ax) -fig.tight_layout(pad=0, w_pad=0.05, h_pad=0.1) -plt.show() # %% # The first three columns show the parametric and non-parametric statistics diff --git a/tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py b/tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py index a43fdfd46aa..cf49f48ddf4 100644 --- a/tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py +++ b/tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py @@ -235,8 +235,7 @@ evoked_data = evoked.data times = 1e3 * evoked.times -plt.figure() -plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43) +fig, (ax, ax2) = plt.subplots(2, layout="constrained") T_obs_plot = np.nan * np.ones_like(T_obs) for c, p_val in zip(clusters, cluster_p_values): @@ -252,8 +251,7 @@ vmax = np.max(np.abs(T_obs)) vmin = -vmax -plt.subplot(2, 1, 1) -plt.imshow( +ax.imshow( T_obs[ch_idx], cmap=plt.cm.gray, extent=[times[0], times[-1], freqs[0], freqs[-1]], @@ -262,7 +260,7 @@ vmin=vmin, vmax=vmax, ) -plt.imshow( +ax.imshow( T_obs_plot[ch_idx], cmap=plt.cm.RdBu_r, extent=[times[0], times[-1], freqs[0], freqs[-1]], @@ -271,11 +269,8 @@ vmin=vmin, vmax=vmax, ) -plt.colorbar() -plt.xlabel("Time (ms)") -plt.ylabel("Frequency (Hz)") -plt.title(f"Induced power ({tfr_epochs.ch_names[ch_idx]})") +fig.colorbar(ax.images[0]) +ax.set(xlabel="Time (ms)", ylabel="Frequency (Hz)") +ax.set(title=f"Induced power ({tfr_epochs.ch_names[ch_idx]})") -ax2 = plt.subplot(2, 1, 2) evoked.plot(axes=[ax2], time_unit="s") -plt.show() diff --git a/tutorials/stats-sensor-space/50_cluster_between_time_freq.py b/tutorials/stats-sensor-space/50_cluster_between_time_freq.py index 6ef0eaf3de3..69bdbbc5d91 100644 --- a/tutorials/stats-sensor-space/50_cluster_between_time_freq.py +++ b/tutorials/stats-sensor-space/50_cluster_between_time_freq.py @@ -147,8 +147,7 @@ times = 1e3 * epochs_condition_1.times # change unit to ms -fig, (ax, ax2) = plt.subplots(2, 1, figsize=(6, 4)) -fig.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43) +fig, (ax, ax2) = plt.subplots(2, 1, figsize=(6, 4), layout="constrained") # Compute the difference in evoked to determine which was greater since # we used a 1-way ANOVA which tested for a difference in population means diff --git a/tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py b/tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py index 1dfcfc79f86..a57112bedc4 100644 --- a/tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py +++ b/tutorials/stats-sensor-space/70_cluster_rmANOVA_time_freq.py @@ -172,7 +172,7 @@ effect_labels = ["modality", "location", "modality by location"] -fig, axes = plt.subplots(3, 1, figsize=(6, 6)) +fig, axes = plt.subplots(3, 1, figsize=(6, 6), layout="constrained") # let's visualize our effects by computing f-images for effect, sig, effect_label, ax in zip(fvals, pvals, effect_labels, axes): @@ -198,8 +198,6 @@ ax.set_ylabel("Frequency (Hz)") ax.set_title(f'Time-locked response for "{effect_label}" ({ch_name})') -fig.tight_layout() - # %% # Account for multiple comparisons using FDR versus permutation clustering test # ----------------------------------------------------------------------------- @@ -250,7 +248,7 @@ def stat_fun(*args): F_obs_plot = 
F_obs.copy() F_obs_plot[~clusters[np.squeeze(good_clusters)]] = np.nan -fig, ax = plt.subplots(figsize=(6, 4)) +fig, ax = plt.subplots(figsize=(6, 4), layout="constrained") for f_image, cmap in zip([F_obs, F_obs_plot], ["gray", "autumn"]): c = ax.imshow( f_image, @@ -267,7 +265,6 @@ def stat_fun(*args): f'Time-locked response for "modality by location" ({ch_name})\n' "cluster-level corrected (p <= 0.05)" ) -fig.tight_layout() # %% # Now using FDR: @@ -276,7 +273,7 @@ def stat_fun(*args): F_obs_plot2 = F_obs.copy() F_obs_plot2[~mask.reshape(F_obs_plot.shape)] = np.nan -fig, ax = plt.subplots(figsize=(6, 4)) +fig, ax = plt.subplots(figsize=(6, 4), layout="constrained") for f_image, cmap in zip([F_obs, F_obs_plot2], ["gray", "autumn"]): c = ax.imshow( f_image, @@ -293,7 +290,6 @@ def stat_fun(*args): f'Time-locked response for "modality by location" ({ch_name})\n' "FDR corrected (p <= 0.05)" ) -fig.tight_layout() # %% # Both cluster-level and FDR correction help get rid of potential diff --git a/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py b/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py index db6505fbafe..7a3234c5346 100644 --- a/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py +++ b/tutorials/stats-sensor-space/75_cluster_ftest_spatiotemporal.py @@ -199,7 +199,7 @@ mask[ch_inds, :] = True # initialize figure - fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3)) + fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3), layout="constrained") # plot average test statistic and mark significant sensors f_evoked = mne.EvokedArray(f_map[:, np.newaxis], epochs.info, tmin=0) @@ -251,10 +251,7 @@ (ymin, ymax), sig_times[0], sig_times[-1], color="orange", alpha=0.3 ) - # clean up viz - mne.viz.tight_layout(fig=fig) - fig.subplots_adjust(bottom=0.05) - plt.show() +plt.show() # %% # Permutation statistic for time-frequencies @@ -352,7 +349,7 @@ sig_times = epochs.times[time_inds] # initialize figure - fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3)) + fig, ax_topo = plt.subplots(1, 1, figsize=(10, 3), layout="constrained") # create spatial mask mask = np.zeros((f_map.shape[0], 1), dtype=bool) @@ -414,9 +411,7 @@ ax_colorbar2.set_ylabel("F-stat") # clean up viz - mne.viz.tight_layout(fig=fig) - fig.subplots_adjust(bottom=0.05) - plt.show() +plt.show() # %% diff --git a/tutorials/time-freq/20_sensors_time_frequency.py b/tutorials/time-freq/20_sensors_time_frequency.py index 776a230ecad..07a31e99db5 100644 --- a/tutorials/time-freq/20_sensors_time_frequency.py +++ b/tutorials/time-freq/20_sensors_time_frequency.py @@ -209,7 +209,7 @@ power.plot_topo(baseline=(-0.5, 0), mode="logratio", title="Average power") power.plot([82], baseline=(-0.5, 0), mode="logratio", title=power.ch_names[82]) -fig, axes = plt.subplots(1, 2, figsize=(7, 4), constrained_layout=True) +fig, axes = plt.subplots(1, 2, figsize=(7, 4), layout="constrained") topomap_kw = dict( ch_type="grad", tmin=0.5, tmax=1.5, baseline=(-0.5, 0), mode="logratio", show=False )
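Throughout these hunks the same migration is applied: manual fig.subplots_adjust(...), tight_layout() calls, and hand-placed colorbar axes are dropped in favour of Matplotlib's constrained-layout engine, requested via layout="constrained" (available since Matplotlib 3.5, hence the bumped requirement floors earlier in this patch). As a rough, self-contained sketch of the before/after pattern, where the figure contents and sizes are illustrative only and not code from this patch:

    import matplotlib.pyplot as plt
    import numpy as np

    rng = np.random.default_rng(0)

    # Old pattern (what the hunks above remove): reserve margins by hand.
    #   fig, axes = plt.subplots(1, 2)
    #   fig.subplots_adjust(right=0.8)
    #   cax = fig.add_axes([0.85, 0.2, 0.03, 0.6])
    #   fig.colorbar(im, cax=cax)

    # New pattern (what the hunks above add): let the constrained-layout
    # engine make room for titles, tick labels, and colorbars automatically.
    fig, axes = plt.subplots(1, 2, figsize=(6, 3), layout="constrained")
    for ax in axes:
        im = ax.imshow(rng.standard_normal((10, 10)))
    fig.colorbar(im, ax=axes, shrink=0.6)  # no manual cax or margin tweaking
    fig.suptitle("Constrained layout keeps elements from overlapping")
    plt.show()

The practical difference is that spacing is computed at draw time by the layout engine, so the explicit margin bookkeeping that the removed helpers performed is no longer needed.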