diff --git a/.circleci/config.yml b/.circleci/config.yml index d1866e34ec3..460f672a30a 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -56,7 +56,7 @@ jobs: name: Install 3D rendering libraries \ PyQt5 dependencies \ graphviz \ optipng (for optimized images) command: | sudo apt-get install libosmesa6 libglx-mesa0 libopengl0 libglx0 libdbus-1-3 \ - libxkbcommon-x11-0 \ + libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-shape0 libxcb-xfixes0 libxcb-xinerama0 \ graphviz \ optipng @@ -81,7 +81,6 @@ jobs: command: | python -m pip install --user --upgrade --progress-bar off pip numpy setuptools python -m pip install --user --upgrade --progress-bar off -f "https://vtk.org/download" "vtk>=9" - python -m pip install --user --upgrade --progress-bar off https://github.com/enthought/mayavi/zipball/master python -m pip install --user --upgrade --progress-bar off -r requirements.txt python -m pip uninstall -yq pysurfer mayavi python -m pip install --user --upgrade --progress-bar off --pre sphinx @@ -94,6 +93,10 @@ jobs: paths: - ~/.cache/pip + - run: + name: Check PyQt5 + command: LD_DEBUG=libs python -c "from PyQt5.QtWidgets import QApplication, QWidget; app = QApplication([])" + # Look at what we have and fail early if there is some library conflict - run: name: Check installation diff --git a/.gitignore b/.gitignore index 6f2b6ff377a..8262b46dcac 100644 --- a/.gitignore +++ b/.gitignore @@ -46,6 +46,7 @@ coverage .pytest_cache/ __pycache__/ prof/ +mne/viz/_brain/tests/.ipynb_checkpoints dist/ doc/_build/ diff --git a/.travis.yml b/.travis.yml index bddc44ff936..5d160b7fbab 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,6 +6,7 @@ addons: apt: packages: - libxkbcommon-x11-0 + - libxcb* - libosmesa6 - libglx-mesa0 - libopengl0 @@ -52,22 +53,21 @@ before_install: - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset; fi; - - if [ -z "$CONDA_ENV" ] && [ -z "$CONDA_DEPENDENCIES" ]; then - pip uninstall -yq numpy; - pip install -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" --pre numpy; - pip install -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" scipy pandas scikit-learn matplotlib h5py Pillow; - pip install -f "https://vtk.org/download" "vtk>=9"; - pip install https://github.com/enthought/mayavi/zipball/master; - pip install $PIP_DEPENDENCIES; - pip install --upgrade -r requirements.txt; + - | + if [ -z "$CONDA_ENV" ] && [ -z "$CONDA_DEPENDENCIES" ]; then + pip uninstall -yq numpy + pip install -i "https://pypi.anaconda.org/scipy-wheels-nightly/simple" --pre numpy + pip install -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" scipy pandas scikit-learn matplotlib h5py Pillow + pip install -f "https://vtk.org/download" "vtk>=9" + pip install --upgrade -r requirements.txt else - git clone https://github.com/astropy/ci-helpers.git; - source ci-helpers/travis/setup_conda.sh; + git clone https://github.com/astropy/ci-helpers.git + source ci-helpers/travis/setup_conda.sh if [ ! 
-z "$CONDA_ENV" ]; then - conda activate base; - conda env update --file $CONDA_ENV; - pip uninstall -yq mne; - fi; + conda activate base + conda env update --file $CONDA_ENV + pip uninstall -yq mne + fi fi # Always install these via pip so we get the latest possible versions (testing bugfixes) - pip install --upgrade "pytest<5.4" pytest-sugar pytest-cov pytest-mock pytest-timeout pytest-xdist codecov @@ -141,7 +141,7 @@ script: pip install -e .; python mne/tests/test_evoked.py; fi; - - echo pytest -m "${CONDITION}" --cov=mne -n 1 ${USE_DIRS} + - echo 'pytest -m "${CONDITION}" --cov=mne -n 1 ${USE_DIRS}' - pytest -m "${CONDITION}" --tb=short --cov=mne -n 1 ${USE_DIRS} # run the minimal one with the testing data - if [ "${DEPS}" == "minimal" ]; then diff --git a/MANIFEST.in b/MANIFEST.in index f4541f51d89..dec0dcdd32c 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -32,6 +32,7 @@ recursive-include mne mne/datasets *.csv include mne/io/edf/gdf_encodes.txt include mne/datasets/sleep_physionet/SHA1SUMS include mne/externals/tqdm/_tqdm/tqdm.1 +include mne/viz/_brain/tests/test.ipynb ### Exclude diff --git a/Makefile b/Makefile index 27816bdb37c..a68e628828c 100644 --- a/Makefile +++ b/Makefile @@ -105,10 +105,10 @@ flake: @echo "flake8 passed" codespell: # running manually - @codespell --builtin clear,rare,informal,names -w -i 3 -q 3 -S $(CODESPELL_SKIPS) --ignore-words=ignore_words.txt $(CODESPELL_DIRS) + @codespell --builtin clear,rare,informal,names,usage -w -i 3 -q 3 -S $(CODESPELL_SKIPS) --ignore-words=ignore_words.txt $(CODESPELL_DIRS) codespell-error: # running on travis - @codespell --builtin clear,rare,informal,names -i 0 -q 7 -S $(CODESPELL_SKIPS) --ignore-words=ignore_words.txt $(CODESPELL_DIRS) + @codespell --builtin clear,rare,informal,names,usage -i 0 -q 7 -S $(CODESPELL_SKIPS) --ignore-words=ignore_words.txt $(CODESPELL_DIRS) pydocstyle: @echo "Running pydocstyle" diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 472927d4b04..3f3633a5f78 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -15,11 +15,9 @@ jobs: - job: Style pool: vmImage: 'ubuntu-18.04' - strategy: - matrix: - Python37-64bit-fast: - PYTHON_VERSION: '3.7' - PYTHON_ARCH: 'x64' + variables: + PYTHON_VERSION: '3.8' + PYTHON_ARCH: 'x64' steps: - task: UsePythonVersion@0 inputs: @@ -28,6 +26,7 @@ jobs: addToPath: true displayName: 'Get Python' - bash: | + set -e python -m pip install --upgrade pip setuptools python -m pip install numpy scipy matplotlib sphinx pytest pytest-cov pytest-timeout pytest-sugar flake8 displayName: Install dependencies @@ -89,7 +88,7 @@ jobs: OPENBLAS_NUM_THREADS: '1' steps: - bash: | - sudo apt install libxkbcommon-x11-0 xvfb tcsh + sudo apt install libxkbcommon-x11-0 xvfb tcsh libxcb* displayName: 'Install Ubuntu dependencies' - bash: | source tools/get_minimal_commands.sh @@ -124,16 +123,55 @@ jobs: displayName: 'Get test data' - script: pytest -m "ultraslowtest" --tb=short --cov=mne -n 1 mne displayName: 'Run ultraslow tests' - - script: codecov --root %BUILD_REPOSITORY_LOCALPATH% -t %CODECOV_TOKEN% + - script: codecov --root $BUILD_REPOSITORY_LOCALPATH -t $CODECOV_TOKEN displayName: 'Codecov' env: CODECOV_TOKEN: $(CODECOV_TOKEN) - condition: always() + condition: succeededOrFailed() - task: PublishTestResults@2 inputs: testResultsFiles: 'junit-*.xml' - testRunTitle: 'Publish test results for Python $(python.version)' - condition: always() + testRunTitle: 'Publish test results for $(Agent.JobName)' + failTaskOnFailedTests: true + condition: succeededOrFailed() + + +- job: 
Notebook + pool: + vmImage: 'ubuntu-18.04' + variables: + CONDA_ENV: 'server_environment.yml' + steps: + - bash: | + set -e + wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh --progress=dot:mega + bash miniconda.sh -b -p ~/miniconda + source ~/miniconda/etc/profile.d/conda.sh + conda activate base + conda env update --file $CONDA_ENV + pip uninstall -yq mne + pip install -ve . + pip install pytest pytest-cov pytest-timeout pytest-sugar pytest-xdist flake8 codecov + echo "##vso[task.setvariable variable=PATH]${PATH}" + echo "##vso[task.setvariable variable=LD_LIBRARY_PATH]${LD_LIBRARY_PATH}" + displayName: 'Install dependencies' + - script: mne sys_info + displayName: 'Print config and test access to commands' + - script: python -c "import mne; mne.datasets.testing.data_path(verbose=True)" + displayName: 'Get test data' + - script: pytest --tb=short --cov=mne mne/viz + displayName: 'Run viz tests' + - script: codecov --root $BUILD_REPOSITORY_LOCALPATH -t $CODECOV_TOKEN + displayName: 'Codecov' + env: + CODECOV_TOKEN: $(CODECOV_TOKEN) + condition: succeededOrFailed() + - task: PublishTestResults@2 + inputs: + testResultsFiles: 'junit-*.xml' + testRunTitle: 'Publish test results for $(Agent.JobName)' + failTaskOnFailedTests: true + condition: succeededOrFailed() - job: Windows @@ -220,15 +258,16 @@ jobs: displayName: Print NumPy config - script: python -c "import mne; mne.datasets.testing.data_path(verbose=True)" displayName: 'Get test data' - - script: pytest -m "not ultraslowtest" --tb=short --cov=mne -n 1 mne + - script: pytest -m "not slowtest" --tb=short --cov=mne -n 1 mne displayName: 'Run tests' - script: codecov --root %BUILD_REPOSITORY_LOCALPATH% -t %CODECOV_TOKEN% displayName: 'Codecov' env: CODECOV_TOKEN: $(CODECOV_TOKEN) - condition: always() + condition: succeededOrFailed() - task: PublishTestResults@2 inputs: testResultsFiles: 'junit-*.xml' - testRunTitle: 'Publish test results for Python $(python.version)' - condition: always() + testRunTitle: 'Publish test results for $(Agent.JobName) $(TEST_MODE) $(PYTHON_VERSION)' + failTaskOnFailedTests: true + condition: succeededOrFailed() diff --git a/doc/_static/blender_import_obj/blender_import_obj1.jpg b/doc/_static/blender_import_obj/blender_import_obj1.jpg new file mode 100644 index 00000000000..7aba4ccfebc Binary files /dev/null and b/doc/_static/blender_import_obj/blender_import_obj1.jpg differ diff --git a/doc/_static/blender_import_obj/blender_import_obj2.jpg b/doc/_static/blender_import_obj/blender_import_obj2.jpg new file mode 100644 index 00000000000..928c7480aee Binary files /dev/null and b/doc/_static/blender_import_obj/blender_import_obj2.jpg differ diff --git a/doc/_static/blender_import_obj/blender_import_obj3.jpg b/doc/_static/blender_import_obj/blender_import_obj3.jpg new file mode 100644 index 00000000000..4f486a87da9 Binary files /dev/null and b/doc/_static/blender_import_obj/blender_import_obj3.jpg differ diff --git a/doc/_templates/layout.html b/doc/_templates/layout.html index e0af6795416..731be366ad2 100755 --- a/doc/_templates/layout.html +++ b/doc/_templates/layout.html @@ -64,7 +64,7 @@ {% endblock %} {# put the sidebar before the body #} -{% block sidebar1 %}{{ sidebar() }}{% endblock %} +{% block sidebar1 %}{% endblock %} {% block sidebar2 %}{% endblock %} {%- block footer %} diff --git a/doc/changes/latest.inc b/doc/changes/latest.inc index 7f5ded56550..230dd6821d6 100644 --- a/doc/changes/latest.inc +++ b/doc/changes/latest.inc @@ -27,12 +27,18 @@ Changelog - Add 
:meth:`mne.MixedSourceEstimate.surface` and :meth:`mne.MixedSourceEstimate.volume` methods to allow surface and volume extraction by `Eric Larson`_ +- Add :meth:`mne.VectorSourceEstimate.project` to project vector source estimates onto the direction of maximum source power by `Eric Larson`_ + - Add support to :func:`mne.extract_label_time_course` for vector-valued and volumetric source estimates by `Eric Larson`_ - Add method :meth:`mne.VolSourceEstimate.in_label` by `Eric Larson`_ - Add support for mixed source spaces to :func:`mne.compute_source_morph` by `Eric Larson`_ +- Add support for omitting the SDR step in volumetric morphing by passing ``n_iter_sdr=()`` to `mne.compute_source_morph` by `Eric Larson`_ + +- Add support for passing a string argument to ``bg_img`` in `mne.viz.plot_volume_source_estimates` by `Eric Larson`_ + - Add support for providing the destination surface source space in the ``src_to`` argument of :func:`mne.compute_source_morph` by `Eric Larson`_ - Add ``tol_kind`` option to :func:`mne.compute_rank` by `Eric Larson`_ @@ -73,6 +79,8 @@ Changelog - Add estimation method legend to :func:`mne.viz.plot_snr_estimate` by `Eric Larson`_ +- Add support to `mne.SourceSpaces.export_volume` for ``mri_resolution='sparse'`` to color only the nearest-neighbor voxels instead of entire regions by `Eric Larson`_ + - Add ``axes`` argument to :func:`mne.viz.plot_evoked_white`, :meth:`mne.Evoked.plot_white`, and :func:`mne.viz.plot_snr_estimate` by `Eric Larson`_ - Add ``sources`` and ``detectors`` options for fNIRS use of :meth:`mne.viz.plot_alignment` allowing plotting of optode locations in addition to channel midpoint ``channels`` and ``path`` between fNIRS optodes by `Kyle Mathewson`_ @@ -85,12 +93,20 @@ Changelog - BIDS conformity: When saving FIF files to disk and the files are split into parts, the ``split_naming='bids'`` parameter now uses a "_split-%d" naming instead of the previous "_part-%d", by `Stefan Appelhoff`_ +- Add a better error message when trying to save incompatible `~mne.Evoked` objects to the same file by `Eric Larson`_ + - Add support for loading complex numbers from mat files by `Thomas Hartmann`_ - Add generic reader function :func:`mne.io.read_raw` that loads files based on their extensions (it wraps the underlying specific ``read_raw_xxx`` functions) by `Clemens Brunner`_ - Add ``'auto'`` option to :meth:`mne.preprocessing.ICA.find_bads_ecg` to automatically determine the threshold for the CTPS method by `Yu-Han Luo`_ +- Add a ``notebook`` 3d backend for visualization in Jupyter notebooks with :func:`mne.viz.set_3d_backend` by `Guillaume Favelier`_ + +- Add support for reading and writing surfaces in Wavefront .obj format to :func:`mne.read_surface` and :func:`mne.write_surface` by `Marijn van Vliet`_ + +- Add a tutorial on how to manually fix BEM meshes in Blender by `Marijn van Vliet`_ and `Ezequiel Mikulan`_ + Bug ~~~ @@ -100,10 +116,16 @@ Bug - Fix bug with :func:`mne.setup_volume_source_space` when ``volume_label`` was supplied where voxels slightly (in a worst case, about 37% times ``pos`` in distance) outside the voxel-grid-based bounds of regions were errantly included, by `Eric Larson`_ +- Fix bug with `mne.SourceSpaces.export_volume` with ``use_lut=False`` where no values were written by `Eric Larson`_ + - Fix bug with :func:`mne.preprocessing.annotate_movement` where bad data segments, specified in ``raw.annotations``, would be handled incorrectly by `Luke Bloy`_ - Fix bug with :func:`mne.compute_source_morph` when more than one volume source space
was present (e.g., when using labels) where only the first label would be interpolated when ``mri_resolution=True`` by `Eric Larson`_ +- Fix bug with :func:`mne.compute_source_morph` when morphing to a volume source space when ``src_to`` is used and the destination subject is not ``fsaverage`` by `Eric Larson`_ + +- Fix bug with :func:`mne.compute_source_morph` where outermost voxels in the destination source space could be errantly omitted by `Eric Larson`_ + - Fix bug with :func:`mne.minimum_norm.compute_source_psd_epochs` and :func:`mne.minimum_norm.source_band_induced_power`, which raised errors when ``method='eLORETA'``, by `Eric Larson`_ - Fix bug with :func:`mne.minimum_norm.apply_inverse` where the explained variance did not work for complex data by `Eric Larson`_ @@ -132,6 +154,8 @@ Bug - Fix default to be ``foreground=None`` in :func:`mne.viz.plot_source_estimates` to use white or black text based on the background color by `Eric Larson`_ +- Fix bug with writing EGI and CTF `mne.Info` to H5 format, e.g., with `mne.time_frequency.AverageTFR.save` by `Eric Larson`_ + - Fix bug with :func:`mne.io.Raw.plot` where toggling all projectors did not actually take effect by `Eric Larson`_ - Fix bug with :func:`mne.read_epochs` when loading data in complex format with ``preload=False`` by `Eric Larson`_ @@ -211,6 +235,8 @@ API - The method ``stc_mixed.plot_surface`` for a :class:`mne.MixedSourceEstimate` has been deprecated in favor of :meth:`stc.surface().plot(...) ` by `Eric Larson`_ +- The method ``stc.normal`` for :class:`mne.VectorSourceEstimate` has been deprecated in favor of :meth:`stc.project('nn', src) ` by `Eric Larson`_ + - Add ``use_dev_head_trans`` parameter to :func:`mne.preprocessing.annotate_movement` to allow choosing whether the device-to-head transform is used to define the fixed cHPI coordinates by `Luke Bloy`_ - The function ``mne.channels.read_dig_captrack`` will be deprecated in version 0.22 in favor of :func:`mne.channels.read_dig_captrak` to correct the spelling error: "captraCK" -> "captraK", by `Stefan Appelhoff`_ diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 063d340c796..04ff24602e3 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -305,3 +305,5 @@ .. _Lx37: https://github.com/Lx37 .. _Kyle Mathewson: https://github.com/kylemath + +.. _Ezequiel Mikulan: https://github.com/ezemikulan diff --git a/doc/conf.py b/doc/conf.py index ebbf4e411bc..4189d1cfedd 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -325,7 +325,7 @@ # There are some problems with dipy's redirect: # https://github.com/nipy/dipy/issues/1955 'dipy': ('https://dipy.org/documentation/latest', - 'https://dipy.org/documentation/1.0.0./objects.inv/'), + 'https://dipy.org/documentation/1.1.1./objects.inv/'), 'mne_realtime': ('https://mne.tools/mne-realtime', None), 'picard': ('https://pierreablin.github.io/picard/', None), } diff --git a/doc/index.rst b/doc/index.rst index 2b19bfd598a..add0c8d14ff 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -52,8 +52,8 @@
  • Agence Nationale de la Recherche: 14-NEUC-0002-01
    IDEX Paris-Saclay, 11-IDEX-0003-02
  • Paris-Saclay Center for Data Science: PARIS-SACLAY
  • Google: Summer of code (×6)
-  • Amazon: - AWS Research Grants
-  • Chan Zuckerberg Initiative: - Essential Open Source Software for Science
+  • Amazon: AWS Research Grants
+  • Chan Zuckerberg Initiative: Essential Open Source Software for Science
diff --git a/doc/install/advanced.rst b/doc/install/advanced.rst index db2ad772df5..56054d229d2 100644 --- a/doc/install/advanced.rst +++ b/doc/install/advanced.rst @@ -61,6 +61,19 @@ If you use another Python setup and you encounter some difficulties please report them on the `MNE mailing list`_ or on the `GitHub issues page`_ to get assistance. +It is also possible to interact with the 3D plots without installing Qt by using +the notebook 3d backend: + +.. code-block:: ipython + + In [1]: import mne + In [2]: mne.viz.set_3d_backend("notebook") + + +The notebook 3d backend requires PyVista to be installed along with other packages; +please follow :doc:`mne_python`. + + .. _installing_master: Using the development version of MNE-Python (latest master) diff --git a/doc/install/mne_python.rst index 3fcfe66cd9a..6661ce2d6a9 100644 --- a/doc/install/mne_python.rst +++ b/doc/install/mne_python.rst @@ -124,6 +124,7 @@ Once you have Anaconda installed, the easiest way to install MNE-Python with all dependencies is to update your base Anaconda environment: .. _environment file: https://raw.githubusercontent.com/mne-tools/mne-python/master/environment.yml +.. _server environment file: https://raw.githubusercontent.com/mne-tools/mne-python/master/server_environment.yml .. collapse:: |linux| Linux @@ -179,6 +180,11 @@ you can create a new dedicated environment (here called "mne") with .. collapse:: |hand-stop-o| If you are installing on a headless server... :class: danger + With `pyvista`_: + Follow the steps described in :ref:`standard_instructions` + but use `server environment file`_ instead of `environment file`_. + + With `mayavi`_: Installing `mayavi`_ requires a running `X server`_. If you are installing MNE-Python into a computer with no display connected to it, you can try removing `mayavi`_ from the :file:`environment.yml` file before @@ -294,6 +300,7 @@ Python development are: .. LINKS .. _`mayavi`: https://docs.enthought.com/mayavi/mayavi/ +.. _`pyvista`: https://docs.pyvista.org/ .. _`X server`: https://en.wikipedia.org/wiki/X_Window_System .. _`xvfb`: https://en.wikipedia.org/wiki/Xvfb .. _`off-screen rendering`: https://docs.enthought.com/mayavi/mayavi/tips.html#off-screen-rendering diff --git a/doc/overview/faq.rst index c028af604e7..6fa1d5e775c 100644 --- a/doc/overview/faq.rst +++ b/doc/overview/faq.rst @@ -383,8 +383,8 @@ order of difficulty): :ref:`mne watershed_bem`. 2. Changing the ``--atlas`` and ``--gcaatlas`` options of :ref:`mne watershed_bem`. -3. Manually editing the meshes (see `this tutorial - `__. +3. Manually editing the meshes (see :ref:`this tutorial - `). 4. Manually running mri_watershed_ with various FreeSurfer flags (e.g., ``-less`` to fix the output). 5. Going farther back in your Freesurfer pipeline to fix the problem.
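To make the backend documented above concrete, here is a minimal sketch of how the new ``notebook`` 3d backend would be used for source-estimate visualization. The sample-data paths and the ``stc.plot`` call are illustrative, borrowed from the standard MNE examples rather than from this diff, and assume the sample dataset has been downloaded:

.. code-block:: python

    import mne

    # Select the Jupyter notebook 3D backend introduced in this changeset
    mne.viz.set_3d_backend("notebook")

    # Any subsequent 3D plot renders inline in the notebook, e.g. a source
    # estimate from the sample dataset (assumed available locally):
    data_path = mne.datasets.sample.data_path()
    stc = mne.read_source_estimate(data_path + '/MEG/sample/sample_audvis-meg')
    brain = stc.plot(subject='sample', hemi='lh',
                     subjects_dir=data_path + '/subjects')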
diff --git a/doc/python_reference.rst b/doc/python_reference.rst index 2f4909f843b..60b318ed649 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -621,6 +621,7 @@ Forward Modeling setup_source_space setup_volume_source_space surface.complete_surface_info + surface.read_curvature use_coil_def write_bem_surfaces write_trans diff --git a/environment.yml b/environment.yml index a436f10d439..f0e564363b9 100644 --- a/environment.yml +++ b/environment.yml @@ -9,7 +9,6 @@ dependencies: - scipy - matplotlib - numba -- pyqt>=5.9 - pandas>=0.18 - xlrd - scikit-learn @@ -37,7 +36,7 @@ dependencies: - https://github.com/numpy/numpydoc/archive/master.zip - imageio-ffmpeg>=0.4.1 - vtk - - pyvista==0.24 + - pyvista==0.24.3 - https://github.com/enthought/mayavi/archive/master.zip - PySurfer[save_movie] - dipy --only-binary dipy @@ -49,4 +48,4 @@ dependencies: - codespell - python-picard - tqdm - - PyQt5>=5.10,<5.14; platform_system == "Darwin" + - PyQt5>=5.10 diff --git a/examples/inverse/plot_mixed_source_space_inverse.py b/examples/inverse/plot_mixed_source_space_inverse.py index 04ed50e1471..81a5867697b 100644 --- a/examples/inverse/plot_mixed_source_space_inverse.py +++ b/examples/inverse/plot_mixed_source_space_inverse.py @@ -3,7 +3,7 @@ Compute MNE inverse solution on evoked data in a mixed source space =================================================================== -Create a mixed source space and compute MNE inverse solution on an +Create a mixed source space and compute an MNE inverse solution on an evoked dataset. """ # Author: Annalisa Pascarella @@ -79,6 +79,8 @@ print('the src space contains %d spaces and %d points' % (len(src), n)) ############################################################################### +# Viewing the source space +# ------------------------ # We could write the mixed source space with:: # # >>> write_source_spaces(fname_mixed_src, src, overwrite=True) @@ -87,7 +89,6 @@ nii_fname = op.join(bem_dir, '%s-mixed-src.nii' % subject) src.export_volume(nii_fname, mri_resolution=True, overwrite=True) - plotting.plot_img(nii_fname, cmap='nipy_spectral') ############################################################################### diff --git a/examples/inverse/plot_morph_volume_stc.py b/examples/inverse/plot_morph_volume_stc.py index a6935de3fc9..4b7581ab007 100644 --- a/examples/inverse/plot_morph_volume_stc.py +++ b/examples/inverse/plot_morph_volume_stc.py @@ -9,7 +9,8 @@ :class:`mne.VolSourceEstimate` to a common reference space. We achieve this using :class:`mne.SourceMorph`. Pre-computed data will be morphed based on an affine transformation and a nonlinear registration method -known as Symmetric Diffeomorphic Registration (SDR) by Avants et al. [1]_. +known as Symmetric Diffeomorphic Registration (SDR) by +:footcite:`AvantsEtAl2008`. Transformation is estimated from the subject's anatomical T1 weighted MRI (brain) to `FreeSurfer's 'fsaverage' T1 weighted MRI (brain) @@ -18,13 +19,6 @@ Afterwards the transformation will be applied to the volumetric source estimate. The result will be plotted, showing the fsaverage T1 weighted anatomical MRI, overlaid with the morphed volumetric source estimate. - -References ----------- -.. [1] Avants, B. B., Epstein, C. L., Grossman, M., & Gee, J. C. (2009). - Symmetric Diffeomorphic Image Registration with Cross- Correlation: - Evaluating Automated Labeling of Elderly and Neurodegenerative - Brain, 12(1), 26-41. 
""" # Author: Tommy Clausner # @@ -153,3 +147,6 @@ # # >>> morph.apply(stc) # +# References +# ---------- +# .. footbibliography:: diff --git a/examples/inverse/plot_vector_mne_solution.py b/examples/inverse/plot_vector_mne_solution.py index 8ed50bf87ae..90d5d851151 100644 --- a/examples/inverse/plot_vector_mne_solution.py +++ b/examples/inverse/plot_vector_mne_solution.py @@ -21,6 +21,7 @@ # # License: BSD (3-clause) +import numpy as np import mne from mne.datasets import sample from mne.minimum_norm import read_inverse_operator, apply_inverse @@ -52,6 +53,21 @@ brain = stc.plot( initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir) +############################################################################### +# Plot the activation in the direction of maximal power for this data: + +stc_max, directions = stc.project('pca', src=inv['src']) +# These directions must by design be close to the normals because this +# inverse was computed with loose=0.2: +print('Absolute cosine similarity between source normals and directions: ' + f'{np.abs(np.sum(directions * inv["source_nn"][2::3], axis=-1)).mean()}') +brain_max = stc_max.plot( + initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir, + time_label='Max power') +brain_normal = stc.project('normal', inv['src'])[0].plot( + initial_time=peak_time, hemi='lh', subjects_dir=subjects_dir, + time_label='Normal') + ############################################################################### # You can also do this with a fixed-orientation inverse. It looks a lot like # the result above because the ``loose=0.2`` orientation constraint keeps diff --git a/examples/simulation/plot_simulate_evoked_data.py b/examples/simulation/plot_simulate_evoked_data.py index 76065d21e36..a205c360968 100644 --- a/examples/simulation/plot_simulate_evoked_data.py +++ b/examples/simulation/plot_simulate_evoked_data.py @@ -46,7 +46,7 @@ ############################################################################### # Generate source time courses from 2 dipoles and the correspond evoked data -times = np.arange(300, dtype=np.float) / raw.info['sfreq'] - 0.1 +times = np.arange(300, dtype=np.float64) / raw.info['sfreq'] - 0.1 rng = np.random.RandomState(42) diff --git a/examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py b/examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py index 08a61b45785..dbc8e5954e3 100644 --- a/examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py +++ b/examples/simulation/plot_simulated_raw_data_using_subject_anatomy.py @@ -143,7 +143,7 @@ def data_fun(times, latency, duration): # event, the second is not used. The third one is the event id, which is # different for each of the 4 areas. -times = np.arange(150, dtype=np.float) / info['sfreq'] +times = np.arange(150, dtype=np.float64) / info['sfreq'] duration = 0.03 rng = np.random.RandomState(7) source_simulator = mne.simulation.SourceSimulator(src, tstep=tstep) diff --git a/examples/time_frequency/plot_time_frequency_simulated.py b/examples/time_frequency/plot_time_frequency_simulated.py index da22307274e..b93a2f64481 100644 --- a/examples/time_frequency/plot_time_frequency_simulated.py +++ b/examples/time_frequency/plot_time_frequency_simulated.py @@ -43,7 +43,7 @@ noise = rng.randn(n_epochs, len(ch_names), n_times) # Add a 50 Hz sinusoidal burst to the noise and ramp it. -t = np.arange(n_times, dtype=np.float) / sfreq +t = np.arange(n_times, dtype=np.float64) / sfreq signal = np.sin(np.pi * 2. * 50. 
* t) # 50 Hz sinusoid signal signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing on_time = np.logical_and(t >= 0.45, t <= 0.55) diff --git a/ignore_words.txt b/ignore_words.txt index 9153630e536..f7769cf756c 100644 --- a/ignore_words.txt +++ b/ignore_words.txt @@ -32,3 +32,4 @@ dof nwe thead sherif +master diff --git a/mne/beamformer/tests/test_dics.py b/mne/beamformer/tests/test_dics.py index 597b73e2b9a..261e3452220 100644 --- a/mne/beamformer/tests/test_dics.py +++ b/mne/beamformer/tests/test_dics.py @@ -260,7 +260,7 @@ def test_make_dics(tmpdir, _load_forward, idx, mat_tol, vol_tol): assert isinstance(filters, Beamformer) assert isinstance(filters_read, Beamformer) for key in ['tmin', 'tmax']: # deal with strictness of object_diff - setattr(filters['csd'], key, np.float(getattr(filters['csd'], key))) + setattr(filters['csd'], key, np.float64(getattr(filters['csd'], key))) assert object_diff(filters, filters_read) == '' diff --git a/mne/beamformer/tests/test_lcmv.py b/mne/beamformer/tests/test_lcmv.py index 2c3fcac20f5..9a51edd4578 100644 --- a/mne/beamformer/tests/test_lcmv.py +++ b/mne/beamformer/tests/test_lcmv.py @@ -428,6 +428,7 @@ def test_make_lcmv(tmpdir, reg, proj): noise_cov=noise_cov) +@pytest.mark.slowtest @pytest.mark.parametrize('weight_norm', (None, 'unit-noise-gain', 'nai')) @pytest.mark.parametrize('pick_ori', (None, 'max-power', 'vector')) def test_make_lcmv_sphere(pick_ori, weight_norm): diff --git a/mne/bem.py b/mne/bem.py index 6d6219b00ae..5af9e18e15a 100644 --- a/mne/bem.py +++ b/mne/bem.py @@ -1341,7 +1341,7 @@ def _read_bem_surface(fid, this, def_coord_frame, s_id=None): if tag is None: raise ValueError('Vertex data not found') - res['rr'] = tag.data.astype(np.float) # XXX : double because of mayavi bug + res['rr'] = tag.data.astype(np.float64) # XXX : double because of mayavi if res['rr'].shape[0] != res['np']: raise ValueError('Vertex information is incorrect') diff --git a/mne/channels/channels.py b/mne/channels/channels.py index 8d52102c21c..2b8ae0ab81f 100644 --- a/mne/channels/channels.py +++ b/mne/channels/channels.py @@ -373,7 +373,7 @@ def _set_channel_positions(self, pos, names): if len(pos) != len(names): raise ValueError('Number of channel positions not equal to ' 'the number of names given.') - pos = np.asarray(pos, dtype=np.float) + pos = np.asarray(pos, dtype=np.float64) if pos.shape[-1] != 3 or pos.ndim != 2: msg = ('Channel positions must have the shape (n_points, 3) ' 'not %s.' % (pos.shape,)) diff --git a/mne/channels/interpolation.py b/mne/channels/interpolation.py index 4ce20efc315..87b4f36a200 100644 --- a/mne/channels/interpolation.py +++ b/mne/channels/interpolation.py @@ -136,8 +136,8 @@ def _interpolate_bads_eeg(inst, origin, verbose=None): inst : mne.io.Raw, mne.Epochs or mne.Evoked The data to interpolate. Must be preloaded. 
""" - bads_idx = np.zeros(len(inst.ch_names), dtype=np.bool) - goods_idx = np.zeros(len(inst.ch_names), dtype=np.bool) + bads_idx = np.zeros(len(inst.ch_names), dtype=bool) + goods_idx = np.zeros(len(inst.ch_names), dtype=bool) picks = pick_types(inst.info, meg=False, eeg=True, exclude=[]) inst.info._check_consistency() diff --git a/mne/channels/layout.py b/mne/channels/layout.py index 83bc29266de..2cfa301d31f 100644 --- a/mne/channels/layout.py +++ b/mne/channels/layout.py @@ -126,7 +126,7 @@ def _read_lout(fname): name = chkind + ' ' + nb else: cid, x, y, dx, dy, name = splits - pos.append(np.array([x, y, dx, dy], dtype=np.float)) + pos.append(np.array([x, y, dx, dy], dtype=np.float64)) names.append(name) ids.append(int(cid)) @@ -147,7 +147,7 @@ def _read_lay(fname): name = chkind + ' ' + nb else: cid, x, y, dx, dy, name = splits - pos.append(np.array([x, y, dx, dy], dtype=np.float)) + pos.append(np.array([x, y, dx, dy], dtype=np.float64)) names.append(name) ids.append(int(cid)) @@ -637,7 +637,7 @@ def _auto_topomap_coords(info, picks, ignore_overlap, to_sphere, sphere): positions overlap, an error is thrown. to_sphere : bool If True, the radial distance of spherical coordinates is ignored, in - effect fitting the xyz-coordinates to a sphere. Defaults to True. + effect fitting the xyz-coordinates to a sphere. sphere : array-like | str The head sphere definition. diff --git a/mne/channels/tests/test_interpolation.py b/mne/channels/tests/test_interpolation.py index 83b0c02926b..4c577bc51e4 100644 --- a/mne/channels/tests/test_interpolation.py +++ b/mne/channels/tests/test_interpolation.py @@ -45,6 +45,7 @@ def _load_data(kind): @pytest.mark.parametrize('offset', (0., 0.1)) @pytest.mark.filterwarnings('ignore:.*than 20 mm from head frame origin.*') +@pytest.mark.slowtest def test_interpolation_eeg(offset): """Test interpolation of EEG channels.""" raw, epochs_eeg = _load_data('eeg') @@ -183,13 +184,14 @@ def _this_interpol(inst, ref_meg=False): return inst +@pytest.mark.slowtest def test_interpolate_meg_ctf(): """Test interpolation of MEG channels from CTF system.""" thresh = .85 tol = .05 # assert the new interpol correlates at least .05 "better" bad = 'MLC22-2622' # select a good channel to test the interpolation - raw = io.read_raw_fif(raw_fname_ctf, preload=True) # 3 secs + raw = io.read_raw_fif(raw_fname_ctf).crop(0, 1.0).load_data() # 3 secs raw.apply_gradient_compensation(3) # Show that we have to exclude ref_meg for interpolating CTF MEG-channels diff --git a/mne/channels/tests/test_montage.py b/mne/channels/tests/test_montage.py index 618b665c525..7c957c28814 100644 --- a/mne/channels/tests/test_montage.py +++ b/mne/channels/tests/test_montage.py @@ -336,6 +336,9 @@ def test_montage_readers( assert_allclose(actual_ch_pos[kk], expected_ch_pos[kk], atol=1e-5) for d1, d2 in zip(dig_montage.dig, expected_dig.dig): assert d1['coord_frame'] == d2['coord_frame'] + for key in ('coord_frame', 'ident', 'kind'): + assert isinstance(d1[key], int) + assert isinstance(d2[key], int) @testing.requires_testing_data diff --git a/mne/chpi.py b/mne/chpi.py index 0e6a4f83806..4a1488702d3 100644 --- a/mne/chpi.py +++ b/mne/chpi.py @@ -557,7 +557,7 @@ def _fit_chpi_amplitudes(raw, time_sl, hpi): # loads hpi_stim channel chpi_data = raw[hpi['hpi_pick'], time_sl][0] - ons = (np.round(chpi_data).astype(np.int) & + ons = (np.round(chpi_data).astype(np.int64) & hpi['on'][:, np.newaxis]).astype(bool) n_on = ons.all(axis=-1).sum(axis=0) if not (n_on >= 3).all(): diff --git a/mne/conftest.py b/mne/conftest.py 
index e1780d3d75e..f8f000034f9 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -88,6 +88,11 @@ def pytest_configure(config): ignore:.*trait.*handler.*deprecated.*:DeprecationWarning ignore:.*rich_compare.*metadata.*deprecated.*:DeprecationWarning ignore:.*In future, it will be an error for 'np.bool_'.*:DeprecationWarning + ignore:.*`np.bool` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.int` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.float` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.object` is a deprecated alias.*:DeprecationWarning + ignore:.*`np.long` is a deprecated alias:DeprecationWarning ignore:.*Converting `np\.character` to a dtype is deprecated.*:DeprecationWarning ignore:.*sphinx\.util\.smartypants is deprecated.*: ignore:.*pandas\.util\.testing is deprecated.*: @@ -273,13 +278,32 @@ def renderer_interactive(backend_name_interactive): def _check_skip_backend(name): - from mne.viz.backends.tests._utils import has_mayavi, has_pyvista + from mne.viz.backends.tests._utils import (has_mayavi, has_pyvista, + has_pyqt5, has_imageio_ffmpeg) if name == 'mayavi': if not has_mayavi(): pytest.skip("Test skipped, requires mayavi.") elif name == 'pyvista': if not has_pyvista(): pytest.skip("Test skipped, requires pyvista.") + if not has_imageio_ffmpeg(): + pytest.skip("Test skipped, requires imageio-ffmpeg") + if not has_pyqt5(): + pytest.skip("Test skipped, requires PyQt5.") + + +@pytest.fixture() +def renderer_notebook(): + """Verify that pytest_notebook is installed.""" + from mne.viz.backends.renderer import _use_test_3d_backend + try: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=DeprecationWarning) + from pytest_notebook import execution + except ImportError: + pytest.skip("Test skipped, requires pytest-notebook") + with _use_test_3d_backend('notebook'): + yield execution @pytest.fixture(scope='function', params=[testing._pytest_param()]) diff --git a/mne/connectivity/effective.py b/mne/connectivity/effective.py index 9d42ee710e2..3de4864d14a 100644 --- a/mne/connectivity/effective.py +++ b/mne/connectivity/effective.py @@ -133,7 +133,7 @@ def phase_slope_index(data, indices=None, sfreq=2 * np.pi, # allocate space for output out_shape = list(cohy.shape) out_shape[freq_dim] = n_bands - psi = np.zeros(out_shape, dtype=np.float) + psi = np.zeros(out_shape, dtype=np.float64) # allocate accumulator acc_shape = copy.copy(out_shape) diff --git a/mne/connectivity/spectral.py b/mne/connectivity/spectral.py index 94c796de06f..2aa99f423ee 100644 --- a/mne/connectivity/spectral.py +++ b/mne/connectivity/spectral.py @@ -980,7 +980,7 @@ def _prepare_connectivity(epoch_block, tmin, tmax, fmin, fmax, sfreq, indices, raise ValueError('define frequencies of interest using ' 'cwt_freqs') else: - cwt_freqs = cwt_freqs.astype(np.float) + cwt_freqs = cwt_freqs.astype(np.float64) if any(cwt_freqs > (sfreq / 2.)): raise ValueError('entries in cwt_freqs cannot be ' 'larger than Nyquist (sfreq / 2)') @@ -1003,7 +1003,7 @@ def _prepare_connectivity(epoch_block, tmin, tmax, fmin, fmax, sfreq, indices, 5. 
/ np.min(fmin), five_cycle_freq)) # create a frequency mask for all bands - freq_mask = np.zeros(len(freqs_all), dtype=np.bool) + freq_mask = np.zeros(len(freqs_all), dtype=bool) for f_lower, f_upper in zip(fmin, fmax): freq_mask |= ((freqs_all >= f_lower) & (freqs_all <= f_upper)) diff --git a/mne/cov.py b/mne/cov.py index 69f7530f225..8d9776d27b0 100644 --- a/mne/cov.py +++ b/mne/cov.py @@ -846,8 +846,8 @@ def _unpack_epochs(epochs): # prepare mean covs n_epoch_types = len(epochs) data_mean = [0] * n_epoch_types - n_samples = np.zeros(n_epoch_types, dtype=np.int) - n_epochs = np.zeros(n_epoch_types, dtype=np.int) + n_samples = np.zeros(n_epoch_types, dtype=np.int64) + n_epochs = np.zeros(n_epoch_types, dtype=np.int64) for ii, epochs_t in enumerate(epochs): @@ -1757,7 +1757,7 @@ def compute_whitener(noise_cov, info=None, picks=None, rank=None, nzero = (eig > 0) eig[~nzero] = 0. # get rid of numerical noise (negative) ones - W = np.zeros((n_chan, 1), dtype=np.float) + W = np.zeros((n_chan, 1), dtype=np.float64) W[nzero, 0] = 1.0 / np.sqrt(eig[nzero]) # Rows of eigvec are the eigenvectors W = W * noise_cov['eigvec'] # C ** -0.5 @@ -1963,7 +1963,7 @@ def _write_cov(fid, cov): else: # Store only lower part of covariance matrix dim = cov['dim'] - mask = np.tril(np.ones((dim, dim), dtype=np.bool)) > 0 + mask = np.tril(np.ones((dim, dim), dtype=bool)) > 0 vals = cov['data'][mask].ravel() write_double(fid, FIFF.FIFF_MNE_COV, vals) diff --git a/mne/datasets/utils.py b/mne/datasets/utils.py index cfe5f58dd28..ebdb25300cb 100644 --- a/mne/datasets/utils.py +++ b/mne/datasets/utils.py @@ -239,7 +239,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, path = _get_path(path, key, name) # To update the testing or misc dataset, push commits, then make a new # release on GitHub. Then update the "releases" variable: - releases = dict(testing='0.92', misc='0.6') + releases = dict(testing='0.93', misc='0.6') # And also update the "md5_hashes['testing']" variable below. 
# To update any other dataset, update the data archive itself (upload @@ -326,7 +326,7 @@ def _data_path(path=None, force_update=False, update_path=True, download=True, sample='12b75d1cb7df9dfb4ad73ed82f61094f', somato='ea825966c0a1e9b2f84e3826c5500161', spm='9f43f67150e3b694b523a21eb929ea75', - testing='42daafd1b882da2ef041de860ca6e771', + testing='2eb998a0893a28faedd583973c70b517', multimodal='26ec847ae9ab80f58f204d09e2c08367', fnirs_motor='c4935d19ddab35422a69f3326a01fef8', opm='370ad1dcfd5c47e029e692c85358a374', diff --git a/mne/decoding/base.py b/mne/decoding/base.py index 934f45d5af5..a7a9c64b9c6 100644 --- a/mne/decoding/base.py +++ b/mne/decoding/base.py @@ -224,7 +224,7 @@ def _set_cv(cv, estimator=None, X=None, y=None): # Setup CV from sklearn import model_selection as models from sklearn.model_selection import (check_cv, StratifiedKFold, KFold) - if isinstance(cv, (int, np.int)): + if isinstance(cv, (int, np.int64)): XFold = StratifiedKFold if est_is_classifier else KFold cv = XFold(n_splits=cv) elif isinstance(cv, str): diff --git a/mne/decoding/tests/test_receptive_field.py b/mne/decoding/tests/test_receptive_field.py index 859a5e6f907..418eeed69ec 100644 --- a/mne/decoding/tests/test_receptive_field.py +++ b/mne/decoding/tests/test_receptive_field.py @@ -108,7 +108,7 @@ def test_time_delay(): _delay_time_series(X, tmin, tmax, sfreq=[1]) # Delays must be int/float with pytest.raises(TypeError, match='.*complex.*'): - _delay_time_series(X, np.complex(tmin), tmax, 1) + _delay_time_series(X, np.complex128(tmin), tmax, 1) # Make sure swapaxes works start, stop = int(round(tmin * isfreq)), int(round(tmax * isfreq)) + 1 n_delays = stop - start diff --git a/mne/dipole.py b/mne/dipole.py index 3bf69519b29..6361ed79472 100644 --- a/mne/dipole.py +++ b/mne/dipole.py @@ -1306,7 +1306,7 @@ def fit_dipole(evoked, cov, bem, trans=None, min_dist=5., n_jobs=1, # cov = prepare_noise_cov(cov, info_nb, info_nb['ch_names'], verbose=False) # nzero = (cov['eig'] > 0) # n_chan = len(info_nb['ch_names']) - # whitener = np.zeros((n_chan, n_chan), dtype=np.float) + # whitener = np.zeros((n_chan, n_chan), dtype=np.float64) # whitener[nzero, nzero] = 1.0 / np.sqrt(cov['eig'][nzero]) # whitener = np.dot(whitener, cov['eigvec']) diff --git a/mne/epochs.py b/mne/epochs.py index 96a9fc5b2b3..535a1a43ba1 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -904,7 +904,7 @@ def subtract_evoked(self, evoked=None): else: if self._offset is None: self._offset = np.zeros((len(self.ch_names), len(self.times)), - dtype=np.float) + dtype=np.float64) self._offset[ep_picks] -= evoked.data[picks] logger.info('[done]') @@ -2900,35 +2900,37 @@ def add_channels_epochs(epochs_list, verbose=None): return epochs -def _compare_epochs_infos(info1, info2, ind): +def _compare_epochs_infos(info1, info2, name): """Compare infos.""" + if not isinstance(name, str): # passed epochs index + name = f'epochs[{name:d}]' info1._check_consistency() info2._check_consistency() if info1['nchan'] != info2['nchan']: - raise ValueError('epochs[%d][\'info\'][\'nchan\'] must match' % ind) + raise ValueError(f'{name}.info[\'nchan\'] must match') if info1['bads'] != info2['bads']: - raise ValueError('epochs[%d][\'info\'][\'bads\'] must match' % ind) + raise ValueError(f'{name}.info[\'bads\'] must match') if info1['sfreq'] != info2['sfreq']: - raise ValueError('epochs[%d][\'info\'][\'sfreq\'] must match' % ind) + raise ValueError(f'{name}.info[\'sfreq\'] must match') if set(info1['ch_names']) != set(info2['ch_names']): - raise 
ValueError('epochs[%d][\'info\'][\'ch_names\'] must match' % ind) + raise ValueError(f'{name}.info[\'ch_names\'] must match') if len(info2['projs']) != len(info1['projs']): - raise ValueError('SSP projectors in epochs files must be the same') + raise ValueError(f'SSP projectors in {name} must be the same') if any(not _proj_equal(p1, p2) for p1, p2 in zip(info2['projs'], info1['projs'])): - raise ValueError('SSP projectors in epochs files must be the same') + raise ValueError(f'SSP projectors in {name} must be the same') if (info1['dev_head_t'] is None) != (info2['dev_head_t'] is None) or \ (info1['dev_head_t'] is not None and not np.allclose(info1['dev_head_t']['trans'], info2['dev_head_t']['trans'], rtol=1e-6)): - raise ValueError('epochs[%d][\'info\'][\'dev_head_t\'] must match. ' - 'The epochs probably come from different runs, and ' + raise ValueError(f'{name}.info[\'dev_head_t\'] must match. The ' + 'instances probably come from different runs, and ' 'are therefore associated with different head ' 'positions. Manually change info[\'dev_head_t\'] to ' 'avoid this message but beware that this means the ' 'MEG sensors will not be properly spatially aligned. ' 'See mne.preprocessing.maxwell_filter to realign the ' - 'runs to a common head position.' % ind) + 'runs to a common head position.') def _concatenate_epochs(epochs_list, with_data=True, add_offset=True): diff --git a/mne/event.py b/mne/event.py index 35c9007e61b..8a76d92e704 100644 --- a/mne/event.py +++ b/mne/event.py @@ -49,7 +49,7 @@ def pick_events(events, include=None, exclude=None, step=False): if include is not None: if not isinstance(include, list): include = [include] - mask = np.zeros(len(events), dtype=np.bool) + mask = np.zeros(len(events), dtype=bool) for e in include: mask = np.logical_or(mask, events[:, 2] == e) if step: @@ -58,7 +58,7 @@ def pick_events(events, include=None, exclude=None, step=False): elif exclude is not None: if not isinstance(exclude, list): exclude = [exclude] - mask = np.ones(len(events), dtype=np.bool) + mask = np.ones(len(events), dtype=bool) for e in exclude: mask = np.logical_and(mask, events[:, 2] != e) if step: @@ -432,7 +432,7 @@ def find_stim_steps(raw, pad_start=None, pad_stop=None, merge=0, if np.any(data < 0): warn('Trigger channel contains negative values, using absolute value.') data = np.abs(data) # make sure trig channel is positive - data = data.astype(np.int) + data = data.astype(np.int64) return _find_stim_steps(data, raw.first_samp, pad_start=pad_start, pad_stop=pad_stop, merge=merge) @@ -452,9 +452,9 @@ def _find_events(data, first_samp, verbose=None, output='onset', else: merge = 0 - data = data.astype(np.int) + data = data.astype(np.int64) if uint_cast: - data = data.astype(np.uint16).astype(np.int) + data = data.astype(np.uint16).astype(np.int64) if data.min() < 0: warn('Trigger channel contains negative values, using absolute ' 'value. 
If data were acquired on a Neuromag system with ' diff --git a/mne/evoked.py b/mne/evoked.py index 24ac8ea82e6..e5e0618d536 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -746,7 +746,7 @@ def __init__(self, data, info, tmin=0., comment='', nave=1, kind='average', self.first = int(round(tmin * info['sfreq'])) self.last = self.first + np.shape(data)[-1] - 1 self.times = np.arange(self.first, self.last + 1, - dtype=np.float) / info['sfreq'] + dtype=np.float64) / info['sfreq'] self.info = info.copy() # do not modify original info self.nave = nave self.kind = kind @@ -1111,7 +1111,7 @@ def _read_evoked(fname, condition=None, kind='average', allow_maxshield=False): else: # Put the old style epochs together data = np.concatenate([e.data[None, :] for e in epoch], axis=0) - data = data.astype(np.float) + data = data.astype(np.float64) if first_time is not None and nsamp is not None: times = first_time + np.arange(nsamp) / info['sfreq'] @@ -1163,6 +1163,7 @@ def write_evokeds(fname, evoked): def _write_evokeds(fname, evoked, check=True): """Write evoked data.""" + from .epochs import _compare_epochs_infos if check: check_fname(fname, 'evoked', ('-ave.fif', '-ave.fif.gz', '_ave.fif', '_ave.fif.gz')) @@ -1184,7 +1185,9 @@ def _write_evokeds(fname, evoked, check=True): # One or more evoked data sets start_block(fid, FIFF.FIFFB_PROCESSED_DATA) - for e in evoked: + for ei, e in enumerate(evoked): + if ei: + _compare_epochs_infos(evoked[0].info, e.info, f'evoked[{ei}]') start_block(fid, FIFF.FIFFB_EVOKED) # Comment is optional @@ -1277,7 +1280,7 @@ def _get_peak(data, times, tmin=None, tmax=None, mode='abs'): raise ValueError('The tmin must be smaller or equal to tmax') time_win = (times >= tmin) & (times <= tmax) - mask = np.ones_like(data).astype(np.bool) + mask = np.ones_like(data).astype(bool) mask[:, time_win] = False maxfun = np.argmax diff --git a/mne/filter.py b/mne/filter.py index 34eae5468d4..d1ef0cdef50 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -179,7 +179,7 @@ def _overlap_add_filter(x, h, n_fft=None, phase='zero', picks=None, # cost function based on number of multiplications N = 2 ** np.arange(np.ceil(np.log2(min_fft)), np.ceil(np.log2(max_fft)) + 1, dtype=int) - cost = (np.ceil(n_x / (N - len(h) + 1).astype(np.float)) * + cost = (np.ceil(n_x / (N - len(h) + 1).astype(np.float64)) * N * (np.log2(N) + 1)) # add a heuristic term to prevent too-long FFT's which are slow @@ -2005,7 +2005,7 @@ def resample(self, sfreq, npad='auto', window='boxcar', n_jobs=1, lowpass = self.info.get('lowpass') lowpass = np.inf if lowpass is None else lowpass self.info['lowpass'] = min(lowpass, sfreq / 2.) - new_times = (np.arange(self._data.shape[-1], dtype=np.float) / + new_times = (np.arange(self._data.shape[-1], dtype=np.float64) / sfreq + self.times[0]) # adjust indirectly affected variables if isinstance(self, BaseEpochs): diff --git a/mne/fixes.py b/mne/fixes.py index e2fa1af2004..3b0b5bfc2b2 100644 --- a/mne/fixes.py +++ b/mne/fixes.py @@ -93,14 +93,14 @@ def _read_geometry(filepath, read_metadata=False, read_stamp=False): nvert = _fread3(fobj) nquad = _fread3(fobj) (fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.) 
- coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float) / div + coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float64) / div coords = coords.reshape(-1, 3) quads = _fread3_many(fobj, nquad * 4) quads = quads.reshape(nquad, 4) # # Face splitting follows # - faces = np.zeros((2 * nquad, 3), dtype=np.int) + faces = np.zeros((2 * nquad, 3), dtype=np.int64) nface = 0 for quad in quads: if (quad[0] % 2) == 0: @@ -127,7 +127,7 @@ def _read_geometry(filepath, read_metadata=False, read_stamp=False): else: raise ValueError("File does not appear to be a Freesurfer surface") - coords = coords.astype(np.float) # XXX: due to mayavi bug on mac 32bits + coords = coords.astype(np.float64) # XXX: due to mayavi bug on mac 32bits ret = (coords, faces) if read_metadata: @@ -1078,3 +1078,16 @@ def _np_apply_along_axis(func1d, axis, arr): @jit() def mean(array, axis): return _np_apply_along_axis(np.mean, axis, array) + + +############################################################################### +# Added in Python 3.7 (remove when we drop support for 3.6) + +try: + from contextlib import nullcontext +except ImportError: + from contextlib import contextmanager + + @contextmanager + def nullcontext(enter_result=None): + yield enter_result diff --git a/mne/forward/_make_forward.py b/mne/forward/_make_forward.py index 7b2ca9b6bc5..10734a019a3 100644 --- a/mne/forward/_make_forward.py +++ b/mne/forward/_make_forward.py @@ -690,7 +690,7 @@ def make_forward_dipole(dipole, bem, info, trans=None, n_jobs=1, verbose=None): # Check for omissions due to proximity to inner skull in # make_forward_solution, which will result in an exception if fwd['src'][0]['nuse'] != len(pos): - inuse = fwd['src'][0]['inuse'].astype(np.bool) + inuse = fwd['src'][0]['inuse'].astype(bool) head = ('The following dipoles are outside the inner skull boundary') msg = len(head) * '#' + '\n' + head + '\n' for (t, pos) in zip(times[np.logical_not(inuse)], diff --git a/mne/forward/forward.py b/mne/forward/forward.py index 8bf1c04a946..ac80fd7f774 100644 --- a/mne/forward/forward.py +++ b/mne/forward/forward.py @@ -175,12 +175,12 @@ def _block_diag(A, n): if na % n > 0: raise ValueError('Width of matrix must be a multiple of n') - tmp = np.arange(ma * bdn, dtype=np.int).reshape(bdn, ma) + tmp = np.arange(ma * bdn, dtype=np.int64).reshape(bdn, ma) tmp = np.tile(tmp, (1, n)) ii = tmp.ravel() - jj = np.arange(na, dtype=np.int)[None, :] - jj = jj * np.ones(ma, dtype=np.int)[:, None] + jj = np.arange(na, dtype=np.int64)[None, :] + jj = jj * np.ones(ma, dtype=np.int64)[:, None] jj = jj.T.ravel() # column indices foreach sparse bd bd = sparse.coo_matrix((A.T.ravel(), np.c_[ii, jj].T)).tocsc() @@ -994,7 +994,7 @@ def compute_orient_prior(forward, loose=0.2, verbose=None): if not (0 <= loose <= 1): raise ValueError('loose value should be between 0 and 1, ' 'got %s.' % (loose,)) - orient_prior = np.ones(n_sources, dtype=np.float) + orient_prior = np.ones(n_sources, dtype=np.float64) if loose > 0.: if is_fixed_ori: raise ValueError('loose must be 0. 
with forward operator ' diff --git a/mne/forward/tests/test_make_forward.py b/mne/forward/tests/test_make_forward.py index f145e22b708..acab22df01d 100644 --- a/mne/forward/tests/test_make_forward.py +++ b/mne/forward/tests/test_make_forward.py @@ -336,7 +336,7 @@ def test_forward_mixed_source_space(tmpdir): # head coordinates and mri_resolution, but wrong trans file vox_mri_t = vol1[0]['vox_mri_t'] - with pytest.raises(ValueError, match='mri<->head, got mri_voxel->mri'): + with pytest.raises(ValueError, match='head<->mri, got mri_voxel->mri'): src_from_fwd.export_volume(fname_img, mri_resolution=True, trans=vox_mri_t) diff --git a/mne/inverse_sparse/_gamma_map.py b/mne/inverse_sparse/_gamma_map.py index 2457166b8c6..12457a485e0 100644 --- a/mne/inverse_sparse/_gamma_map.py +++ b/mne/inverse_sparse/_gamma_map.py @@ -51,7 +51,7 @@ def _gamma_map_opt(M, G, alpha, maxit=10000, tol=1e-6, update_mode=1, M = M.copy() if gammas is None: - gammas = np.ones(G.shape[1], dtype=np.float) + gammas = np.ones(G.shape[1], dtype=np.float64) eps = np.finfo(float).eps @@ -132,7 +132,7 @@ def denom_fun(x): gammas = np.repeat(gammas_comb / group_size, group_size) # compute convergence criterion - gammas_full = np.zeros(n_sources, dtype=np.float) + gammas_full = np.zeros(n_sources, dtype=np.float64) gammas_full[active_set] = gammas err = (np.sum(np.abs(gammas_full - gammas_full_old)) / diff --git a/mne/inverse_sparse/mxne_inverse.py b/mne/inverse_sparse/mxne_inverse.py index f6e01d876c1..b3cebaa8073 100644 --- a/mne/inverse_sparse/mxne_inverse.py +++ b/mne/inverse_sparse/mxne_inverse.py @@ -403,7 +403,7 @@ def mixed_norm(evoked, forward, noise_cov, alpha, loose='auto', depth=0.8, M_estimated = np.dot(gain[:, active_set], X) if mask is not None: - active_set_tmp = np.zeros(len(mask), dtype=np.bool) + active_set_tmp = np.zeros(len(mask), dtype=bool) active_set_tmp[mask] = active_set active_set = active_set_tmp del active_set_tmp @@ -662,7 +662,7 @@ def tf_mixed_norm(evoked, forward, noise_cov, M_estimated = np.dot(gain[:, active_set], X) if mask is not None: - active_set_tmp = np.zeros(len(mask), dtype=np.bool) + active_set_tmp = np.zeros(len(mask), dtype=bool) active_set_tmp[mask] = active_set active_set = active_set_tmp del active_set_tmp diff --git a/mne/inverse_sparse/mxne_optim.py b/mne/inverse_sparse/mxne_optim.py index b06e598db49..64fffb1bdb5 100644 --- a/mne/inverse_sparse/mxne_optim.py +++ b/mne/inverse_sparse/mxne_optim.py @@ -67,7 +67,7 @@ def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False): Examples -------- - >>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float), (2, 1)) + >>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float64), (2, 1)) >>> Y = np.r_[Y, np.zeros_like(Y)] >>> print(Y) # doctest:+SKIP [[ 0. 4. 3. 0. 0.] @@ -82,7 +82,7 @@ def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False): [ True True False False] """ if len(Y) == 0: - return np.zeros_like(Y), np.zeros((0,), dtype=np.bool) + return np.zeros_like(Y), np.zeros((0,), dtype=bool) if shape is not None: shape_init = Y.shape Y = Y.reshape(*shape) @@ -141,7 +141,7 @@ def prox_l1(Y, alpha, n_orient): Examples -------- - >>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float), (2, 1)) + >>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float64), (2, 1)) >>> Y = np.r_[Y, np.zeros_like(Y)] >>> print(Y) # doctest:+SKIP [[ 1. 2. 3. 2. 0.] 
@@ -252,7 +252,7 @@ def _mixed_norm_solver_prox(M, G, alpha, lipschitz_constant, maxit=200, Y = np.zeros((n_sources, n_times)) # FISTA aux variable E = [] # track primal objective function highest_d_obj = - np.inf - active_set = np.ones(n_sources, dtype=np.bool) # start with full AS + active_set = np.ones(n_sources, dtype=bool) # start with full AS for i in range(maxit): X0, active_set_0 = X, active_set # store previous values @@ -332,7 +332,7 @@ def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200, E = [] # track primal objective function highest_d_obj = - np.inf - active_set = np.zeros(n_sources, dtype=np.bool) # start with full AS + active_set = np.zeros(n_sources, dtype=bool) # start with full AS alpha_lc = alpha / lipschitz_constant @@ -544,7 +544,7 @@ def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None, E = list() highest_d_obj = - np.inf X_init = None - active_set = np.zeros(n_dipoles, dtype=np.bool) + active_set = np.zeros(n_dipoles, dtype=bool) idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient)) new_active_idx = idx_large_corr[-active_set_size:] if n_orient > 1: @@ -673,7 +673,7 @@ def gprime(w): E = list() - active_set = np.ones(G.shape[1], dtype=np.bool) + active_set = np.ones(G.shape[1], dtype=bool) weights = np.ones(G.shape[1]) X = np.zeros((G.shape[1], M.shape[1])) @@ -740,7 +740,7 @@ def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None): """ n_times = M.shape[1] n_points = G.shape[1] - iv = np.ones((n_points, n_times), dtype=np.float) + iv = np.ones((n_points, n_times), dtype=np.float64) v = phi(iv) L = 1e100 for it in range(100): @@ -1251,7 +1251,7 @@ def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, n_positions = n_sources // n_orient Z = dict.fromkeys(np.arange(n_positions), 0.0) - active_set = np.zeros(n_sources, dtype=np.bool) + active_set = np.zeros(n_sources, dtype=bool) active = [] if Z_init is not None: if Z_init.shape != (n_sources, phi.n_coefs.sum()): @@ -1295,7 +1295,7 @@ def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_( M, G[:, active_set], Z_init, - np.ones(len(active) * n_orient, dtype=np.bool), + np.ones(len(active) * n_orient, dtype=bool), candidates_, alpha_space, alpha_time, lipschitz_constant[active_set[::n_orient]], phi, phiT, w_space=w_space_as, w_time=w_time_as, @@ -1322,7 +1322,7 @@ def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time, Z = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])]) X = phiT(Z) else: - Z = np.zeros((0, phi.n_coefs.sum()), dtype=np.complex) + Z = np.zeros((0, phi.n_coefs.sum()), dtype=np.complex128) X = np.zeros((0, n_times)) return X, Z, active_set, E, gap @@ -1547,8 +1547,8 @@ def g_time_prime_inv(Z): E = list() - active_set = np.ones(n_sources, dtype=np.bool) - Z = np.zeros((n_sources, phi.n_coefs.sum()), dtype=np.complex) + active_set = np.ones(n_sources, dtype=bool) + Z = np.zeros((n_sources, phi.n_coefs.sum()), dtype=np.complex128) for k in range(n_tfmxne_iter): active_set_0 = active_set.copy() diff --git a/mne/inverse_sparse/tests/test_mxne_debiasing.py b/mne/inverse_sparse/tests/test_mxne_debiasing.py index fe0d684fd93..e1bf466aaef 100755 --- a/mne/inverse_sparse/tests/test_mxne_debiasing.py +++ b/mne/inverse_sparse/tests/test_mxne_debiasing.py @@ -14,7 +14,7 @@ def test_compute_debiasing(): rng = np.random.RandomState(42) G = rng.randn(10, 4) X = rng.randn(4, 20) - debias_true = np.arange(1, 5, dtype=np.float) + debias_true = 
np.arange(1, 5, dtype=np.float64) M = np.dot(G, X * debias_true[:, np.newaxis]) debias = compute_bias(M, G, X, max_iter=10000, n_orient=1, tol=1e-7) assert_almost_equal(debias, debias_true, decimal=5) diff --git a/mne/inverse_sparse/tests/test_mxne_optim.py b/mne/inverse_sparse/tests/test_mxne_optim.py index a16d0bfe237..08b858a9a49 100644 --- a/mne/inverse_sparse/tests/test_mxne_optim.py +++ b/mne/inverse_sparse/tests/test_mxne_optim.py @@ -309,6 +309,7 @@ def test_iterative_reweighted_mxne(): assert_array_equal(X_hat_bcd, X_hat_cd, 5) +@pytest.mark.slowtest def test_iterative_reweighted_tfmxne(): """Test convergence of irTF-MxNE solver.""" M, G, true_active_set = _generate_tf_data() diff --git a/mne/io/__init__.py b/mne/io/__init__.py index 156d23b9d31..99c0281abe0 100644 --- a/mne/io/__init__.py +++ b/mne/io/__init__.py @@ -8,7 +8,7 @@ from .open import fiff_open, show_fiff, _fiff_get_fid from .meas_info import (read_fiducials, write_fiducials, read_info, write_info, _empty_info, _merge_info, _force_update_info, Info, - anonymize_info) + anonymize_info, _writing_info_hdf5) from .proj import make_eeg_average_ref_proj, Projection from .tag import _loc_to_coil_trans, _coil_trans_to_loc, _loc_to_eeg_loc diff --git a/mne/io/_digitization.py b/mne/io/_digitization.py index e447a54ff63..9e2680fb127 100644 --- a/mne/io/_digitization.py +++ b/mne/io/_digitization.py @@ -67,6 +67,9 @@ def _count_points_by_type(dig): ) +_dig_keys = {'kind', 'ident', 'r', 'coord_frame'} + + class DigPoint(dict): """Container for a digitization point. @@ -90,12 +93,11 @@ class DigPoint(dict): def __repr__(self): # noqa: D105 if self['kind'] == FIFF.FIFFV_POINT_CARDINAL: - id_ = _cardinal_kind_rev.get( - self.get('ident', -1), 'Unknown cardinal') + id_ = _cardinal_kind_rev.get(self['ident'], 'Unknown cardinal') else: id_ = _dig_kind_proper[ - _dig_kind_rev.get(self.get('kind', -1), 'unknown')] - id_ = ('%s #%s' % (id_, self.get('ident', -1))) + _dig_kind_rev.get(self['kind'], 'unknown')] + id_ = ('%s #%s' % (id_, self['ident'])) id_ = id_.rjust(10) cf = _coord_frame_name(self['coord_frame']) pos = ('(%0.1f, %0.1f, %0.1f) mm' % tuple(1000 * self['r'])).ljust(25) @@ -115,9 +117,9 @@ def __eq__(self, other): # noqa: D105 coordinate frame and position. 
""" my_keys = ['kind', 'ident', 'coord_frame'] - if sorted(self.keys()) != sorted(other.keys()): + if set(self.keys()) != set(other.keys()): return False - elif any([self[_] != other[_] for _ in my_keys]): + elif any(self[_] != other[_] for _ in my_keys): return False else: return np.allclose(self['r'], other['r']) @@ -434,7 +436,7 @@ def _make_dig_points(nasion=None, lpa=None, rpa=None, hpi=None, except ValueError: # and if any conversion fails, simply use arange idents = np.arange(1, len(dig_ch_pos) + 1) for key, ident in zip(dig_ch_pos, idents): - dig.append({'r': dig_ch_pos[key], 'ident': ident, + dig.append({'r': dig_ch_pos[key], 'ident': int(ident), 'kind': FIFF.FIFFV_POINT_EEG, 'coord_frame': coord_frame}) diff --git a/mne/io/artemis123/utils.py b/mne/io/artemis123/utils.py index 5171c8f82e2..de7e98c9113 100644 --- a/mne/io/artemis123/utils.py +++ b/mne/io/artemis123/utils.py @@ -20,7 +20,7 @@ def _load_mne_locs(fname=None): with open(fname, 'r') as fid: for line in fid: vals = line.strip().split(',') - locs[vals[0]] = np.array(vals[1::], np.float) + locs[vals[0]] = np.array(vals[1::], np.float64) return locs @@ -56,9 +56,9 @@ def _load_tristan_coil_locs(coil_loc_path): channel_info[vals[0]] = dict() if vals[6]: channel_info[vals[0]]['inner_coil'] = \ - np.array(vals[2:5], np.float) + np.array(vals[2:5], np.float64) channel_info[vals[0]]['outer_coil'] = \ - np.array(vals[5:8], np.float) + np.array(vals[5:8], np.float64) else: # nothing supplied channel_info[vals[0]]['inner_coil'] = np.zeros(3) channel_info[vals[0]]['outer_coil'] = np.zeros(3) diff --git a/mne/io/brainvision/brainvision.py b/mne/io/brainvision/brainvision.py index 1a2ceff4eee..3d79b4a2060 100644 --- a/mne/io/brainvision/brainvision.py +++ b/mne/io/brainvision/brainvision.py @@ -647,7 +647,7 @@ def _get_vhdr_info(vhdr_fname, eog, misc, scale): # highpass relaxed / no filters highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC') else np.Inf for filt in highpass] - info['highpass'] = np.max(np.array(highpass, dtype=np.float)) + info['highpass'] = np.max(np.array(highpass, dtype=np.float64)) # Coveniently enough 1 / np.Inf = 0.0, so this works for # DC / no highpass filter # filter time constant t [secs] to Hz conversion: 1/2*pi*t @@ -662,7 +662,7 @@ def _get_vhdr_info(vhdr_fname, eog, misc, scale): else: highpass = [float(filt) if filt not in ('NaN', 'Off', 'DC') else 0.0 for filt in highpass] - info['highpass'] = np.min(np.array(highpass, dtype=np.float)) + info['highpass'] = np.min(np.array(highpass, dtype=np.float64)) if info['highpass'] == 0.0 and len(set(highpass)) == 1: # not actually heterogeneous in effect # ... just heterogeneously disabled @@ -691,7 +691,7 @@ def _get_vhdr_info(vhdr_fname, eog, misc, scale): # infinitely relaxed / no filters lowpass = [float(filt) if filt not in ('NaN', 'Off') else 0.0 for filt in lowpass] - info['lowpass'] = np.min(np.array(lowpass, dtype=np.float)) + info['lowpass'] = np.min(np.array(lowpass, dtype=np.float64)) try: # filter time constant t [secs] to Hz conversion: 1/2*pi*t info['lowpass'] = 1. 
/ (2 * np.pi * info['lowpass']) @@ -713,7 +713,7 @@ def _get_vhdr_info(vhdr_fname, eog, misc, scale): # infinitely relaxed / no filters lowpass = [float(filt) if filt not in ('NaN', 'Off') else np.Inf for filt in lowpass] - info['lowpass'] = np.max(np.array(lowpass, dtype=np.float)) + info['lowpass'] = np.max(np.array(lowpass, dtype=np.float64)) if np.isinf(info['lowpass']): # No lowpass actually set for the weakest setting @@ -764,7 +764,7 @@ def _get_vhdr_info(vhdr_fname, eog, misc, scale): ch_name=ch_name, coil_type=coil_type, kind=kind, logno=idx + 1, scanno=idx + 1, cal=cals[idx], range=ranges[idx], loc=np.full(12, np.nan), - unit=unit, unit_mul=0., # always zero- mne manual pg. 273 + unit=unit, unit_mul=FIFF.FIFF_UNITM_NONE, coord_frame=FIFF.FIFFV_COORD_HEAD)) info._update_redundant() diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py index 993cbc66787..33812d2b965 100644 --- a/mne/io/bti/bti.py +++ b/mne/io/bti/bti.py @@ -1136,7 +1136,7 @@ def _get_bti_info(pdf_fname, config_fname, head_shape_fname, rotation_x, chan_info['ch_name'] = chan_neuromag if rename_channels else chan_4d chan_info['logno'] = idx + BTI.FIFF_LOGNO chan_info['scanno'] = idx + 1 - chan_info['cal'] = bti_info['chs'][idx]['scale'] + chan_info['cal'] = float(bti_info['chs'][idx]['scale']) if any(chan_4d.startswith(k) for k in ('A', 'M', 'G')): loc = bti_info['chs'][idx]['loc'] diff --git a/mne/io/bti/read.py b/mne/io/bti/read.py index 710ece56917..210ff827992 100644 --- a/mne/io/bti/read.py +++ b/mne/io/bti/read.py @@ -34,7 +34,7 @@ def read_char(fid, count=1): def read_bool(fid): """Read bool value from bti file.""" - return _unpack_simple(fid, '>?', np.bool) + return _unpack_simple(fid, '>?', bool) def read_uint8(fid): diff --git a/mne/io/ctf/info.py b/mne/io/ctf/info.py index e1f599aad96..9700267eb0e 100644 --- a/mne/io/ctf/info.py +++ b/mne/io/ctf/info.py @@ -239,7 +239,8 @@ def _convert_channel_info(res4, t, use_eeg_pos): nmeg += 1 ch['logno'] = nmeg # Encode the software gradiometer order - ch['coil_type'] = ch['coil_type'] | (cch['grad_order_no'] << 16) + ch['coil_type'] = int( + ch['coil_type'] | (cch['grad_order_no'] << 16)) ch['coord_frame'] = FIFF.FIFFV_COORD_DEVICE elif cch['sensor_type_index'] == CTF.CTFV_EEG_CH: coord_frame = FIFF.FIFFV_COORD_HEAD @@ -296,7 +297,7 @@ def _conv_comp(comp, first, last, chs): col_names = comp[first]['sensors'][:n_col] row_names = [comp[p]['sensor_name'] for p in range(first, last + 1)] mask = np.in1d(col_names, ch_names) # missing channels excluded - col_names = np.array(col_names)[mask] + col_names = np.array(col_names)[mask].tolist() n_col = len(col_names) n_row = len(row_names) ccomp = dict(ctfkind=np.array([comp[first]['coeff_type']]), @@ -478,8 +479,8 @@ def _annotate_bad_segments(directory, start_time, meas_date): for f in fid.readlines(): tmp = f.strip().split() desc.append('bad_%s' % tmp[0]) - onsets.append(np.float(tmp[1]) - start_time) - durations.append(np.float(tmp[2]) - np.float(tmp[1])) + onsets.append(np.float64(tmp[1]) - start_time) + durations.append(np.float64(tmp[2]) - np.float64(tmp[1])) # return None if there are no bad segments if len(onsets) == 0: return None diff --git a/mne/io/ctf_comp.py b/mne/io/ctf_comp.py index 5c420c81b7f..3be634e31f6 100644 --- a/mne/io/ctf_comp.py +++ b/mne/io/ctf_comp.py @@ -109,8 +109,8 @@ def read_ctf_comp(fid, node, chs, verbose=None): # Calibrate... 
_calibrate_comp(one, chs, mat['row_names'], mat['col_names']) else: - one['rowcals'] = np.ones(mat['data'].shape[0], dtype=np.float) - one['colcals'] = np.ones(mat['data'].shape[1], dtype=np.float) + one['rowcals'] = np.ones(mat['data'].shape[0], dtype=np.float64) + one['colcals'] = np.ones(mat['data'].shape[1], dtype=np.float64) compdata.append(one) diff --git a/mne/io/edf/edf.py b/mne/io/edf/edf.py index c302904bde6..e3b2bbe621c 100644 --- a/mne/io/edf/edf.py +++ b/mne/io/edf/edf.py @@ -412,7 +412,7 @@ def _get_info(fname, stim_channel, eog, misc, exclude, preload): chan_info['logno'] = idx + 1 chan_info['scanno'] = idx + 1 chan_info['range'] = physical_range - chan_info['unit_mul'] = 0. + chan_info['unit_mul'] = FIFF.FIFF_UNITM_NONE chan_info['ch_name'] = ch_name chan_info['unit'] = FIFF.FIFF_UNIT_V chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD diff --git a/mne/io/eeglab/tests/test_eeglab.py b/mne/io/eeglab/tests/test_eeglab.py index 359e2543b54..3f5bb3e0a3f 100644 --- a/mne/io/eeglab/tests/test_eeglab.py +++ b/mne/io/eeglab/tests/test_eeglab.py @@ -56,6 +56,7 @@ def _check_h5(fname): @requires_h5py @testing.requires_testing_data +@pytest.mark.slowtest @pytest.mark.parametrize( 'fname', [raw_fname_mat, raw_fname_h5], ids=op.basename ) diff --git a/mne/io/egi/egi.py b/mne/io/egi/egi.py index 0cc4bd16ecd..3bc67d2449b 100644 --- a/mne/io/egi/egi.py +++ b/mne/io/egi/egi.py @@ -63,7 +63,6 @@ def my_fread(*x, **y): for event in range(info['n_events']): event_codes = ''.join(np.fromfile(fid, 'S1', 4).astype('U1')) info['event_codes'].append(event_codes) - info['event_codes'] = np.array(info['event_codes']) else: raise NotImplementedError('Only continuous files are supported') info['unsegmented'] = unsegmented @@ -246,7 +245,7 @@ def __init__(self, input_fname, eog=None, misc=None, sti_ch_idx = [i for i, name in enumerate(ch_names) if name.startswith('STI') or name in event_codes] for idx in sti_ch_idx: - chs[idx].update({'unit_mul': 0, 'cal': 1, + chs[idx].update({'unit_mul': FIFF.FIFF_UNITM_NONE, 'cal': 1., 'kind': FIFF.FIFFV_STIM_CH, 'coil_type': FIFF.FIFFV_COIL_NONE, 'unit': FIFF.FIFF_UNIT_NONE}) diff --git a/mne/io/egi/egimff.py b/mne/io/egi/egimff.py index 7ed93338569..86900fc8fe9 100644 --- a/mne/io/egi/egimff.py +++ b/mne/io/egi/egimff.py @@ -436,7 +436,8 @@ def __init__(self, input_fname, eog=None, misc=None, sti_ch_idx = [i for i, name in enumerate(ch_names) if name.startswith('STI') or name in event_codes] for idx in sti_ch_idx: - chs[idx].update({'unit_mul': 0, 'cal': cals[idx], + chs[idx].update({'unit_mul': FIFF.FIFF_UNITM_NONE, + 'cal': cals[idx], 'kind': FIFF.FIFFV_STIM_CH, 'coil_type': FIFF.FIFFV_COIL_NONE, 'unit': FIFF.FIFF_UNIT_NONE}) diff --git a/mne/io/egi/events.py b/mne/io/egi/events.py index 2f4ee5f6170..7333b0089eb 100644 --- a/mne/io/egi/events.py +++ b/mne/io/egi/events.py @@ -25,7 +25,7 @@ def _read_events(input_fname, info): n_samples = info['last_samps'][-1] mff_events, event_codes = _read_mff_events(input_fname, info['sfreq']) info['n_events'] = len(event_codes) - info['event_codes'] = np.asarray(event_codes).astype(' 0: + info['dig'] = _dict_pack(info['dig'], _dig_cast) + info['chs'] = _dict_pack(info['chs'], _ch_cast) + info['chs']['ch_name'] = np.char.encode( + info['chs']['ch_name'], encoding='utf8') + yield + finally: + if orig_dig is not None: + info['dig'] = orig_dig + info['chs'] = orig_chs + + +def _dict_pack(obj, casts): + # pack a list of dict into dict of array + return {key: np.array([o[key] for o in obj]) for key in casts} + + +def 
_dict_unpack(obj, casts): + # unpack a dict of array into a list of dict + n = len(obj[list(casts)[0]]) + return [{key: cast(obj[key][ii]) for key, cast in casts.items()} + for ii in range(n)] diff --git a/mne/io/nirx/nirx.py b/mne/io/nirx/nirx.py index 1b2b5e47728..3c43fab3884 100644 --- a/mne/io/nirx/nirx.py +++ b/mne/io/nirx/nirx.py @@ -15,12 +15,15 @@ from ...annotations import Annotations from ...transforms import apply_trans, _get_trans from ...utils import logger, verbose, fill_doc +from ...utils import warn @fill_doc def read_raw_nirx(fname, preload=False, verbose=None): """Reader for a NIRX fNIRS recording. + This function has only been tested with NIRScout devices. + Parameters ---------- fname : str @@ -69,9 +72,12 @@ def __init__(self, fname, preload=False, verbose=None): if fname.endswith('.hdr'): fname = op.dirname(op.abspath(fname)) + if not op.isdir(fname): + raise RuntimeError('The path you specified does not exist.') + # Check if required files exist and store names for later use files = dict() - keys = ('dat', 'evt', 'hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2', + keys = ('evt', 'hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2', 'config.txt', 'probeInfo.mat') for key in keys: files[key] = glob.glob('%s/*%s' % (fname, key)) @@ -79,6 +85,11 @@ def __init__(self, fname, preload=False, verbose=None): raise RuntimeError('Expect one %s file, got %d' % (key, len(files[key]),)) files[key] = files[key][0] + if len(glob.glob('%s/*%s' % (fname, 'dat'))) != 1: + warn("A single dat file was expected in the specified path, but " + "got %d. This may indicate that the file structure has been " + "modified since the measurement was saved." % + (len(glob.glob('%s/*%s' % (fname, 'dat'))))) # Read number of rows/samples of wavelength data last_sample = -1 @@ -86,35 +97,6 @@ def __init__(self, fname, preload=False, verbose=None): for line in fid: last_sample += 1 - # Read participant information file - inf = ConfigParser(allow_no_value=True) - inf.read(files['inf']) - inf = inf._sections['Subject Demographics'] - - # Store subject information from inf file in mne format - # Note: NIRX also records "Study Type", "Experiment History", - # "Additional Notes", "Contact Information" and this information - # is currently discarded - subject_info = {} - names = inf['name'].split() - if len(names) > 0: - subject_info['first_name'] = \ - inf['name'].split()[0].replace("\"", "") - if len(names) > 1: - subject_info['last_name'] = \ - inf['name'].split()[-1].replace("\"", "") - if len(names) > 2: - subject_info['middle_name'] = \ - inf['name'].split()[-2].replace("\"", "") - # subject_info['birthday'] = inf['age'] # TODO: not formatted properly - subject_info['sex'] = inf['gender'].replace("\"", "") - # Recode values - if subject_info['sex'] in {'M', 'Male', '1'}: - subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE - elif subject_info['sex'] in {'F', 'Female', '2'}: - subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE - # NIRStar does not record an id, or handedness by default - # Read header file # The header file isn't compliant with the configparser. So all the # text between comments must be removed before passing to parser @@ -129,6 +111,10 @@ def __init__(self, fname, preload=False, verbose=None): ["\"15.0\"", "\"15.2\""]): raise RuntimeError('MNE does not support this NIRStar version' ' (%s)' % (hdr['GeneralInfo']['NIRStar'],)) + if "NIRScout" not in hdr['GeneralInfo']['Device']: + warn("Only import of data from NIRScout devices has been " + "thoroughly tested. You are using a %s device. 
" % + hdr['GeneralInfo']['Device']) # Parse required header fields @@ -155,6 +141,35 @@ def __init__(self, fname, preload=False, verbose=None): # Extract sampling rate samplingrate = float(hdr['ImagingParameters']['SamplingRate']) + # Read participant information file + inf = ConfigParser(allow_no_value=True) + inf.read(files['inf']) + inf = inf._sections['Subject Demographics'] + + # Store subject information from inf file in mne format + # Note: NIRX also records "Study Type", "Experiment History", + # "Additional Notes", "Contact Information" and this information + # is currently discarded + subject_info = {} + names = inf['name'].split() + if len(names) > 0: + subject_info['first_name'] = \ + inf['name'].split()[0].replace("\"", "") + if len(names) > 1: + subject_info['last_name'] = \ + inf['name'].split()[-1].replace("\"", "") + if len(names) > 2: + subject_info['middle_name'] = \ + inf['name'].split()[-2].replace("\"", "") + # subject_info['birthday'] = inf['age'] # TODO: not formatted properly + subject_info['sex'] = inf['gender'].replace("\"", "") + # Recode values + if subject_info['sex'] in {'M', 'Male', '1'}: + subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE + elif subject_info['sex'] in {'F', 'Female', '2'}: + subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE + # NIRStar does not record an id, or handedness by default + # Read information about probe/montage/optodes # A word on terminology used here: # Sources produce light diff --git a/mne/io/nirx/tests/test_nirx.py b/mne/io/nirx/tests/test_nirx.py index 45f921510a5..aaa2de4874f 100644 --- a/mne/io/nirx/tests/test_nirx.py +++ b/mne/io/nirx/tests/test_nirx.py @@ -5,6 +5,7 @@ import os.path as op import shutil +import os import pytest from numpy.testing import assert_allclose, assert_array_equal @@ -36,6 +37,24 @@ def test_nirx_hdr_load(): assert raw.info['sfreq'] == 12.5 +@requires_testing_data +def test_nirx_missing_warn(): + """Test reading NIRX files when missing data.""" + with pytest.raises(RuntimeError, match='The path you'): + read_raw_nirx(fname_nirx_15_2_short + "1", preload=True) + + +@requires_testing_data +def test_nirx_dat_warn(tmpdir): + """Test reading NIRX files when missing data.""" + shutil.copytree(fname_nirx_15_2_short, str(tmpdir) + "/data/") + os.rename(str(tmpdir) + "/data" + "/NIRS-2019-08-23_001.dat", + str(tmpdir) + "/data" + "/NIRS-2019-08-23_001.tmp") + fname = str(tmpdir) + "/data" + "/NIRS-2019-08-23_001.hdr" + with pytest.raises(RuntimeWarning, match='A single dat'): + read_raw_nirx(fname, preload=True) + + @requires_testing_data def test_nirx_15_2_short(): """Test reading NIRX files.""" diff --git a/mne/io/pick.py b/mne/io/pick.py index 45eac70bdfb..3a0b1395849 100644 --- a/mne/io/pick.py +++ b/mne/io/pick.py @@ -383,7 +383,7 @@ def pick_types(info, meg=None, eeg=False, stim=False, eog=False, ecg=False, exclude = _check_info_exclude(info, exclude) nchan = info['nchan'] - pick = np.zeros(nchan, dtype=np.bool) + pick = np.zeros(nchan, dtype=bool) _check_meg_type(ref_meg, allow_auto=True) _check_meg_type(meg) diff --git a/mne/io/tests/test_compensator.py b/mne/io/tests/test_compensator.py index 3401630b1ac..a06debfec49 100644 --- a/mne/io/tests/test_compensator.py +++ b/mne/io/tests/test_compensator.py @@ -67,7 +67,7 @@ def make_evoked(fname, comp): if comp is not None: raw.apply_gradient_compensation(comp) picks = pick_types(raw.info, meg=True, ref_meg=True) - events = np.array([[0, 0, 1]], dtype=np.int) + events = np.array([[0, 0, 1]], dtype=np.int64) evoked = Epochs(raw, events, 1, 0, 20e-3, 
picks=picks, baseline=None).average() return evoked diff --git a/mne/io/tests/test_raw.py b/mne/io/tests/test_raw.py index 23649f05d47..65dab5f3735 100644 --- a/mne/io/tests/test_raw.py +++ b/mne/io/tests/test_raw.py @@ -16,8 +16,10 @@ from mne import concatenate_raws, create_info, Annotations from mne.datasets import testing -from mne.io import read_raw_fif, RawArray, BaseRaw -from mne.utils import _TempDir, catch_logging, _raw_annot, _stamp_to_dt +from mne.externals.h5io import read_hdf5, write_hdf5 +from mne.io import read_raw_fif, RawArray, BaseRaw, Info, _writing_info_hdf5 +from mne.utils import (_TempDir, catch_logging, _raw_annot, _stamp_to_dt, + object_diff, check_version) from mne.io.meas_info import _get_valid_units from mne.io._digitization import DigPoint @@ -201,6 +203,14 @@ def _test_raw_reader(reader, test_preloading=True, test_kwargs=True, assert_array_equal(times, times_2,) assert_array_equal(data[picks_2], data_2) + # Make sure that writing info to h5 format + # (all fields should be compatible) + if check_version('h5py'): + fname_h5 = op.join(tempdir, 'info.h5') + with _writing_info_hdf5(raw.info): + write_hdf5(fname_h5, raw.info) + new_info = Info(read_hdf5(fname_h5)) + assert object_diff(new_info, raw.info) == '' return raw diff --git a/mne/io/utils.py b/mne/io/utils.py index a0c3d532842..fd573021c2f 100644 --- a/mne/io/utils.py +++ b/mne/io/utils.py @@ -262,7 +262,8 @@ def _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg, misc): kind = ch_kind chan_info = {'cal': cals[idx], 'logno': idx + 1, 'scanno': idx + 1, - 'range': 1.0, 'unit_mul': 0., 'ch_name': ch_name, + 'range': 1.0, 'unit_mul': FIFF.FIFF_UNITM_NONE, + 'ch_name': ch_name, 'unit': FIFF.FIFF_UNIT_V, 'coord_frame': FIFF.FIFFV_COORD_HEAD, 'coil_type': coil_type, 'kind': kind, 'loc': np.zeros(12)} diff --git a/mne/label.py b/mne/label.py index 6a5f3a375bf..c1adaa827e8 100644 --- a/mne/label.py +++ b/mne/label.py @@ -999,7 +999,7 @@ def write_label(filename, label, verbose=None): with open(filename, 'wb') as fid: n_vertices = len(label.vertices) - data = np.zeros((n_vertices, 5), dtype=np.float) + data = np.zeros((n_vertices, 5), dtype=np.float64) data[:, 0] = label.vertices data[:, 1:4] = 1e3 * label.pos data[:, 4] = label.values @@ -1895,7 +1895,7 @@ def _read_annot(fname): np.fromfile(fid, '>c', length) # discard orig_tab names = list() - ctab = np.zeros((n_entries, 5), np.int) + ctab = np.zeros((n_entries, 5), np.int64) for i in range(n_entries): name_length = np.fromfile(fid, '>i4', 1)[0] name = np.fromfile(fid, "|S%d" % name_length, 1)[0] @@ -1909,7 +1909,7 @@ def _read_annot(fname): if ctab_version != 2: raise Exception('Color table version not supported') n_entries = np.fromfile(fid, '>i4', 1)[0] - ctab = np.zeros((n_entries, 5), np.int) + ctab = np.zeros((n_entries, 5), np.int64) length = np.fromfile(fid, '>i4', 1)[0] np.fromfile(fid, "|S%d" % length, 1) # Orig table path entries_to_read = np.fromfile(fid, '>i4', 1)[0] @@ -2387,7 +2387,7 @@ def write_labels_to_annot(labels, subject=None, parc=None, overwrite=False, 'specify subject and subjects_dir parameters.') # Create annot and color table array to write - annot = np.empty(n_vertices, dtype=np.int) + annot = np.empty(n_vertices, dtype=np.int64) annot[:] = -1 # create the annotation ids from the colors annot_id_coding = np.array((1, 2 ** 8, 2 ** 16)) diff --git a/mne/minimum_norm/tests/test_inverse.py b/mne/minimum_norm/tests/test_inverse.py index f19d1f9892c..2c7f44f6d17 100644 --- a/mne/minimum_norm/tests/test_inverse.py +++ 
b/mne/minimum_norm/tests/test_inverse.py @@ -335,15 +335,28 @@ def test_localization_bias_fixed(bias_params_fixed, method, lower, upper, ('eLORETA', 99, 100, 0.8, 0.2), ('eLORETA', 99, 100, 0.8, 0.001), ]) +@pytest.mark.parametrize('pick_ori', (None, 'vector')) def test_localization_bias_loose(bias_params_fixed, method, lower, upper, - depth, loose): + depth, loose, pick_ori): """Test inverse localization bias for loose minimum-norm solvers.""" + if pick_ori == 'vector' and method == 'eLORETA': # works, but save cycles + return evoked, fwd, noise_cov, _, want = bias_params_fixed fwd = convert_forward_solution(fwd, surf_ori=False, force_fixed=False) assert not is_fixed_orient(fwd) inv_loose = make_inverse_operator(evoked.info, fwd, noise_cov, loose=loose, depth=depth) - loc = apply_inverse(evoked, inv_loose, lambda2, method).data + loc = apply_inverse( + evoked, inv_loose, lambda2, method, pick_ori=pick_ori) + if pick_ori is not None: + assert loc.data.ndim == 3 + loc, directions = loc.project('pca', src=fwd['src']) + abs_cos_sim = np.abs(np.sum( + directions * inv_loose['source_nn'][2::3], axis=1)) + assert np.percentile(abs_cos_sim, 10) > 0.9 # most very aligned + loc = abs(loc).data + else: + loc = loc.data assert (loc >= 0).all() # Compute the percentage of sources for which there is no loc bias: perc = (want == np.argmax(loc, axis=0)).mean() * 100 @@ -535,7 +548,7 @@ def test_orientation_prior(bias_params_free, method, looses, vmin, vmax, [_get_src_nn(s) for s in inv['src']])) vec_stc_surf = np.matmul(rot, vec_stc.data) if 0. in looses: - vec_stc_normal = vec_stc.normal(inv['src']) + vec_stc_normal, _ = vec_stc.project('normal', inv['src']) assert_allclose(stcs[1].data, vec_stc_normal.data) del vec_stc assert_allclose(vec_stc_normal.data, vec_stc_surf[:, 2]) @@ -1065,8 +1078,9 @@ def test_inverse_mixed(all_src_types_inv_evoked): stcs[kind].magnitude().data) assert_allclose(getattr(stcs['mixed'], kind)().magnitude().data, stcs[kind].magnitude().data) - assert_allclose(stcs['mixed'].surface().normal(surf_src).data, - stcs['surface'].normal(surf_src).data) + with pytest.deprecated_call(): + assert_allclose(stcs['mixed'].surface().normal(surf_src).data, + stcs['surface'].normal(surf_src).data) run_tests_if_main() diff --git a/mne/minimum_norm/time_frequency.py b/mne/minimum_norm/time_frequency.py index ee41ffe11fb..65bc9294674 100644 --- a/mne/minimum_norm/time_frequency.py +++ b/mne/minimum_norm/time_frequency.py @@ -191,9 +191,9 @@ def _compute_pow_plv(data, K, sel, Ws, source_ori, use_fft, Vh, with_power, with_plv, pick_ori, decim, verbose=None): """Aux function for induced power and PLV.""" shape, is_free_ori = _prepare_tfr(data, decim, pick_ori, Ws, K, source_ori) - power = np.zeros(shape, dtype=np.float) # power or raw TFR + power = np.zeros(shape, dtype=np.float64) # power or raw TFR # phase lock - plv = np.zeros(shape, dtype=np.complex) if with_plv else None + plv = np.zeros(shape, dtype=np.complex128) if with_plv else None for epoch in data: epoch = epoch[sel] # keep only selected channels @@ -215,9 +215,9 @@ def _compute_pow_plv(data, K, sel, Ws, source_ori, use_fft, Vh, def _single_epoch_tfr(data, is_free_ori, K, Ws, use_fft, decim, shape, with_plv, with_power): """Compute single trial TFRs, either ITC, power or raw TFR.""" - tfr_e = np.zeros(shape, dtype=np.float) # power or raw TFR + tfr_e = np.zeros(shape, dtype=np.float64) # power or raw TFR # phase lock - plv_e = np.zeros(shape, dtype=np.complex) if with_plv else None + plv_e = np.zeros(shape, dtype=np.complex128) if 
with_plv else None n_sources, _, n_times = shape for f, w in enumerate(Ws): tfr_ = cwt(data, [w], use_fft=use_fft, decim=decim) @@ -225,9 +225,9 @@ def _single_epoch_tfr(data, is_free_ori, K, Ws, use_fft, decim, shape, # phase lock and power at freq f if with_plv: - plv_f = np.zeros((n_sources, n_times), dtype=np.complex) + plv_f = np.zeros((n_sources, n_times), dtype=np.complex128) - tfr_f = np.zeros((n_sources, n_times), dtype=np.float) + tfr_f = np.zeros((n_sources, n_times), dtype=np.float64) for k, t in enumerate([np.real(tfr_), np.imag(tfr_)]): sol = np.dot(K, t) diff --git a/mne/morph.py b/mne/morph.py index 5ad9792852d..9dea5f94fc7 100644 --- a/mne/morph.py +++ b/mne/morph.py @@ -17,6 +17,7 @@ _BaseVolSourceEstimate, _BaseSourceEstimate, _get_ico_tris) from .source_space import SourceSpaces, _ensure_src from .surface import read_morph_map, mesh_edges, read_surface, _compute_nearest +from .transforms import _angle_between_quats, rot_to_quat from .utils import (logger, verbose, check_version, get_subjects_dir, warn as warn_, fill_doc, _check_option, _validate_type, BunchConst, wrapped_stdout, _check_fname, warn, @@ -33,8 +34,9 @@ def compute_source_morph(src, subject_from=None, subject_to='fsaverage', """Create a SourceMorph from one subject to another. Method is based on spherical morphing by FreeSurfer for surface - cortical estimates [1]_ and Symmetric Diffeomorphic Registration - for volumic data [2]_. + cortical estimates :footcite:`GreveEtAl2013` and + Symmetric Diffeomorphic Registration for volumic data + :footcite:`AvantsEtAl2008`. Parameters ---------- @@ -134,7 +136,7 @@ def compute_source_morph(src, subject_from=None, subject_to='fsaverage', obtained as described `here `_. For statistical comparisons between hemispheres, use of the symmetric ``fsaverage_sym`` - model is recommended to minimize bias [1]_. + model is recommended to minimize bias :footcite:`GreveEtAl2013`. .. versionadded:: 0.17.0 @@ -143,14 +145,7 @@ def compute_source_morph(src, subject_from=None, subject_to='fsaverage', References ---------- - .. [1] Greve D. N., Van der Haegen L., Cai Q., Stufflebeam S., Sabuncu M. - R., Fischl B., Brysbaert M. - A Surface-based Analysis of Language Lateralization and Cortical - Asymmetry. Journal of Cognitive Neuroscience 25(9), 1477-1492, 2013. - .. [2] Avants, B. B., Epstein, C. L., Grossman, M., & Gee, J. C. (2009). - Symmetric Diffeomorphic Image Registration with Cross- Correlation: - Evaluating Automated Labeling of Elderly and Neurodegenerative - Brain, 12(1), 26-41. + .. 
footbibliography:: """ src_data, kind, src_subject = _get_src_data(src) subject_from = _check_subject_src(subject_from, src_subject) @@ -179,13 +174,13 @@ def compute_source_morph(src, subject_from=None, subject_to='fsaverage', _check_dep(nibabel='2.1.0', dipy='0.10.1') import nibabel as nib - logger.info('volume source space(s) present...') + logger.info('Volume source space(s) present...') # load moving MRI mri_subpath = op.join('mri', 'brain.mgz') mri_path_from = op.join(subjects_dir, subject_from, mri_subpath) - logger.info('loading %s as "from" volume' % mri_path_from) + logger.info(' Loading %s as "from" volume' % mri_path_from) with warnings.catch_warnings(): mri_from = nib.load(mri_path_from) @@ -194,7 +189,7 @@ def compute_source_morph(src, subject_from=None, subject_to='fsaverage', mri_path_to = op.join(subjects_dir, subject_to, mri_subpath) if not op.isfile(mri_path_to): raise IOError('cannot read file: %s' % mri_path_to) - logger.info('loading %s as "to" volume' % mri_path_to) + logger.info(' Loading %s as "to" volume' % mri_path_to) with warnings.catch_warnings(): mri_to = nib.load(mri_path_to) @@ -206,11 +201,15 @@ def compute_source_morph(src, subject_from=None, subject_to='fsaverage', 'mixed source space') else: surf_offset = 2 if src_to.kind == 'mixed' else 0 - src_data['to_vox_map'] = ( - src_to[-1]['shape'], src_to[-1]['src_mri_t']['trans'] * - np.array([[1e3, 1e3, 1e3, 1]]).T) + # All of our computations are in RAS (like img.affine), so we need + # to get the transformation from RAS to the source space + # subsampling of vox (src), not MRI (FreeSurfer surface RAS) to src + src_ras_t = np.dot(src_to[-1]['mri_ras_t']['trans'], + src_to[-1]['src_mri_t']['trans']) + src_ras_t[:3] *= 1e3 + src_data['to_vox_map'] = (src_to[-1]['shape'], src_ras_t) vertices_to_vol = [s['vertno'] for s in src_to[surf_offset:]] - zooms_src_to = np.diag(src_data['to_vox_map'][1])[:3] + zooms_src_to = np.diag(src_to[-1]['src_mri_t']['trans'])[:3] * 1000 assert (zooms_src_to[0] == zooms_src_to).all() zooms_src_to = tuple(zooms_src_to) @@ -312,7 +311,7 @@ class SourceMorph(object): Number of levels (``len(niter_sdr)``) and number of iterations per level - for each successive stage of iterative refinement - to perform the Symmetric Diffeomorphic Registration (sdr) - transform [2]_. + transform :footcite:`AvantsEtAl2008`. spacing : int | list | None See :func:`mne.compute_source_morph`. smooth : int | str | None @@ -321,7 +320,7 @@ class SourceMorph(object): Morph across hemisphere. morph_mat : scipy.sparse.csr_matrix The sparse surface morphing matrix for spherical surface - based morphing [1]_. + based morphing :footcite:`GreveEtAl2013`. vertices_to : list of ndarray The destination surface vertices. shape : tuple @@ -344,14 +343,7 @@ class SourceMorph(object): References ---------- - .. [1] Greve D. N., Van der Haegen L., Cai Q., Stufflebeam S., Sabuncu M. - R., Fischl B., Brysbaert M. - A Surface-based Analysis of Language Lateralization and Cortical - Asymmetry. Journal of Cognitive Neuroscience 25(9), 1477-1492, 2013. - .. [2] Avants, B. B., Epstein, C. L., Grossman, M., & Gee, J. C. (2009). - Symmetric Diffeomorphic Image Registration with Cross- Correlation: - Evaluating Automated Labeling of Elderly and Neurodegenerative - Brain, 12(1), 26-41. + .. 
footbibliography:: """ def __init__(self, subject_from, subject_to, kind, zooms, @@ -476,14 +468,16 @@ def _morph_one_vol(self, one): img_to, self.affine, _get_zooms_orig(self), self.zooms) # morph data - img_to = self.sdr_morph.transform(self.pre_affine.transform(img_to)) + img_to = self.pre_affine.transform(img_to) + if self.sdr_morph is not None: + img_to = self.sdr_morph.transform(img_to) # subselect the correct cube if src_to is provided if self.src_data['to_vox_map'] is not None: # order=0 (nearest) should be fine since it's just subselecting - img_to = _get_img_fdata(resample_from_to( - SpatialImage(img_to, self.affine), - self.src_data['to_vox_map'], order=0)) + img_to = SpatialImage(img_to, self.affine) + img_to = resample_from_to(img_to, self.src_data['to_vox_map'], 1) + img_to = _get_img_fdata(img_to) # reshape to nvoxel x nvol: # in the MNE definition of volume source spaces, @@ -809,7 +803,7 @@ def _interpolate_data(stc, morph, mri_resolution, mri_space, output): vols = np.zeros((np.prod(shape[:3]), shape[3]), order='F') # flatten n_vertices_seen = 0 for this_inuse in inuse: - this_inuse = this_inuse.astype(np.bool) + this_inuse = this_inuse.astype(bool) n_vertices = np.sum(this_inuse) stc_slice = slice(n_vertices_seen, n_vertices_seen + n_vertices) vols[this_inuse] = stc.data[stc_slice] @@ -889,7 +883,7 @@ def _compute_morph_sdr(mri_from, mri_to, niter_affine, niter_sdr, zooms): mri_to = nib.Nifti1Image(mri_to_res, mri_to_res_affine) affine = mri_to.affine - mri_to = _get_img_fdata(mri_to) # to ndarray + mri_to = _get_img_fdata(mri_to).copy() # to ndarray mri_to /= mri_to.max() mri_from_affine = mri_from.affine # get mri_from to world transform mri_from = _get_img_fdata(mri_from) # to ndarray @@ -919,6 +913,13 @@ def _compute_morph_sdr(mri_from, mri_to, niter_affine, niter_sdr, zooms): rigid = affreg.optimize( mri_to, mri_from, transforms.RigidTransform3D(), None, affine, mri_from_affine, starting_affine=translation.affine) + dist = np.linalg.norm(rigid.affine[:3, 3]) + angle = np.rad2deg(_angle_between_quats( + np.zeros(3), rot_to_quat(rigid.affine[:3, :3]))) + + logger.info(f'Translation: {dist:5.1f} mm') + logger.info(f'Rotation: {angle:5.1f}°') + logger.info('') # affine transform (translation + rotation + scaling) logger.info('Optimizing full affine:') @@ -928,12 +929,33 @@ def _compute_morph_sdr(mri_from, mri_to, niter_affine, niter_sdr, zooms): affine, mri_from_affine, starting_affine=rigid.affine) # compute mapping - sdr = imwarp.SymmetricDiffeomorphicRegistration( - metrics.CCMetric(3), list(niter_sdr)) - logger.info('Optimizing SDR:') - with wrapped_stdout(indent=' '): - sdr_morph = sdr.optimize(mri_to, pre_affine.transform(mri_from)) - shape = tuple(sdr_morph.domain_shape) # should be tuple of int + mri_from_to = pre_affine.transform(mri_from) + shape = tuple(pre_affine.domain_shape) + if len(niter_sdr): + sdr = imwarp.SymmetricDiffeomorphicRegistration( + metrics.CCMetric(3), list(niter_sdr)) + logger.info('Optimizing SDR:') + with wrapped_stdout(indent=' '): + sdr_morph = sdr.optimize(mri_to, pre_affine.transform(mri_from)) + assert shape == tuple(sdr_morph.domain_shape) # should be tuple of int + mri_from_to = sdr_morph.transform(mri_from_to) + else: + sdr_morph = None + + mri_to, mri_from_to = mri_to.ravel(), mri_from_to.ravel() + mri_from_to /= np.linalg.norm(mri_from_to) + mri_to /= np.linalg.norm(mri_to) + r2 = 100 * (mri_to @ mri_from_to) + logger.info(f'Variance explained by morph: {r2:0.1f}%') + + # To debug to_vox_map, this can be used: + # from 
nibabel.processing import resample_from_to + # mri_from_to = sdr_morph.transform(pre_affine.transform(mri_from)) + # mri_from_to = nib.Nifti1Image(mri_from_to, affine) + # fig1 = mri_from_to.orthoview() + # mri_from_to_cut = resample_from_to(mri_from_to, to_vox_map, 1) + # fig2 = mri_from_to_cut.orthoview() + return shape, zooms, affine, pre_affine, sdr_morph @@ -1232,7 +1254,7 @@ def _morph_mult(data, e, use_sparse, idx_use_data, idx_use_out=None): def _sparse_argmax_nnz_row(csr_mat): """Return index of the maximum non-zero index in each row.""" n_rows = csr_mat.shape[0] - idx = np.empty(n_rows, dtype=np.int) + idx = np.empty(n_rows, dtype=np.int64) for k in range(n_rows): row = csr_mat[k].tocoo() idx[k] = row.col[np.argmax(row.data)] diff --git a/mne/preprocessing/_peak_finder.py b/mne/preprocessing/_peak_finder.py index 88a8d2abdd1..0fa02d959a1 100644 --- a/mne/preprocessing/_peak_finder.py +++ b/mne/preprocessing/_peak_finder.py @@ -102,7 +102,7 @@ def peak_finder(x0, thresh=None, extrema=1, verbose=None): # Preallocate max number of maxima maxPeaks = int(np.ceil(length / 2.0)) - peak_loc = np.zeros(maxPeaks, dtype=np.int) + peak_loc = np.zeros(maxPeaks, dtype=np.int64) peak_mag = np.zeros(maxPeaks) c_ind = 0 # Loop through extrema which should be peaks and then valleys diff --git a/mne/preprocessing/bads.py b/mne/preprocessing/bads.py index bc07ea3fa9b..901df7f9aa9 100644 --- a/mne/preprocessing/bads.py +++ b/mne/preprocessing/bads.py @@ -30,7 +30,7 @@ def _find_outliers(X, threshold=3.0, max_iter=2, tail=0): The outlier indices. """ from scipy.stats import zscore - my_mask = np.zeros(len(X), dtype=np.bool) + my_mask = np.zeros(len(X), dtype=bool) for _ in range(max_iter): X = np.ma.masked_array(X, my_mask) if tail == 0: diff --git a/mne/preprocessing/ecg.py b/mne/preprocessing/ecg.py index 5de4bc79ef1..8742c42fc5f 100644 --- a/mne/preprocessing/ecg.py +++ b/mne/preprocessing/ecg.py @@ -94,7 +94,7 @@ def qrs_detector(sfreq, ecg, thresh_value=0.6, levels=2.5, n_thresh=3, if window[0] > thresh1: max_time = np.argmax(window) time.append(ii + max_time) - nx = np.sum(np.diff(((window > thresh1).astype(np.int) == + nx = np.sum(np.diff(((window > thresh1).astype(np.int64) == 1).astype(int))) numcross.append(nx) rms.append(np.sqrt(sum_squared(window) / window.size)) diff --git a/mne/preprocessing/maxwell.py b/mne/preprocessing/maxwell.py index 90bdbc773e9..d5386b17a46 100644 --- a/mne/preprocessing/maxwell.py +++ b/mne/preprocessing/maxwell.py @@ -1858,7 +1858,7 @@ def _trans_sss_basis(exp, all_coils, trans=None, coil_scale=100.): # st_only @verbose def find_bad_channels_maxwell( - raw, limit=7., duration=5., min_count=5, + raw, limit=7., duration=5., min_count=5, return_scores=False, origin='auto', int_order=8, ext_order=3, calibration=None, cross_talk=None, coord_frame='head', regularize='in', ignore_ref=False, bad_condition='error', head_pos=None, mag_scale=100., @@ -1877,10 +1877,19 @@ def find_bad_channels_maxwell( Detection limit (default is 7.). Smaller values will find more bad channels at increased risk of including good ones. duration : float - Duration into which to window the data for processing. Default is 5. + Duration of the segments into which to slice the data for processing, + in seconds. Default is 5. min_count : int Minimum number of times a channel must show up as bad in a chunk. Default is 5. + return_scores : bool + If ``True``, return a dictionary with scoring information for each + evaluated segment of the data. Default is ``False``. + + .. 
warning:: This feature is experimental and may change in a future + version of MNE-Python without prior notice. Please + report any problems and enhancement proposals to the + developers. %(maxwell_origin_int_ext_calibration_cross)s %(maxwell_coord)s %(maxwell_reg_ref_cond_pos)s @@ -1896,6 +1905,33 @@ def find_bad_channels_maxwell( flat_chs : list List of MEG channels that were detected as being flat in at least ``min_count`` segments. + scores : dict + A dictionary with information produced by the scoring algorithms. + Only returned when ``return_scores`` is ``True``. It contains the + following keys: + + - ``ch_names`` : ndarray, shape (n_meg,) + The names of the MEG channels. Their order corresponds to the + order of rows in the ``scores`` and ``limits`` arrays. + - ``ch_types`` : ndarray, shape (n_meg,) + The types of the MEG channels in ``ch_names`` (``'mag'``, + ``'grad'``). + - ``bins`` : ndarray, shape (n_windows, 2) + The inclusive window boundaries (start and stop; in seconds) used + to calculate the scores. + - ``scores_flat`` : ndarray, shape (n_meg, n_windows) + The scores for testing whether MEG channels are flat. + - ``limits_flat`` : ndarray, shape (n_meg, 1) + The score thresholds above which a segment was classified as + "flat". + - ``scores_noisy`` : ndarray, shape (n_meg, n_windows) + The scores for testing whether MEG channels are noisy. + - ``limits_noisy`` : ndarray, shape (n_meg, 1) + The score thresholds above which a segment was classified as + "noisy". + + .. note:: The scores and limits for channels marked as ``bad`` in the + input data will be set to ``np.nan``. See Also -------- @@ -1904,10 +1940,11 @@ def find_bad_channels_maxwell( Notes ----- - All arguments after ``raw``, ``limit``, ``duration``, and ``min_count`` - are the same as :func:`~maxwell_filter`, except that the following are - not allowed in this function because they are unused: ``st_duration``, - ``st_correlation``, ``destination``, ``st_fixed``, and ``st_only``. + All arguments after ``raw``, ``limit``, ``duration``, ``min_count``, and + ``return_scores`` are the same as :func:`~maxwell_filter`, except that the + following are not allowed in this function because they are unused: + ``st_duration``, ``st_correlation``, ``destination``, ``st_fixed``, and + ``st_only``. This algorithm, for a given chunk of data: @@ -1973,20 +2010,48 @@ def find_bad_channels_maxwell( if pick in params['grad_picks'] else flat_limits['mag'] for pick in good_meg_picks]) + flat_step = max(20, int(30 * raw.info['sfreq'] / 1000.)) all_flats = set() + + # Prepare variables to return if `return_scores=True`. + bins = np.empty((len(starts), 2)) # To store start, stop of each segment + # We create ndarrays with one row per channel, regardless of channel type + # and whether the channel has been marked as "bad" in info or not. This + # makes indexing in the loop easier. We only filter this down to the subset + # of MEG channels after all processing is done. 
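(Aside, not part of the patch: a minimal usage sketch for the new ``return_scores`` output documented above; ``raw`` is a hypothetical preloaded MEG recording.)

    import numpy as np
    from mne.preprocessing import find_bad_channels_maxwell

    noisy, flat, scores = find_bad_channels_maxwell(raw, return_scores=True)
    # scores_noisy is (n_meg, n_windows) and limits_noisy is (n_meg, 1), so
    # the comparison broadcasts across windows; NaN rows (channels already
    # marked bad in the input) compare as False throughout
    n_over = (scores['scores_noisy'] > scores['limits_noisy']).sum(axis=-1)
    worst = scores['ch_names'][np.argsort(n_over)[::-1][:5]]  # 5 worst channels
    print(worst)
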
+ ch_names = np.array(raw.ch_names) + ch_types = np.array(raw.get_channel_types()) + + scores_flat = np.full((len(ch_names), len(starts)), np.nan) + scores_noisy = np.full_like(scores_flat, fill_value=np.nan) + + thresh_flat = np.full((len(ch_names), 1), np.nan) + thresh_noisy = np.full_like(thresh_flat, fill_value=np.nan) + for si, (start, stop) in enumerate(zip(starts, stops)): n_iter = 0 orig_data = raw.get_data(None, start, stop, verbose=False) chunk_raw = RawArray( orig_data, params['info'], first_samp=raw.first_samp + start, copy='data', verbose=False) + + t = chunk_raw.times[[0, -1]] + start / raw.info['sfreq'] + logger.info(' Interval %3d: %8.3f - %8.3f' + % ((si + 1,) + tuple(t[[0, -1]]))) + # Flat pass: var < 0.01 fT/cm or 0.01 fT for at 30 ms (or 20 samples) n = stop - start flat_stop = n - (n % flat_step) data = chunk_raw.get_data(good_meg_picks, 0, flat_stop) data.shape = (data.shape[0], -1, flat_step) delta = np.std(data, axis=-1).min(-1) # min std across segments + + # We may want to return this later if `return_scores=True`. + bins[si, :] = t[0], t[-1] + scores_flat[good_meg_picks, si] = delta + thresh_flat[good_meg_picks] = these_limits.reshape(-1, 1) + chunk_flats = delta < these_limits chunk_flats = np.where(chunk_flats)[0] chunk_flats = [raw.ch_names[good_meg_picks[chunk_flat]] @@ -2000,9 +2065,6 @@ def find_bad_channels_maxwell( chunk_noisy = list() params['st_duration'] = int(round( chunk_raw.times[-1] * raw.info['sfreq'])) - t = chunk_raw.times[[0, -1]] + start / raw.info['sfreq'] - logger.info(' Interval %3d: %8.3f - %8.3f' - % ((si + 1,) + tuple(t[[0, -1]]))) for n_iter in range(1, 101): # iteratively exclude the worst ones assert set(raw.info['bads']) & set(chunk_noisy) == set() params['good_mask'][:] = [ @@ -2028,8 +2090,14 @@ def find_bad_channels_maxwell( z = (range_ - mean) / std idx = np.argmax(z) max_ = z[idx] + + # We may want to return this later if `return_scores=True`. + scores_noisy[these_picks, si] = z + thresh_noisy[these_picks] = limit + if max_ < limit: break + name = raw.ch_names[these_picks[idx]] logger.debug(' Bad: %s %0.1f' % (name, max_)) @@ -2040,7 +2108,27 @@ def find_bad_channels_maxwell( key=lambda x: raw.ch_names.index(x)) flat_chs = sorted((f for f, c in flat_chs.items() if c >= min_count), key=lambda x: raw.ch_names.index(x)) + + # Only include MEG channels. 
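+ # (params['meg_picks'] indexes the original channel order, so applying the same mask keeps names, types, scores, and limits aligned.)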
+ ch_names = ch_names[params['meg_picks']] + ch_types = ch_types[params['meg_picks']] + scores_flat = scores_flat[params['meg_picks']] + thresh_flat = thresh_flat[params['meg_picks']] + scores_noisy = scores_noisy[params['meg_picks']] + thresh_noisy = thresh_noisy[params['meg_picks']] + logger.info(' Static bad channels: %s' % (noisy_chs,)) logger.info(' Static flat channels: %s' % (flat_chs,)) logger.info('[done]') - return noisy_chs, flat_chs + + if return_scores: + scores = dict(ch_names=ch_names, + ch_types=ch_types, + bins=bins, + scores_flat=scores_flat, + limits_flat=thresh_flat, + scores_noisy=scores_noisy, + limits_noisy=thresh_noisy) + return noisy_chs, flat_chs, scores + else: + return noisy_chs, flat_chs diff --git a/mne/preprocessing/tests/test_maxwell.py b/mne/preprocessing/tests/test_maxwell.py index c56667df310..bd0f89a4c8d 100644 --- a/mne/preprocessing/tests/test_maxwell.py +++ b/mne/preprocessing/tests/test_maxwell.py @@ -1061,18 +1061,25 @@ def test_mf_skips(): @testing.requires_testing_data -@pytest.mark.parametrize('fname, bads, annot, add_ch, ignore_ref, want_bads', [ - # Neuromag data tested against MF - (sample_fname, [], False, False, False, ['MEG 2443']), - # add 0111 to test picking, add annot to test it, and prepend chs for idx - (sample_fname, ['MEG 0111'], True, True, False, ['MEG 2443']), - # CTF data seems to be sensitive to linalg lib (?) because some channels - # are very close to the limit, so we just check that one shows up - (ctf_fname_continuous, [], False, False, False, {'BR1-4304'}), - (ctf_fname_continuous, [], False, False, True, ['MLC24-4304']), # faked -]) +@pytest.mark.parametrize( + ('fname', 'bads', 'annot', 'add_ch', 'ignore_ref', 'want_bads', + 'return_scores'), [ + # Neuromag data tested against MF + (sample_fname, [], False, False, False, ['MEG 2443'], False), + # add 0111 to test picking, add annot to test it, and prepend chs for + # idx + (sample_fname, ['MEG 0111'], True, True, False, ['MEG 2443'], False), + # CTF data seems to be sensitive to linalg lib (?) 
because some + # channels are very close to the limit, so we just check that one shows + # up + (ctf_fname_continuous, [], False, False, False, {'BR1-4304'}, False), + # faked + (ctf_fname_continuous, [], False, False, True, ['MLC24-4304'], False), + # For `return_scores=True` + (sample_fname, ['MEG 0111'], True, True, False, ['MEG 2443'], True) + ]) def test_find_bad_channels_maxwell(fname, bads, annot, add_ch, ignore_ref, - want_bads): + want_bads, return_scores): """Test automatic bad channel detection.""" if fname.endswith('.ds'): raw = read_raw_ctf(fname).load_data() @@ -1086,6 +1093,9 @@ def test_find_bad_channels_maxwell(fname, bads, annot, add_ch, ignore_ref, raw._data[flat_idx] = 0 # MaxFilter didn't have this but doesn't affect it want_flats = [raw.ch_names[flat_idx]] raw.apply_gradient_compensation(0) + + min_count = 5 + if add_ch: raw_eeg = read_raw_fif(fname) raw_eeg.pick_types(meg=False, eeg=True, exclude=()).load_data() @@ -1105,10 +1115,19 @@ def test_find_bad_channels_maxwell(fname, bads, annot, add_ch, ignore_ref, assert step == 1502 raw.annotations.append(step * dt + raw._first_time, dt, 'BAD') with catch_logging() as log: - got_bads, got_flats = find_bad_channels_maxwell( + return_vals = find_bad_channels_maxwell( raw, origin=(0., 0., 0.04), regularize=None, bad_condition='ignore', skip_by_annotation='BAD', verbose=True, - ignore_ref=ignore_ref) + ignore_ref=ignore_ref, min_count=min_count, + return_scores=return_scores) + + if return_scores: + assert len(return_vals) == 3 + got_bads, got_flats, got_scores = return_vals + else: + assert len(return_vals) == 2 + got_bads, got_flats = return_vals + if isinstance(want_bads, list): assert got_bads == want_bads # from MaxFilter else: @@ -1118,5 +1137,54 @@ def test_find_bad_channels_maxwell(fname, bads, annot, add_ch, ignore_ref, assert 'Interval 1: 0.00' in log assert 'Interval 2: 5.00' in log + if return_scores: + meg_chs = raw.copy().pick_types(meg=True, exclude=[]).ch_names + ch_types = raw.get_channel_types(meg_chs) + + assert list(got_scores['ch_names']) == meg_chs + assert list(got_scores['ch_types']) == ch_types + # Check that time is monotonically increasing. + assert (np.diff(got_scores['bins'].flatten()) >= 0).all() + + assert (got_scores['scores_flat'].shape == + got_scores['scores_noisy'].shape == + (len(meg_chs), len(got_scores['bins']))) + + assert (got_scores['limits_flat'].shape == + got_scores['limits_noisy'].shape == + (len(meg_chs), 1)) + + # Check "flat" scores. + scores_flat = got_scores['scores_flat'] + limits_flat = got_scores['limits_flat'] + # The following essentially is just this: + # n_segments_below_limit = (scores_flat < limits_flat).sum(-1) + # made to work with NaN's in the scores. + n_segments_below_limit = np.less( + scores_flat, limits_flat, + where=np.equal(np.isnan(scores_flat), False), + out=np.full_like(scores_flat, fill_value=False)).sum(-1) + + ch_idx = np.where(n_segments_below_limit >= + min(min_count, len(got_scores['bins']))) + flats = set(got_scores['ch_names'][ch_idx]) + assert flats == set(want_flats) + + # Check "noisy" scores. + scores_noisy = got_scores['scores_noisy'] + limits_noisy = got_scores['limits_noisy'] + # The following essentially is just this: + # n_segments_beyond_limit = (scores_noisy > limits_noisy).sum(-1) + # made to work with NaN's in the scores. 
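+ # (NaN scores, i.e. channels already marked bad in the input, compare as False either way, so they never count toward min_count.)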
+ n_segments_beyond_limit = np.greater( + scores_noisy, limits_noisy, + where=np.equal(np.isnan(scores_noisy), False), + out=np.full_like(scores_noisy, fill_value=False)).sum(-1) + + ch_idx = np.where(n_segments_beyond_limit >= + min(min_count, len(got_scores['bins']))) + bads = set(got_scores['ch_names'][ch_idx]) + assert bads == set(want_bads) + run_tests_if_main() diff --git a/mne/preprocessing/xdawn.py b/mne/preprocessing/xdawn.py index 89850bddeaf..2e5578304fd 100644 --- a/mne/preprocessing/xdawn.py +++ b/mne/preprocessing/xdawn.py @@ -630,7 +630,7 @@ def _pick_sources(self, data, include, exclude, eid): sources = np.dot(self.filters_[eid], data) if include not in (None, list()): - mask = np.ones(len(sources), dtype=np.bool) + mask = np.ones(len(sources), dtype=bool) mask[np.unique(include)] = False sources[mask] = 0. logger.info('Zeroing out %i Xdawn components' % mask.sum()) diff --git a/mne/simulation/evoked.py b/mne/simulation/evoked.py index 598ea3a5047..8da4bff4db8 100644 --- a/mne/simulation/evoked.py +++ b/mne/simulation/evoked.py @@ -73,7 +73,7 @@ def simulate_evoked(fwd, stc, info, cov, nave=30, iir_filter=None, if nave < np.inf: noise = _simulate_noise_evoked(evoked, cov, iir_filter, random_state) evoked.data += noise.data / math.sqrt(nave) - evoked.nave = np.int(nave) + evoked.nave = np.int64(nave) if cov is not None and cov.get('projs', None): evoked.add_proj(cov['projs']).apply_proj() return evoked diff --git a/mne/simulation/tests/test_raw.py b/mne/simulation/tests/test_raw.py index d8760a809a9..ba097b5b6a8 100644 --- a/mne/simulation/tests/test_raw.py +++ b/mne/simulation/tests/test_raw.py @@ -61,6 +61,7 @@ def _assert_iter_sim(raw_sim, raw_new, new_event_id): assert_array_equal(data_new, data_sim) +@pytest.mark.slowtest def test_iterable(): """Test iterable support for simulate_raw.""" raw = read_raw_fif(raw_fname_short).load_data() diff --git a/mne/simulation/tests/test_source.py b/mne/simulation/tests/test_source.py index d37c02802d1..30806982fb5 100644 --- a/mne/simulation/tests/test_source.py +++ b/mne/simulation/tests/test_source.py @@ -113,7 +113,7 @@ def test_simulate_sparse_stc(_get_fwd_labels): n_times = 10 tmin = 0 tstep = 1e-3 - times = np.arange(n_times, dtype=np.float) * tstep + tmin + times = np.arange(n_times, dtype=np.float64) * tstep + tmin pytest.raises(ValueError, simulate_sparse_stc, fwd['src'], len(labels), times, labels=labels, location='center', subject='sample', @@ -221,7 +221,7 @@ def test_simulate_sparse_stc_single_hemi(_get_fwd_labels): n_times = 10 tmin = 0 tstep = 1e-3 - times = np.arange(n_times, dtype=np.float) * tstep + tmin + times = np.arange(n_times, dtype=np.float64) * tstep + tmin stc_1 = simulate_sparse_stc(fwd['src'], len(labels_single_hemi), times, labels=labels_single_hemi, random_state=0) diff --git a/mne/source_estimate.py b/mne/source_estimate.py index b852444891d..c7b0f55e0b8 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -17,7 +17,6 @@ from .cov import Covariance from .evoked import _get_peak from .filter import resample -from .fixes import einsum from .io.constants import FIFF from .surface import read_surface, _get_ico_surface, mesh_edges from .source_space import (_ensure_src, _get_morph_src_reordering, @@ -1786,6 +1785,8 @@ def magnitude(self): data_mag, self.vertices, self.tmin, self.tstep, self.subject, self.verbose) + @deprecated('stc.normal(src) is deprecated and will be removed in 0.22, ' + 'use stc.project("normal", src)[0] instead') @fill_doc def normal(self, src, use_cps=True): """Compute 
activity orthogonal to the cortex. @@ -1806,13 +1807,97 @@ def normal(self, src, use_cps=True): The source estimate only retaining the activity orthogonal to the cortex. """ - _check_src_normal('normal', src) + return self.project('normal', src, use_cps)[0] + + def _get_src_normals(self, src, use_cps): normals = np.vstack([_get_src_nn(s, use_cps, v) for s, v in - zip(src, self.vertices)]) - data_norm = einsum('ijk,ij->ik', self.data, normals) - return self._scalar_class( + zip(src, self.vertices)]) + return normals + + @fill_doc + def project(self, directions, src=None, use_cps=True): + """Project the data for each vertex in a given direction. + + Parameters + ---------- + directions : ndarray, shape (n_vertices, 3) | str + Can be: + + - ``'normal'`` + Project onto the source space normals. + - ``'pca'`` + SVD will be used to project onto the direction of maximal + power for each source. + - :class:`~numpy.ndarray`, shape (n_vertices, 3) + Projection directions for each source. + src : instance of SourceSpaces | None + The source spaces corresponding to the source estimate. + Not used when ``directions`` is an array, optional when + ``directions='pca'``. + %(use_cps)s + Should be the same value that was used when the forward model + was computed (typically True). + + Returns + ------- + stc : instance of SourceEstimate + The projected source estimate. + directions : ndarray, shape (n_vertices, 3) + The directions that were computed (or just used). + + Notes + ----- + When using SVD, there is a sign ambiguity for the direction of maximal + power. When ``src is None``, the direction is chosen that makes the + resulting time waveform sum positive (i.e., have positive amplitudes). + When ``src`` is provided, the directions are flipped in the direction + of the source normals, i.e., outward from cortex for surface source + spaces and in the +Z / superior direction for volume source spaces. + + .. versionadded:: 0.21 + """ + _validate_type(directions, (str, np.ndarray), 'directions') + _validate_type(src, (None, SourceSpaces), 'src') + if isinstance(directions, str): + _check_option('directions', directions, ('normal', 'pca'), + extra='when str') + + if directions == 'normal': + if src is None: + raise ValueError( + 'If directions="normal", src cannot be None') + _check_src_normal('normal', src) + directions = self._get_src_normals(src, use_cps) + else: + assert directions == 'pca' + x = self.data + if not np.isrealobj(self.data): + _check_option('stc.data.dtype', self.data.dtype, + (np.complex64, np.complex128)) + dtype = \ + np.float32 if x.dtype == np.complex64 else np.float64 + x = x.view(dtype) + assert x.shape[-1] == 2 * self.data.shape[-1] + u, _, v = np.linalg.svd(x, full_matrices=False) + directions = u[:, :, 0] + # The sign is arbitrary, so let's flip it in the direction that + # makes the resulting time series the most positive: + if src is None: + signs = np.sum(v[:, 0].real, axis=1, keepdims=True) + else: + normals = self._get_src_normals(src, use_cps) + signs = np.sum(directions * normals, axis=1, keepdims=True) + assert signs.shape == (self.data.shape[0], 1) + signs = np.sign(signs) + signs[signs == 0] = 1. 
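(Aside, not part of the patch: the intended call pattern for the new ``project`` method; ``stc_vec`` and ``src`` are hypothetical stand-ins for a vector source estimate and its matching source spaces.)

    # 'pca' projects each source onto its direction of maximal power; the
    # returned directions, shape (n_vertices, 3), are sign-flipped toward
    # the source normals because src is given
    stc_pca, directions = stc_vec.project('pca', src=src)
    # 'normal' projects onto the cortical normals and requires src
    stc_normal, _ = stc_vec.project('normal', src=src)
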
+ directions *= signs + _check_option( + 'directions.shape', directions.shape, [(self.data.shape[0], 3)]) + data_norm = np.matmul(directions[:, np.newaxis], self.data)[:, 0] + stc = self._scalar_class( data_norm, self.vertices, self.tmin, self.tstep, self.subject, self.verbose) + return stc, directions class _BaseVolSourceEstimate(_BaseSourceEstimate): @@ -1822,7 +1907,7 @@ class _BaseVolSourceEstimate(_BaseSourceEstimate): @copy_function_doc_to_method_doc(plot_volume_source_estimates) def plot(self, src, subject=None, subjects_dir=None, mode='stat_map', - bg_img=None, colorbar=True, colormap='auto', clim='auto', + bg_img='T1.mgz', colorbar=True, colormap='auto', clim='auto', transparent='auto', show=True, initial_time=None, initial_pos=None, verbose=None): data = self.magnitude() if self._data_ndim == 3 else self @@ -2725,7 +2810,7 @@ def _get_connectivity_from_edges(edges, n_times, verbose=None): n_vertices = edges.shape[0] logger.info("-- number of connected vertices : %d" % n_vertices) nnz = edges.col.size - aux = n_vertices * np.arange(n_times)[:, None] * np.ones((1, nnz), np.int) + aux = n_vertices * np.tile(np.arange(n_times)[:, None], (1, nnz)) col = (edges.col[None, :] + aux).ravel() row = (edges.row[None, :] + aux).ravel() if n_times > 1: # add temporal edges @@ -2736,7 +2821,7 @@ def _get_connectivity_from_edges(edges, n_times, verbose=None): row = np.concatenate((row, o, d)) col = np.concatenate((col, d, o)) data = np.ones(edges.data.size * n_times + 2 * n_vertices * (n_times - 1), - dtype=np.int) + dtype=np.int64) connectivity = coo_matrix((data, (row, col)), shape=(n_times * n_vertices,) * 2) return connectivity diff --git a/mne/source_space.py b/mne/source_space.py index 82224d9107f..b6922eb61da 100644 --- a/mne/source_space.py +++ b/mne/source_space.py @@ -75,10 +75,8 @@ def _get_lut(fname=None): return np.genfromtxt(fname, dtype=dtype) -def _get_lut_id(lut, label, use_lut): +def _get_lut_id(lut, label): """Convert a label to a LUT ID number.""" - if not use_lut: - return 1 assert isinstance(label, str) mask = (lut['name'] == label) assert mask.sum() == 1 @@ -329,9 +327,15 @@ def export_volume(self, fname, include_surfaces=True, 4x4 transformation matrix (like the ``--trans`` MNE-C option. Must be provided if source spaces are in head coordinates and include_surfaces and mri_resolution are True. - mri_resolution : bool + mri_resolution : bool | str If True, the image is saved in MRI resolution - (e.g. 256 x 256 x 256). + (e.g. 256 x 256 x 256), and each source region (surface or + segmentation volume) is filled in completely. If "sparse", only a + single voxel in the high-resolution MRI is filled in for each + source point. + + .. versionchanged:: 0.21.0 + Support for "sparse" was added. use_lut : bool If True, assigns a numeric value to each source space that corresponds to a color on the freesurfer lookup table. @@ -346,6 +350,12 @@ def export_volume(self, fname, include_surfaces=True, This method requires nibabel. 
""" _check_fname(fname, overwrite) + _validate_type(mri_resolution, (bool, str), 'mri_resolution') + if isinstance(mri_resolution, str): + _check_option('mri_resolution', mri_resolution, ["sparse"], + extra='when mri_resolution is a string') + else: + mri_resolution = bool(mri_resolution) fname = str(fname) # import nibabel or raise error try: @@ -388,170 +398,138 @@ def export_volume(self, fname, include_surfaces=True, lut = _get_lut() # Setup a dictionary of source types - src_types = dict(volume=[], surface=[], discrete=[]) + src_types = dict(volume=[], surface_discrete=[]) # Populate dictionary of source types for src in self: # volume sources if src['type'] == 'vol': src_types['volume'].append(src) - # surface sources - elif src['type'] == 'surf': - src_types['surface'].append(src) - # discrete sources - elif src['type'] == 'discrete': - src_types['discrete'].append(src) - # raise an error if dealing with source type other than volume - # surface or discrete + # surface and discrete sources + elif src['type'] in ('surf', 'discrete'): + src_types['surface_discrete'].append(src) else: raise ValueError('Unrecognized source type: %s.' % src['type']) + # Raise error if there are no volume source spaces + if len(src_types['volume']) == 0: + raise ValueError('Source spaces must contain at least one volume.') + # Get shape, inuse array and interpolation matrix from volume sources - inuse = 0 + src = src_types['volume'][0] + aseg_data = None + if mri_resolution: + # read the mri file used to generate volumes + if mri_resolution is True: + aseg_data = _get_img_fdata(nib.load(src['mri_file'])) + # get the voxel space shape + shape3d = (src['mri_width'], src['mri_depth'], + src['mri_height']) + else: + # get the volume source space shape + # read the shape in reverse order + # (otherwise results are scrambled) + shape3d = src['shape'] + + # calculate affine transform for image (MRI_VOXEL to RAS) + if mri_resolution: + # MRI_VOXEL to MRI transform + transform = src['vox_mri_t'] + else: + # MRI_VOXEL to MRI transform + # NOTE: 'src' indicates downsampled version of MRI_VOXEL + transform = src['src_mri_t'] + + # Figure out how to get from our input source space to output voxels + fro_dst_t = invert_transform(transform) + dest = transform['to'] + if coords == 'head': + head_mri_t = _get_trans(trans, 'head', 'mri')[0] + fro_dst_t = combine_transforms(head_mri_t, fro_dst_t, 'head', dest) + else: + fro_dst_t = fro_dst_t + + # Fill in the volumes + img = np.zeros(shape3d) for ii, vs in enumerate(src_types['volume']): # read the lookup table value for segmented volume if 'seg_name' not in vs: raise ValueError('Volume sources should be segments, ' 'not the entire volume.') # find the color value for this volume - id_ = _get_lut_id(lut, vs['seg_name'], use_lut) - - if ii == 0: - # get the inuse array - if mri_resolution: - # read the mri file used to generate volumes - aseg_data = _get_img_fdata(nib.load(vs['mri_file'])) - # get the voxel space shape - shape3d = (vs['mri_height'], vs['mri_depth'], - vs['mri_width']) - else: - # get the volume source space shape - # read the shape in reverse order - # (otherwise results are scrambled) - shape3d = vs['shape'][2::-1] - if mri_resolution: + use_id = 1. 
+ if mri_resolution is True or use_lut: + id_ = _get_lut_id(lut, vs['seg_name']) + if use_lut: + use_id = id_ + + if mri_resolution == 'sparse': + idx = apply_trans(fro_dst_t, vs['rr'][vs['vertno']]) + idx = tuple(idx.round().astype(int).T) + elif mri_resolution is True: # fill the represented vol # get the values for this volume - use = id_ * (aseg_data == id_).astype(int).ravel('F') + idx = (aseg_data == id_) else: - use = id_ * vs['inuse'] - inuse += use - - # Raise error if there are no volume source spaces - if np.array(inuse).ndim == 0: - raise ValueError('Source spaces must contain at least one volume.') - - # create 3d grid in the MRI_VOXEL coordinate frame - # len of inuse array should match shape regardless of mri_resolution - assert len(inuse) == np.prod(shape3d) - - # setup the image in 3d space - img = inuse.reshape(shape3d).T - - # include surface and/or discrete source spaces - if include_surfaces or include_discrete: - - # setup affine transform for source spaces - if mri_resolution: - # get the MRI to MRI_VOXEL transform - affine = invert_transform(vs['vox_mri_t']) + assert mri_resolution is False + idx = vs['inuse'].reshape(shape3d, order='F').astype(bool) + img[idx] = use_id + + # loop through the surface and discrete source spaces + + # get the surface names (assumes left, right order. may want + # to add these names during source space generation + for src in src_types['surface_discrete']: + val = 1 + if src['type'] == 'surf': + if not include_surfaces: + continue + if use_lut: + surf_name = { + FIFF.FIFFV_MNE_SURF_LEFT_HEMI: 'Left', + FIFF.FIFFV_MNE_SURF_RIGHT_HEMI: 'Right', + }[src['id']] + '-Cerebral-Cortex' + val = _get_lut_id(lut, surf_name) else: - # get the MRI to SOURCE (MRI_VOXEL) transform - affine = invert_transform(vs['src_mri_t']) - - # modify affine if in head coordinates - if coords == 'head': - - # read mri -> head transformation - mri_head_t = _get_trans(trans)[0] - - # get the HEAD to MRI transform - head_mri_t = invert_transform(mri_head_t) - - # combine transforms, from HEAD to MRI_VOXEL - affine = combine_transforms(head_mri_t, affine, - 'head', 'mri_voxel') - - # loop through the surface source spaces - if include_surfaces: - - # get the surface names (assumes left, right order. may want - # to add these names during source space generation - surf_names = ['Left-Cerebral-Cortex', 'Right-Cerebral-Cortex'] - - for i, surf in enumerate(src_types['surface']): - # convert vertex positions from their native space - # (either HEAD or MRI) to MRI_VOXEL space - srf_rr = apply_trans(affine['trans'], surf['rr']) - # convert to numeric indices - ix_orig, iy_orig, iz_orig = srf_rr.T.round().astype(int) - # clip indices outside of volume space - ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1), - 0) - iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1), - 0) - iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1), - 0) - # compare original and clipped indices - n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip, - iz_orig != iz_clip)).any(0).sum() - # generate use warnings for clipping - if n_diff > 0: - warn('%s surface vertices lay outside of volume space.' - ' Consider using a larger volume space.' 
% n_diff)
-                    # get surface id or use default value
-                    i = _get_lut_id(lut, surf_names[i], use_lut)
-                    # update image to include surface voxels
-                    img[ix_clip, iy_clip, iz_clip] = i
-
-            # loop through discrete source spaces
-            if include_discrete:
-                for i, disc in enumerate(src_types['discrete']):
-                    # convert vertex positions from their native space
-                    # (either HEAD or MRI) to MRI_VOXEL space
-                    disc_rr = apply_trans(affine['trans'], disc['rr'])
-                    # convert to numeric indices
-                    ix_orig, iy_orig, iz_orig = disc_rr.T.astype(int)
-                    # clip indices outside of volume space
-                    ix_clip = np.maximum(np.minimum(ix_orig, shape3d[2] - 1),
-                                         0)
-                    iy_clip = np.maximum(np.minimum(iy_orig, shape3d[1] - 1),
-                                         0)
-                    iz_clip = np.maximum(np.minimum(iz_orig, shape3d[0] - 1),
-                                         0)
-                    # compare original and clipped indices
-                    n_diff = np.array((ix_orig != ix_clip, iy_orig != iy_clip,
-                                       iz_orig != iz_clip)).any(0).sum()
-                    # generate use warnings for clipping
-                    if n_diff > 0:
-                        warn('%s discrete vertices lay outside of volume '
-                             'space. Consider using a larger volume space.'
-                             % n_diff)
-                    # set default value
-                    img[ix_clip, iy_clip, iz_clip] = 1
-                    if use_lut:
-                        logger.info('Discrete sources do not have values on '
-                                    'the lookup table. Defaulting to 1.')
+            assert src['type'] == 'discrete'
+            if not include_discrete:
+                continue
+            if use_lut:
+                logger.info('Discrete sources do not have values on '
+                            'the lookup table. Defaulting to 1.')
+            # convert vertex positions from their native space
+            # (either HEAD or MRI) to MRI_VOXEL space
+            if mri_resolution is True:
+                use_rr = src['rr']
+            else:
+                assert mri_resolution is False or mri_resolution == 'sparse'
+                use_rr = src['rr'][src['vertno']]
+            srf_vox = apply_trans(fro_dst_t['trans'], use_rr)
+            # convert to numeric indices
+            ix_, iy_, iz_ = srf_vox.T.round().astype(int)
+            # clip indices outside of volume space
+            ix = np.clip(ix_, 0, shape3d[0] - 1)
+            iy = np.clip(iy_, 0, shape3d[1] - 1)
+            iz = np.clip(iz_, 0, shape3d[2] - 1)
+            # compare original and clipped indices
+            n_diff = ((ix_ != ix) | (iy_ != iy) | (iz_ != iz)).sum()
+            # generate use warnings for clipping
+            if n_diff > 0:
+                warn(f'{n_diff} {src["kind"]} vertices lay outside of volume '
+                     f'space. Consider using a larger volume space.')
+            # get surface id or use default value
+            # update image to include surface voxels
+            img[ix, iy, iz] = val

-        # calculate affine transform for image (MRI_VOXEL to RAS)
-        if mri_resolution:
-            # MRI_VOXEL to MRI transform
-            transform = vs['vox_mri_t']
-        else:
-            # MRI_VOXEL to MRI transform
-            # NOTE: 'src' indicates downsampled version of MRI_VOXEL
-            transform = vs['src_mri_t']
         if dest == 'mri':
             # combine with MRI to RAS transform
-            transform = combine_transforms(transform, vs['mri_ras_t'],
-                                           transform['from'],
-                                           vs['mri_ras_t']['to'])
+            transform = combine_transforms(
+                transform, vs['mri_ras_t'],
+                transform['from'], vs['mri_ras_t']['to'])

         # now setup the affine for volume image
         affine = transform['trans'].copy()
         # make sure affine converts from m to mm
         affine[:3] *= 1e3

-        # save volume data
-
-        # setup image for file
         if fname.endswith(('.nii', '.nii.gz')):  # save as nifti
             # setup the nifti header
@@ -825,7 +803,7 @@ def _read_one_source_space(fid, this):
         if tag is None:
             raise ValueError('Vertex data not found')

-        res['rr'] = tag.data.astype(np.float)  # double precision for mayavi
+        res['rr'] = tag.data.astype(np.float64)  # double precision for mayavi
         if res['rr'].shape[0] != res['np']:
             raise ValueError('Vertex information is incorrect')

@@ -857,7 +835,7 @@ def _read_one_source_space(fid, this):
     tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NUSE)
     if tag is None:
         res['nuse'] = 0
-        res['inuse'] = np.zeros(res['nuse'], dtype=np.int)
+        res['inuse'] = np.zeros(res['nuse'], dtype=np.int64)
         res['vertno'] = None
     else:
         res['nuse'] = int(tag.data)
@@ -865,7 +843,7 @@ def _read_one_source_space(fid, this):
         if tag is None:
             raise ValueError('Source selection information missing')

-        res['inuse'] = tag.data.astype(np.int).T
+        res['inuse'] = tag.data.astype(np.int64).T
         if len(res['inuse']) != res['np']:
             raise ValueError('Incorrect number of entries in source space '
                              'selection')
diff --git a/mne/stats/cluster_level.py b/mne/stats/cluster_level.py
index f6286cfa7f2..727e9460229 100644
--- a/mne/stats/cluster_level.py
+++ b/mne/stats/cluster_level.py
@@ -119,7 +119,7 @@ def _get_clusters_spatial(s, neighbors):
     # s is a vector of spatial indices that are significant, like:
     #     s = np.where(x_in)[0]
     # for x_in representing a single time-instant
-    r = np.ones(s.shape, np.bool)
+    r = np.ones(s.shape, bool)
     clusters = list()
     next_ind = 0 if s.size > 0 else -1
     while next_ind >= 0:
diff --git a/mne/surface.py b/mne/surface.py
index b50a6612795..dbea0c7709a 100644
--- a/mne/surface.py
+++ b/mne/surface.py
@@ -617,7 +617,7 @@ def _fread3(fobj):
 def _fread3_many(fobj, n):
     """Read 3-byte ints from an open binary file object."""
     b1, b2, b3 = np.fromfile(fobj, ">u1",
-                             3 * n).reshape(-1, 3).astype(np.int).T
+                             3 * n).reshape(-1, 3).astype(np.int64).T
     return (b1 << 16) + (b2 << 8) + b3


@@ -626,16 +626,15 @@ def read_curvature(filepath, binary=True):

     Parameters
     ----------
-    filepath: str
+    filepath : str
         Input path to the .curv file.
-    binary: bool
+    binary : bool
         Specify if the output array is to hold binary values. Defaults to
         True.

     Returns
     -------
-    curv: array, shape=(n_vertices,)
+    curv : array, shape=(n_vertices,)
         The curvature values loaded from the user given file.
-
     """
     with open(filepath, "rb") as fobj:
         magic = _fread3(fobj)
@@ -647,13 +646,14 @@ def read_curvature(filepath, binary=True):
         _fread3(fobj)
         curv = np.fromfile(fobj, ">i2", vnum) / 100
     if binary:
-        return 1 - np.array(curv != 0, np.int)
+        return 1 - np.array(curv != 0, np.int64)
     else:
         return curv


 @verbose
-def read_surface(fname, read_metadata=False, return_dict=False, verbose=None):
+def read_surface(fname, read_metadata=False, return_dict=False,
+                 file_format='auto', verbose=None):
     """Load a Freesurfer surface mesh in triangular format.

     Parameters
@@ -661,7 +661,9 @@
     fname : str
         The name of the file containing the surface.
     read_metadata : bool
-        Read metadata as key-value pairs.
+        Read metadata as key-value pairs. Only works when reading a FreeSurfer
+        surface file. For .obj files this dictionary will be empty.
+
         Valid keys:

             * 'head' : array of int
@@ -678,6 +680,13 @@
     return_dict : bool
         If True, a dictionary with surface parameters is returned.

+    file_format : 'auto' | 'freesurfer' | 'obj'
+        File format to use. Can be 'freesurfer' to read a FreeSurfer surface
+        file, or 'obj' to read a Wavefront .obj file (common format for
+        importing in other software), or 'auto' to attempt to infer from the
+        file name. Defaults to 'auto'.
+
+        .. versionadded:: 0.21.0
     %(verbose)s

     Returns
@@ -698,13 +707,63 @@
     read_tri
     """
     fname = _check_fname(fname, 'read', True)
-    ret = _get_read_geometry()(fname, read_metadata=read_metadata)
+    _check_option('file_format', file_format, ['auto', 'freesurfer', 'obj'])
+
+    if file_format == 'auto':
+        _, ext = op.splitext(fname)
+        if ext.lower() == '.obj':
+            file_format = 'obj'
+        else:
+            file_format = 'freesurfer'
+
+    if file_format == 'freesurfer':
+        ret = _get_read_geometry()(fname, read_metadata=read_metadata)
+    elif file_format == 'obj':
+        ret = _read_wavefront_obj(fname)
+        if read_metadata:
+            ret += (dict(),)
+
     if return_dict:
         ret += (dict(rr=ret[0], tris=ret[1], ntri=len(ret[1]),
                      use_tris=ret[1], np=len(ret[0])),)
     return ret


+def _read_wavefront_obj(fname):
+    """Read a surface from a Wavefront .obj file.
+
+    Parameters
+    ----------
+    fname : str
+        Name of the .obj file to read.
+
+    Returns
+    -------
+    coords : ndarray, shape (n_points, 3)
+        The XYZ coordinates of each vertex.
+    faces : ndarray, shape (n_faces, 3)
+        For each face of the mesh, the integer indices of the vertices that
+        make up the face.
+    """
+    coords = []
+    faces = []
+    with open(fname) as f:
+        for line in f:
+            line = line.strip()
+            if len(line) == 0 or line[0] == "#":
+                continue
+            split = line.split()
+            if split[0] == "v":  # vertex
+                coords.append([float(item) for item in split[1:]])
+            elif split[0] == "f":  # face
+                dat = [int(item.split("/")[0]) for item in split[1:]]
+                if len(dat) != 3:
+                    raise RuntimeError('Only triangle faces allowed.')
+                # In .obj files, indexing starts at 1
+                faces.append([d - 1 for d in dat])
+    return np.array(coords), np.array(faces)
+
+
 ##############################################################################
 # SURFACE CREATION

@@ -918,7 +977,7 @@ def _decimate_surface_spacing(surf, spacing):


 def write_surface(fname, coords, faces, create_stamp='', volume_info=None,
-                  overwrite=False):
+                  file_format='auto', overwrite=False):
     """Write a triangular Freesurfer surface mesh.

     Accepts the same data format as is returned by read_surface().
@@ -950,6 +1009,13 @@ def write_surface(fname, coords, faces, create_stamp='', volume_info=None, * 'cras' : array of float, shape (3,) .. versionadded:: 0.13.0 + file_format : 'auto' | 'freesurfer' | 'obj' + File format to use. Can be 'freesurfer' to write a FreeSurfer surface + file, or 'obj' to write a Wavefront .obj file (common format for + importing in other software), or 'auto' to attempt to infer from the + file name. Defaults to 'auto'. + + .. versionadded:: 0.21.0 overwrite : bool If True, overwrite the file if it exists. @@ -959,33 +1025,52 @@ def write_surface(fname, coords, faces, create_stamp='', volume_info=None, read_tri """ fname = _check_fname(fname, overwrite=overwrite) - try: - import nibabel as nib - has_nibabel = True - except ImportError: - has_nibabel = False - if has_nibabel and LooseVersion(nib.__version__) > LooseVersion('2.1.0'): - nib.freesurfer.io.write_geometry(fname, coords, faces, - create_stamp=create_stamp, - volume_info=volume_info) - return - if len(create_stamp.splitlines()) > 1: - raise ValueError("create_stamp can only contain one line") - - with open(fname, 'wb') as fid: - fid.write(pack('>3B', 255, 255, 254)) - strs = ['%s\n' % create_stamp, '\n'] - strs = [s.encode('utf-8') for s in strs] - fid.writelines(strs) - vnum = len(coords) - fnum = len(faces) - fid.write(pack('>2i', vnum, fnum)) - fid.write(np.array(coords, dtype='>f4').tobytes()) - fid.write(np.array(faces, dtype='>i4').tobytes()) - - # Add volume info, if given - if volume_info is not None and len(volume_info) > 0: - fid.write(_serialize_volume_info(volume_info)) + _check_option('file_format', file_format, ['auto', 'freesurfer', 'obj']) + + if file_format == 'auto': + _, ext = op.splitext(fname) + if ext.lower() == '.obj': + file_format = 'obj' + else: + file_format = 'freesurfer' + + if file_format == 'freesurfer': + try: + import nibabel as nib + has_nibabel = LooseVersion(nib.__version__) > LooseVersion('2.1.0') + except ImportError: + has_nibabel = False + if has_nibabel: + nib.freesurfer.io.write_geometry(fname, coords, faces, + create_stamp=create_stamp, + volume_info=volume_info) + return + if len(create_stamp.splitlines()) > 1: + raise ValueError("create_stamp can only contain one line") + + with open(fname, 'wb') as fid: + fid.write(pack('>3B', 255, 255, 254)) + strs = ['%s\n' % create_stamp, '\n'] + strs = [s.encode('utf-8') for s in strs] + fid.writelines(strs) + vnum = len(coords) + fnum = len(faces) + fid.write(pack('>2i', vnum, fnum)) + fid.write(np.array(coords, dtype='>f4').tobytes()) + fid.write(np.array(faces, dtype='>i4').tobytes()) + + # Add volume info, if given + if volume_info is not None and len(volume_info) > 0: + fid.write(_serialize_volume_info(volume_info)) + + elif file_format == 'obj': + with open(fname, 'w') as fid: + for line in create_stamp.splitlines(): + fid.write(f'# {line}\n') + for v in coords: + fid.write(f'v {v[0]} {v[1]} {v[2]}\n') + for f in faces: + fid.write(f'f {f[0] + 1} {f[1] + 1} {f[2] + 1}\n') ############################################################################### diff --git a/mne/tests/test_bem.py b/mne/tests/test_bem.py index ab79f9b9b0a..146dec476a6 100644 --- a/mne/tests/test_bem.py +++ b/mne/tests/test_bem.py @@ -245,28 +245,34 @@ def test_fit_sphere_to_headshape(): # Top of the head (extra point) {'coord_frame': FIFF.FIFFV_COORD_HEAD, 'kind': FIFF.FIFFV_POINT_EXTRA, + 'ident': 0, 'r': np.array([0.0, 0.0, 1.0])}, # EEG points # Fz {'coord_frame': FIFF.FIFFV_COORD_HEAD, 'kind': FIFF.FIFFV_POINT_EEG, + 'ident': 0, 'r': 
np.array([0, .72, .69])}, # F3 {'coord_frame': FIFF.FIFFV_COORD_HEAD, 'kind': FIFF.FIFFV_POINT_EEG, + 'ident': 1, 'r': np.array([-.55, .67, .50])}, # F4 {'coord_frame': FIFF.FIFFV_COORD_HEAD, 'kind': FIFF.FIFFV_POINT_EEG, + 'ident': 2, 'r': np.array([.55, .67, .50])}, # Cz {'coord_frame': FIFF.FIFFV_COORD_HEAD, 'kind': FIFF.FIFFV_POINT_EEG, + 'ident': 3, 'r': np.array([0.0, 0.0, 1.0])}, # Pz {'coord_frame': FIFF.FIFFV_COORD_HEAD, 'kind': FIFF.FIFFV_POINT_EEG, + 'ident': 4, 'r': np.array([0, -.72, .69])}, ] for d in dig: diff --git a/mne/tests/test_chpi.py b/mne/tests/test_chpi.py index bf6aa2b84f2..4a8ff728ec8 100644 --- a/mne/tests/test_chpi.py +++ b/mne/tests/test_chpi.py @@ -270,6 +270,7 @@ def test_calculate_chpi_positions_vv(): _calculate_chpi_positions(raw) +@pytest.mark.slowtest def test_calculate_chpi_positions_artemis(): """Test on 5k artemis data.""" raw = read_raw_artemis123(art_fname, preload=True) diff --git a/mne/tests/test_cov.py b/mne/tests/test_cov.py index 6ba1fd7c11c..8232f43be58 100644 --- a/mne/tests/test_cov.py +++ b/mne/tests/test_cov.py @@ -147,7 +147,7 @@ def test_cov_order(): whitener_2, w_ch_names_2, n_nzero_2 = compute_whitener( cov_reorder, info, return_rank=True) assert_array_equal(w_ch_names_2, w_ch_names) - assert_allclose(whitener_2, whitener) + assert_allclose(whitener_2, whitener, rtol=1e-6) assert n_nzero == n_nzero_2 # with pca assert n_nzero < whitener.shape[0] @@ -160,7 +160,7 @@ def test_cov_order(): evoked = read_evokeds(ave_fname)[0] evoked_white = whiten_evoked(evoked, cov) evoked_white_2 = whiten_evoked(evoked, cov_reorder) - assert_allclose(evoked_white_2.data, evoked_white.data) + assert_allclose(evoked_white_2.data, evoked_white.data, atol=1e-7) def _assert_reorder(cov_new, cov_orig, order): diff --git a/mne/tests/test_docstring_parameters.py b/mne/tests/test_docstring_parameters.py index edd466342da..4aee7b43610 100644 --- a/mne/tests/test_docstring_parameters.py +++ b/mne/tests/test_docstring_parameters.py @@ -36,6 +36,7 @@ 'mne.simulation', 'mne.source_estimate', 'mne.source_space', + 'mne.surface', 'mne.stats', 'mne.time_frequency', 'mne.time_frequency.tfr', diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index 19c30b93e90..f1e5163907e 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -1174,8 +1174,9 @@ def test_evoked_io_from_epochs(tmpdir): picks=picks, decim=5) evoked = epochs.average() evoked.info['proj_name'] = '' # Test that empty string shortcuts to None. 
- evoked.save(op.join(tempdir, 'evoked-ave.fif')) - evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0] + fname_temp = op.join(tempdir, 'evoked-ave.fif') + evoked.save(fname_temp) + evoked2 = read_evokeds(fname_temp)[0] assert_equal(evoked2.info['proj_name'], None) assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20) assert_allclose(evoked.times, evoked2.times, rtol=1e-4, @@ -1185,8 +1186,8 @@ def test_evoked_io_from_epochs(tmpdir): epochs = Epochs(raw, events[:4], event_id, 0.1, tmax, picks=picks, baseline=(0.1, 0.2), decim=5) evoked = epochs.average() - evoked.save(op.join(tempdir, 'evoked-ave.fif')) - evoked2 = read_evokeds(op.join(tempdir, 'evoked-ave.fif'))[0] + evoked.save(fname_temp) + evoked2 = read_evokeds(fname_temp)[0] assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20) assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20) @@ -1198,6 +1199,25 @@ def test_evoked_io_from_epochs(tmpdir): assert_allclose(evoked.data, evoked2.data, rtol=1e-4, atol=1e-20) assert_allclose(evoked.times, evoked2.times, rtol=1e-4, atol=1e-20) + # should work when one channel type is changed to a non-data ch + picks = pick_types(raw.info, meg=True, eeg=True) + epochs = Epochs(raw, events[:4], event_id, -0.2, tmax, + picks=picks, baseline=(0.1, 0.2), decim=5) + with pytest.warns(RuntimeWarning, match='unit for.*changed from'): + epochs.set_channel_types({epochs.ch_names[0]: 'syst'}) + evokeds = list() + for picks in (None, 'all'): + evoked = epochs.average(picks) + evokeds.append(evoked) + evoked.save(fname_temp) + evoked2 = read_evokeds(fname_temp)[0] + start = 1 if picks is None else 0 + for ev in (evoked, evoked2): + assert ev.ch_names == epochs.ch_names[start:] + assert_allclose(ev.data, epochs.get_data().mean(0)[start:]) + with pytest.raises(ValueError, match='.*nchan.* must match'): + write_evokeds(fname_temp, evokeds) + def test_evoked_standard_error(tmpdir): """Test calculation and read/write of standard error.""" diff --git a/mne/tests/test_morph.py b/mne/tests/test_morph.py index 0497d2f781b..6ba0785746f 100644 --- a/mne/tests/test_morph.py +++ b/mne/tests/test_morph.py @@ -23,7 +23,7 @@ from mne.minimum_norm import (apply_inverse, read_inverse_operator, make_inverse_operator) from mne.source_space import (get_volume_labels_from_aseg, _get_mri_info_data, - _get_atlas_values) + _get_atlas_values, _add_interpolator) from mne.utils import (run_tests_if_main, requires_nibabel, check_version, requires_dipy, requires_h5py) from mne.fixes import _get_args @@ -39,16 +39,19 @@ 'sample_audvis_trunc-meg-vol-7-meg-inv.fif') fname_fwd_vol = op.join(sample_dir, 'sample_audvis_trunc-meg-vol-7-fwd.fif') -fname_vol = op.join(sample_dir, - 'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w') +fname_vol_w = op.join(sample_dir, + 'sample_audvis_trunc-grad-vol-7-fwd-sensmap-vol.w') fname_inv_surf = op.join(sample_dir, 'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif') fname_fmorph = op.join(data_path, 'MEG', 'sample', 'fsaverage_audvis_trunc-meg') fname_smorph = op.join(sample_dir, 'sample_audvis_trunc-meg') fname_t1 = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz') +fname_vol = op.join(subjects_dir, 'sample', 'bem', 'sample-volume-7mm-src.fif') fname_brain = op.join(subjects_dir, 'sample', 'mri', 'brain.mgz') fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz') +fname_fs_vol = op.join(subjects_dir, 'fsaverage', 'bem', + 'fsaverage-vol7-nointerp-src.fif.gz') fname_aseg_fs = op.join(subjects_dir, 'fsaverage', 'mri', 'aseg.mgz') fname_stc = op.join(sample_dir, 
'fsaverage_audvis_trunc-meg') @@ -196,6 +199,16 @@ def test_surface_source_morph_round_trip(smooth, lower, upper, n_warn): stc_back = morph_back.apply(stc_fs) corr = np.corrcoef(stc.data.ravel(), stc_back.data.ravel())[0, 1] assert lower <= corr <= upper + # check the round-trip power + assert_power_preserved(stc, stc_back) + + +def assert_power_preserved(orig, new, limits=(1., 1.05)): + """Assert that the power is preserved during a round-trip morph.""" + __tracebackhide__ = True + power_ratio = np.linalg.norm(orig.data) / np.linalg.norm(new.data) + min_, max_ = limits + assert min_ < power_ratio < max_, 'Power ratio' @requires_h5py @@ -245,7 +258,7 @@ def test_surface_vector_source_morph(tmpdir): assert isinstance(source_morph_surf.apply(stc_surf), SourceEstimate) # degenerate - stc_vol = read_source_estimate(fname_vol, 'sample') + stc_vol = read_source_estimate(fname_vol_w, 'sample') with pytest.raises(TypeError, match='stc_from must be an instance'): source_morph_surf.apply(stc_vol) @@ -259,7 +272,7 @@ def test_volume_source_morph(tmpdir): """Test volume source estimate morph, special cases and exceptions.""" import nibabel as nib inverse_operator_vol = read_inverse_operator(fname_inv_vol) - stc_vol = read_source_estimate(fname_vol, 'sample') + stc_vol = read_source_estimate(fname_vol_w, 'sample') # check for invalid input type with pytest.raises(TypeError, match='src must be'): @@ -284,7 +297,7 @@ def test_volume_source_morph(tmpdir): with pytest.raises(ValueError, match='Only surface.*sparse morph'): compute_source_morph(src=src, sparse=True, subjects_dir=subjects_dir) - # terrible quality buts fast + # terrible quality but fast zooms = 20 kwargs = dict(zooms=zooms, niter_sdr=(1,), niter_affine=(1,)) source_morph_vol = compute_source_morph( @@ -433,6 +446,87 @@ def test_volume_source_morph(tmpdir): source_morph_vol.apply(stc_vol_bad) +@requires_h5py +@requires_nibabel() +@requires_dipy() +@pytest.mark.slowtest +@testing.requires_testing_data +@pytest.mark.parametrize('subject_from, subject_to, lower, upper', [ + ('sample', 'fsaverage', 8.5, 9), + ('fsaverage', 'fsaverage', 7, 7.5), + ('sample', 'sample', 6, 7), +]) +def test_volume_source_morph_round_trip( + tmpdir, subject_from, subject_to, lower, upper): + """Test volume source estimate morph round-trips well.""" + import nibabel as nib + from nibabel.processing import resample_from_to + src = dict() + if 'sample' in (subject_from, subject_to): + src['sample'] = mne.read_source_spaces(fname_vol) + src['sample'][0]['subject_his_id'] = 'sample' + assert src['sample'][0]['nuse'] == 4157 + if 'fsaverage' in (subject_from, subject_to): + # Created to save space with: + # + # bem = op.join(op.dirname(mne.__file__), 'data', 'fsaverage', + # 'fsaverage-inner_skull-bem.fif') + # src_fsaverage = mne.setup_volume_source_space( + # 'fsaverage', pos=7., bem=bem, mindist=0, + # subjects_dir=subjects_dir, add_interpolator=False) + # mne.write_source_spaces(fname_fs_vol, src_fsaverage, overwrite=True) + # + # For speed we do it without the interpolator because it's huge. 
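+    # (Editorial sketch, not part of the original patch.)
+    # assert_power_preserved() above only bounds the ratio of Frobenius
+    # norms, e.g. a uniform 5% amplitude loss gives
+    #     np.linalg.norm(x) / np.linalg.norm(x * 0.95)  # ~1.0526
+    # so the default limits=(1., 1.05) tolerate just under 5% shrinkage.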
+ src['fsaverage'] = mne.read_source_spaces(fname_fs_vol) + src['fsaverage'][0].update( + vol_dims=np.array([23, 29, 25]), seg_name='brain') + _add_interpolator(src['fsaverage'], True) + assert src['fsaverage'][0]['nuse'] == 6379 + src_to, src_from = src[subject_to], src[subject_from] + del src + # No SDR just for speed once everything works + kwargs = dict(niter_sdr=(), niter_affine=(1,), + subjects_dir=subjects_dir, verbose=True) + morph_from_to = compute_source_morph( + src=src_from, src_to=src_to, subject_to=subject_to, **kwargs) + morph_to_from = compute_source_morph( + src=src_to, src_to=src_from, subject_to=subject_from, **kwargs) + use = np.linspace(0, src_from[0]['nuse'] - 1, 10).round().astype(int) + stc_from = VolSourceEstimate( + np.eye(src_from[0]['nuse'])[:, use], [src_from[0]['vertno']], 0, 1) + stc_from_rt = morph_to_from.apply(morph_from_to.apply(stc_from)) + maxs = np.argmax(stc_from_rt.data, axis=0) + src_rr = src_from[0]['rr'][src_from[0]['vertno']] + dists = 1000 * np.linalg.norm(src_rr[use] - src_rr[maxs], axis=1) + mu = np.mean(dists) + assert lower <= mu < upper # fsaverage=7.97; 25.4 without src_ras_t fix + # check that pre_affine is close to identity when subject_to==subject_from + if subject_to == subject_from: + for morph in (morph_to_from, morph_from_to): + assert_allclose( + morph.pre_affine.affine, np.eye(4), atol=1e-2) + # check that power is more or less preserved + ratio = stc_from.data.size / stc_from_rt.data.size + limits = ratio * np.array([1, 1.2]) + stc_from.crop(0, 0)._data.fill(1.) + stc_from_rt = morph_to_from.apply(morph_from_to.apply(stc_from)) + assert_power_preserved(stc_from, stc_from_rt, limits=limits) + # before and after morph, check the proportion of vertices + # that are inside and outside the brainmask.mgz + brain = nib.load(op.join(subjects_dir, subject_from, 'mri', 'brain.mgz')) + mask = _get_img_fdata(brain) > 0 + if subject_from == subject_to == 'sample': + for stc in [stc_from, stc_from_rt]: + img = stc.as_volume(src_from, mri_resolution=True) + img = nib.Nifti1Image(_get_img_fdata(img)[:, :, :, 0], img.affine) + img = _get_img_fdata(resample_from_to(img, brain, order=1)) + assert img.shape == mask.shape + in_ = img[mask].astype(bool).mean() + out = img[~mask].astype(bool).mean() + assert 0.97 < in_ < 0.98 + assert out < 0.02 + + @pytest.mark.slowtest @testing.requires_testing_data def test_morph_stc_dense(): diff --git a/mne/tests/test_proj.py b/mne/tests/test_proj.py index 837c0938092..3b5790aea9d 100644 --- a/mne/tests/test_proj.py +++ b/mne/tests/test_proj.py @@ -175,7 +175,7 @@ def test_compute_proj_epochs(): p2_data = p2['data']['data'] * np.sign(p2['data']['data'][0, 0]) if bad_ch in p1['data']['col_names']: bad = p1['data']['col_names'].index('MEG 2443') - mask = np.ones(p1_data.size, dtype=np.bool) + mask = np.ones(p1_data.size, dtype=bool) mask[bad] = False p1_data = p1_data[:, mask] p2_data = p2_data[:, mask] diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index d78c759e9c9..0ff1ece8060 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -11,6 +11,7 @@ assert_allclose, assert_equal) import pytest from scipy import sparse +from scipy.optimize import fmin_cobyla from mne import (stats, SourceEstimate, VectorSourceEstimate, VolSourceEstimate, Label, read_source_spaces, @@ -988,7 +989,7 @@ def test_spatio_temporal_src_connectivity(): def test_to_data_frame(): """Test stc Pandas exporter.""" n_vert, n_times = 10, 5 - vertices = [np.arange(n_vert, 
dtype=np.int), np.empty(0, dtype=np.int)] + vertices = [np.arange(n_vert, dtype=np.int64), np.empty(0, dtype=np.int64)] data = rng.randn(n_vert, n_times) stc_surf = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1, subject='sample') @@ -1010,7 +1011,7 @@ def test_to_data_frame(): def test_to_data_frame_index(index): """Test index creation in stc Pandas exporter.""" n_vert, n_times = 10, 5 - vertices = [np.arange(n_vert, dtype=np.int), np.empty(0, dtype=np.int)] + vertices = [np.arange(n_vert, dtype=np.int64), np.empty(0, dtype=np.int64)] data = rng.randn(n_vert, n_times) stc = SourceEstimate(data, vertices=vertices, tmin=0, tstep=1, subject='sample') @@ -1119,23 +1120,32 @@ def test_mixed_stc(tmpdir): (VolVectorSourceEstimate, 'discrete'), (MixedVectorSourceEstimate, 'mixed'), ]) -def test_vec_stc_basic(tmpdir, klass, kind): +@pytest.mark.parametrize('dtype', [ + np.float32, np.float64, np.complex64, np.complex128]) +def test_vec_stc_basic(tmpdir, klass, kind, dtype): """Test (vol)vector source estimate.""" nn = np.array([ [1, 0, 0], [0, 1, 0], - [0, 0, 1], + [np.sqrt(1. / 2.), 0, np.sqrt(1. / 2.)], [np.sqrt(1 / 3.)] * 3 - ]) + ], np.float32) data = np.array([ [1, 0, 0], [0, 2, 0], - [3, 0, 0], + [-3, 0, 0], [1, 1, 1], - ])[:, :, np.newaxis] - magnitudes = np.linalg.norm(data, axis=1)[:, 0] - normals = np.array([1, 2, 0, np.sqrt(3)]) + ], dtype)[:, :, np.newaxis] + amplitudes = np.array([1, 2, 3, np.sqrt(3)], dtype) + magnitudes = amplitudes.copy() + normals = np.array([1, 2, -3. / np.sqrt(2), np.sqrt(3)], dtype) + if dtype in (np.complex64, np.complex128): + data *= 1j + amplitudes *= 1j + normals *= 1j + directions = np.array( + [[1, 0, 0], [0, 1, 0], [-1, 0, 0], [1. / np.sqrt(3)] * 3]) vol_kind = kind if kind in ('discrete', 'vol') else 'vol' vol_src = SourceSpaces([dict(nn=nn, type=vol_kind)]) assert vol_src.kind == dict(vol='volume').get(vol_kind, vol_kind) @@ -1155,20 +1165,35 @@ def test_vec_stc_basic(tmpdir, klass, kind): verts = surf_verts + vol_verts assert src.kind == 'mixed' data = np.tile(data, (2, 1, 1)) + amplitudes = np.tile(amplitudes, 2) magnitudes = np.tile(magnitudes, 2) normals = np.tile(normals, 2) + directions = np.tile(directions, (2, 1)) stc = klass(data, verts, 0, 1, 'foo') + amplitudes = amplitudes[:, np.newaxis] + magnitudes = magnitudes[:, np.newaxis] # Magnitude of the vectors - assert_array_equal(stc.magnitude().data[:, 0], magnitudes) + assert_array_equal(stc.magnitude().data, magnitudes) # Vector components projected onto the vertex normals if kind in ('vol', 'mixed'): with pytest.raises(RuntimeError, match='surface or discrete'): - stc.normal(src) + stc.project('normal', src)[0] else: - normal = stc.normal(src) - assert_array_equal(normal.data[:, 0], normals) + normal = stc.project('normal', src)[0] + assert_allclose(normal.data[:, 0], normals) + + # Maximal-variance component, either to keep amps pos or to align to src-nn + projected, got_directions = stc.project('pca') + assert_allclose(got_directions, directions) + assert_allclose(projected.data, amplitudes) + projected, got_directions = stc.project('pca', src) + flips = np.array([[1], [1], [-1.], [1]]) + if klass is MixedVectorSourceEstimate: + flips = np.tile(flips, (2, 1)) + assert_allclose(got_directions, directions * flips) + assert_allclose(projected.data, amplitudes * flips) out_name = tmpdir.join('temp.h5') stc.save(out_name) @@ -1188,6 +1213,37 @@ def test_vec_stc_basic(tmpdir, klass, kind): klass(data, verts, 0, 1) +@pytest.mark.parametrize('real', (True, False)) +def 
test_source_estimate_project(real):
+    """Test projecting a source estimate onto direction of max power."""
+    n_src, n_times = 4, 100
+    rng = np.random.RandomState(0)
+    data = rng.randn(n_src, 3, n_times)
+    if not real:
+        data = data + 1j * rng.randn(n_src, 3, n_times)
+        assert data.dtype == np.complex128
+    else:
+        assert data.dtype == np.float64
+
+    # Make sure that the normal we get maximizes the power
+    # (i.e., minimizes the negative power)
+    want_nn = np.empty((n_src, 3))
+    for ii in range(n_src):
+        x0 = np.ones(3)
+
+        def objective(x):
+            x = x / np.linalg.norm(x)
+            return -np.linalg.norm(np.dot(x, data[ii]))
+        want_nn[ii] = fmin_cobyla(objective, x0, (), rhobeg=0.1, rhoend=1e-6)
+    want_nn /= np.linalg.norm(want_nn, axis=1, keepdims=True)
+
+    stc = VolVectorSourceEstimate(data, [np.arange(n_src)], 0, 1)
+    stc_max, directions = stc.project('pca')
+    flips = np.sign(np.sum(directions * want_nn, axis=1, keepdims=True))
+    directions *= flips
+    assert_allclose(directions, want_nn, atol=1e-6)
+
+
 @pytest.fixture(scope='module', params=[testing._pytest_param()])
 def invs():
     """Inverses of various amounts of loose."""
@@ -1251,7 +1307,8 @@ def test_vec_stc_inv_fixed(invs, pick_ori):
     evoked, _, _, _, fixed, fixedish = invs
     stc_fixed = apply_inverse(evoked, fixed)
     stc_fixed_vector = apply_inverse(evoked, fixed, pick_ori='vector')
-    assert_allclose(stc_fixed.data, stc_fixed_vector.normal(fixed['src']).data)
+    assert_allclose(stc_fixed.data,
+                    stc_fixed_vector.project('normal', fixed['src'])[0].data)
     stc_fixedish = apply_inverse(evoked, fixedish, pick_ori=pick_ori)
     if pick_ori == 'vector':
         assert_allclose(stc_fixed_vector.data, stc_fixedish.data, atol=1e-2)
@@ -1259,7 +1316,7 @@
         assert_allclose(
             abs(stc_fixed).data, stc_fixedish.magnitude().data, atol=1e-2)
         # ...
and when picking the normal (signed) - stc_fixedish = stc_fixedish.normal(fixedish['src']) + stc_fixedish = stc_fixedish.project('normal', fixedish['src'])[0] elif pick_ori is None: stc_fixed = abs(stc_fixed) else: diff --git a/mne/tests/test_source_space.py b/mne/tests/test_source_space.py index 720514cb5ce..eafb56e5bd8 100644 --- a/mne/tests/test_source_space.py +++ b/mne/tests/test_source_space.py @@ -19,6 +19,7 @@ morph_source_spaces, SourceEstimate, make_sphere_model, head_to_mni, read_trans, compute_source_morph, read_bem_solution, read_freesurfer_lut) +from mne.fixes import _get_img_fdata from mne.utils import (requires_nibabel, run_subprocess, modified_env, requires_mne, run_tests_if_main, check_version) @@ -682,13 +683,15 @@ def test_read_volume_from_src(): @requires_nibabel() def test_combine_source_spaces(tmpdir): """Test combining source spaces.""" - rng = np.random.RandomState(0) + import nibabel as nib + rng = np.random.RandomState(2) aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz') - label_names = get_volume_labels_from_aseg(aseg_fname) - volume_labels = rng.choice(label_names, 2) + volume_labels = ['Brain-Stem', 'Right-Hippocampus'] # two fairly large - # get a surface source space (no need to test creation here) - srf = read_source_spaces(fname, patch_stats=False) + # create a sparse surface source space to ensure all get mapped + # when mri_resolution=False + srf = setup_source_space('sample', 'oct3', add_dist=False, + subjects_dir=subjects_dir) # setup 2 volume source spaces vol = setup_volume_source_space('sample', subjects_dir=subjects_dir, @@ -696,7 +699,7 @@ def test_combine_source_spaces(tmpdir): mri=aseg_fname, add_interpolator=False) # setup a discrete source space - rr = rng.randint(0, 20, (100, 3)) * 1e-3 + rr = rng.randint(0, 11, (20, 3)) * 5e-3 nn = np.zeros(rr.shape) nn[:, -1] = 1 pos = {'rr': rr, 'nn': nn} @@ -754,6 +757,22 @@ def test_combine_source_spaces(tmpdir): pytest.raises(ValueError, src_mixed_coord.export_volume, image_fname, verbose='error') + # now actually write it + fname_img = tmpdir.join('img.nii') + for mri_resolution in (False, 'sparse', True): + for src, up in ((vol, 705), + (srf + vol, 27272), + (disc + vol, 705)): + src.export_volume( + fname_img, use_lut=False, + mri_resolution=mri_resolution, overwrite=True) + img_data = _get_img_fdata(nib.load(str(fname_img))) + n_src = img_data.astype(bool).sum() + n_want = sum(s['nuse'] for s in src) + if mri_resolution is True: + n_want += up + assert n_src == n_want, src + @testing.requires_testing_data def test_morph_source_spaces(): diff --git a/mne/tests/test_surface.py b/mne/tests/test_surface.py index 2f8d81622c9..fa25c98b033 100644 --- a/mne/tests/test_surface.py +++ b/mne/tests/test_surface.py @@ -90,7 +90,7 @@ def test_compute_nearest(): """Test nearest neighbor searches.""" x = rng.randn(500, 3) x /= np.sqrt(np.sum(x ** 2, axis=1))[:, None] - nn_true = rng.permutation(np.arange(500, dtype=np.int))[:20] + nn_true = rng.permutation(np.arange(500, dtype=np.int64))[:20] y = x[nn_true] nn1 = _compute_nearest(x, y, method='BallTree') @@ -159,8 +159,8 @@ def test_io_surface(): tempdir = _TempDir() fname_quad = op.join(data_path, 'subjects', 'bert', 'surf', 'lh.inflated.nofix') - fname_tri = op.join(data_path, 'subjects', 'fsaverage', 'surf', - 'lh.inflated') + fname_tri = op.join(data_path, 'subjects', 'sample', 'bem', + 'inner_skull.surf') for fname in (fname_quad, fname_tri): with pytest.warns(None): # no volume info pts, tri, vol_info = read_surface(fname, read_metadata=True) @@ 
-172,6 +172,16 @@ def test_io_surface():
         assert_array_equal(pts, c_pts)
         assert_array_equal(tri, c_tri)
         assert_equal(object_diff(vol_info, c_vol_info), '')
+        if fname != fname_tri:  # don't bother testing wavefront for the
+            continue            # bigger file
+
+        # Test writing/reading a Wavefront .obj file
+        write_surface(op.join(tempdir, 'tmp.obj'), pts, tri, volume_info=None,
+                      overwrite=True)
+        c_pts, c_tri = read_surface(op.join(tempdir, 'tmp.obj'),
+                                    read_metadata=False)
+        assert_array_equal(pts, c_pts)
+        assert_array_equal(tri, c_tri)


 @testing.requires_testing_data
diff --git a/mne/time_frequency/_stft.py b/mne/time_frequency/_stft.py
index 2b7f540a578..a414afae6a3 100644
--- a/mne/time_frequency/_stft.py
+++ b/mne/time_frequency/_stft.py
@@ -65,7 +65,7 @@ def stft(x, wsize, tstep=None, verbose=None):
     logger.info("Number of frequencies: %d" % n_freq)
     logger.info("Number of time steps: %d" % n_step)

-    X = np.zeros((n_signals, n_freq, n_step), dtype=np.complex)
+    X = np.zeros((n_signals, n_freq, n_step), dtype=np.complex128)

     if n_signals == 0:
         return X
@@ -143,7 +143,7 @@ def istft(X, tstep=None, Tx=None):

     T = n_step * tstep

-    x = np.zeros((n_signals, T + wsize - tstep), dtype=np.float)
+    x = np.zeros((n_signals, T + wsize - tstep), dtype=np.float64)

     if n_signals == 0:
         return x[:, :Tx]
@@ -153,7 +153,7 @@
     # win = win / norm(win);

     # Pre-processing for edges
-    swin = np.zeros(T + wsize - tstep, dtype=np.float)
+    swin = np.zeros(T + wsize - tstep, dtype=np.float64)

     for t in range(n_step):
         swin[t * tstep:t * tstep + wsize] += win ** 2
     swin = np.sqrt(swin / wsize)
diff --git a/mne/time_frequency/_stockwell.py b/mne/time_frequency/_stockwell.py
index 3d1c7b852d6..6777c1ded22 100644
--- a/mne/time_frequency/_stockwell.py
+++ b/mne/time_frequency/_stockwell.py
@@ -47,7 +47,7 @@ def _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width):

     k = width  # 1 for classical Stockwell transform
     f_range = np.arange(start_f, stop_f, 1)
-    windows = np.empty((len(f_range), len(tw)), dtype=np.complex)
+    windows = np.empty((len(f_range), len(tw)), dtype=np.complex128)
     for i_f, f in enumerate(f_range):
         if f == 0.:
             window = np.ones(len(tw))
@@ -62,7 +62,7 @@
 def _st(x, start_f, windows):
     """Compute ST based on Ali Moukadem MATLAB code (used in tests)."""
     n_samp = x.shape[-1]
-    ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex)
+    ST = np.empty(x.shape[:-1] + (len(windows), n_samp), dtype=np.complex128)
     # do the work
     Fx = fftpack.fft(x)
     XF = np.concatenate([Fx, Fx], axis=-1)
diff --git a/mne/time_frequency/csd.py b/mne/time_frequency/csd.py
index c9505e008ed..f5923c3e4f2 100644
--- a/mne/time_frequency/csd.py
+++ b/mne/time_frequency/csd.py
@@ -711,7 +711,7 @@ def csd_array_fourier(X, sfreq, t0=0, fmin=0, fmax=np.inf, tmin=None,
 def csd_multitaper(epochs, fmin=0, fmax=np.inf, tmin=None, tmax=None,
                    picks=None, n_fft=None, bandwidth=None, adaptive=False,
                    low_bias=True, projs=None, n_jobs=1, verbose=None):
-    """Estimate cross-spectral density from epochs using Morlet wavelets.
+    """Estimate cross-spectral density from epochs using a multitaper method.

     Parameters
     ----------
@@ -771,7 +771,7 @@ def csd_array_multitaper(X, sfreq, t0=0, fmin=0, fmax=np.inf, tmin=None,
                          tmax=None, ch_names=None, n_fft=None,
                          bandwidth=None, adaptive=False, low_bias=True,
                          projs=None, n_jobs=1, verbose=None):
-    """Estimate cross-spectral density from an array using Morlet wavelets.
+ """Estimate cross-spectral density from an array using a multitaper method. Parameters ---------- @@ -1112,7 +1112,7 @@ def _execute_csd_function(X, times, frequencies, csd_function, params, n_fft, n_freqs = len(frequencies) csds_mean = np.zeros((n_channels * (n_channels + 1) // 2, n_freqs), - dtype=np.complex) + dtype=np.complex128) # Prepare the function that does the actual CSD computation for parallel # execution. diff --git a/mne/time_frequency/tests/test_stft.py b/mne/time_frequency/tests/test_stft.py index 080620d867c..0784ace90b8 100644 --- a/mne/time_frequency/tests/test_stft.py +++ b/mne/time_frequency/tests/test_stft.py @@ -12,7 +12,7 @@ def test_stft(): f = 7. # Hz for T in [127, 128]: # try with even and odd numbers # Test with low frequency signal - t = np.arange(T).astype(np.float) + t = np.arange(T).astype(np.float64) x = np.sin(2 * np.pi * f * t / sfreq) x = np.array([x, x + 1.]) wsize = 128 diff --git a/mne/time_frequency/tests/test_tfr.py b/mne/time_frequency/tests/test_tfr.py index 62f4e2932a0..747d35035bf 100644 --- a/mne/time_frequency/tests/test_tfr.py +++ b/mne/time_frequency/tests/test_tfr.py @@ -285,7 +285,7 @@ def test_tfr_multitaper(): seed = 42 rng = np.random.RandomState(seed) noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times) - t = np.arange(n_times, dtype=np.float) / sfreq + t = np.arange(n_times, dtype=np.float64) / sfreq signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing on_time = np.logical_and(t >= 0.45, t <= 0.55) @@ -302,7 +302,7 @@ def test_tfr_multitaper(): epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id, reject=reject) - freqs = np.arange(35, 70, 5, dtype=np.float) + freqs = np.arange(35, 70, 5, dtype=np.float64) power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0) @@ -655,9 +655,9 @@ def test_compute_tfr(): # Check types if output in ('complex', 'avg_power_itc'): - assert_equal(np.complex, out.dtype) + assert_equal(np.complex128, out.dtype) else: - assert_equal(np.float, out.dtype) + assert_equal(np.float64, out.dtype) assert (np.all(np.isfinite(out))) # Check errors params diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py index 1cbac6de50e..388b960653a 100644 --- a/mne/time_frequency/tfr.py +++ b/mne/time_frequency/tfr.py @@ -372,7 +372,7 @@ def _compute_tfr(epoch_data, freqs, sfreq=1.0, method='morlet', elif output in ('complex', 'avg_power_itc'): # avg_power_itc is stored as power + 1i * itc to keep a # simple dimensionality - dtype = np.complex + dtype = np.complex128 if ('avg_' in output) or ('itc' in output): out = np.empty((n_chans, n_freqs, n_times), dtype) @@ -497,7 +497,7 @@ def _time_frequency_loop(X, Ws, output, use_fft, mode, decim): # Set output type dtype = np.float if output in ['complex', 'avg_power_itc']: - dtype = np.complex + dtype = np.complex128 # Init outputs decim = _check_decim(decim) @@ -514,7 +514,7 @@ def _time_frequency_loop(X, Ws, output, use_fft, mode, decim): # Inter-trial phase locking is apparently computed per taper... 
if 'itc' in output: - plf = np.zeros((n_freqs, n_times), dtype=np.complex) + plf = np.zeros((n_freqs, n_times), dtype=np.complex128) # Loop across epochs for epoch_idx, tfr in enumerate(coefs): @@ -591,7 +591,7 @@ def cwt(X, Ws, use_fft=True, mode='same', decim=1): coefs = _cwt(X, Ws, mode, decim=decim, use_fft=use_fft) - tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex) + tfrs = np.empty((n_signals, len(Ws), n_times), dtype=np.complex128) for k, tfr in enumerate(coefs): tfrs[k] = tfr diff --git a/mne/utils/check.py b/mne/utils/check.py index 28cba60d276..352b6e03148 100644 --- a/mne/utils/check.py +++ b/mne/utils/check.py @@ -557,11 +557,11 @@ def _check_option(parameter, value, allowed_values, extra=''): '{options}, but got {value!r} instead.') allowed_values = list(allowed_values) # e.g., if a dict was given if len(allowed_values) == 1: - options = 'The only allowed value is %r' % allowed_values[0] + options = f'The only allowed value is {repr(allowed_values[0])}' else: options = 'Allowed values are ' - options += ', '.join(['%r' % v for v in allowed_values[:-1]]) - options += ' and %r' % allowed_values[-1] + options += ', '.join([f'{repr(v)}' for v in allowed_values[:-1]]) + options += f' and {repr(allowed_values[-1])}' raise ValueError(msg.format(parameter=parameter, options=options, value=value, extra=extra)) diff --git a/mne/viz/_3d.py b/mne/viz/_3d.py index feac812fb40..299ef72dcf9 100644 --- a/mne/viz/_3d.py +++ b/mne/viz/_3d.py @@ -42,6 +42,7 @@ _ensure_int, _validate_type, _check_option) from .utils import (mne_analyze_colormap, _get_color_list, plt_show, tight_layout, figure_nobar, _check_time_unit) +from .misc import _check_mri from ..bem import (ConductorModel, _bem_find_surface, _surf_dict, _surf_name, read_bem_surfaces) @@ -1463,7 +1464,7 @@ def _plot_mpl_stc(stc, subject=None, surface='inflated', hemi='lh', curv = nib.freesurfer.read_morph_data( op.join(subjects_dir, subject, 'surf', '%s.curv' % hemi))[inuse] - curv = np.clip(np.array(curv > 0, np.int), 0.33, 0.66) + curv = np.clip(np.array(curv > 0, np.int64), 0.33, 0.66) params = dict(ax=ax, stc=stc, coords=coords, faces=faces, hemi_idx=hemi_idx, vertices=vertices, e=e, smoothing_steps=smoothing_steps, n_verts=n_verts, @@ -1717,7 +1718,7 @@ def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh', "figure": figure, "subjects_dir": subjects_dir, "views": views, } - if _get_3d_backend() == "pyvista": + if _get_3d_backend() in ['pyvista', 'notebook']: kwargs["show"] = not time_viewer else: kwargs.update(_check_pysurfer_antialias(Brain)) @@ -1811,10 +1812,20 @@ def _ijk_to_cut_coords(ijk, img): return apply_trans(img.affine, ijk) +def _load_subject_mri(mri, stc, subject, subjects_dir, name): + import nibabel as nib + from nibabel.spatialimages import SpatialImage + _validate_type(mri, ('path-like', SpatialImage), name) + if isinstance(mri, str): + subject = _check_subject(stc.subject, subject, True) + mri = nib.load(_check_mri(mri, subject, subjects_dir)) + return mri + + @verbose def plot_volume_source_estimates(stc, src, subject=None, subjects_dir=None, - mode='stat_map', bg_img=None, colorbar=True, - colormap='auto', clim='auto', + mode='stat_map', bg_img='T1.mgz', + colorbar=True, colormap='auto', clim='auto', transparent=None, show=True, initial_time=None, initial_pos=None, verbose=None): @@ -1839,9 +1850,10 @@ def plot_volume_source_estimates(stc, src, subject=None, subjects_dir=None, The plotting mode to use. Either 'stat_map' (default) or 'glass_brain'. 
For "glass_brain", activation absolute values are displayed after being transformed to a standard MNI brain. - bg_img : instance of SpatialImage | None + bg_img : instance of SpatialImage | str The background image used in the nilearn plotting function. - If None, it is the T1.mgz file that is found in the subjects_dir. + Can also be a string to use the ``bg_img`` file in the subject's + MRI directory (default is ``'T1.mgz'``). Not used in "glass brain" plotting. colorbar : bool, optional If True, display a colorbar on the right of the plots. @@ -2071,11 +2083,9 @@ def _onclick(event, params, verbose=None): bg_img = None # not used else: # stat_map if bg_img is None: - subject = _check_subject(stc.subject, subject, True) - subjects_dir = get_subjects_dir(subjects_dir=subjects_dir, - raise_error=True) - t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz') - bg_img = nib.load(t1_fname) + bg_img = 'T1.mgz' + bg_img = _load_subject_mri( + bg_img, stc, subject, subjects_dir, 'bg_img') if initial_time is None: time_sl = slice(0, None) @@ -2333,10 +2343,8 @@ def plot_vector_source_estimates(stc, subject=None, hemi='lh', colormap='hot', if _get_3d_backend() == "mayavi": from surfer import Brain from surfer import __version__ as surfer_version - kwargs = _check_pysurfer_antialias(Brain) else: # PyVista from ._brain import _Brain as Brain - kwargs = dict() from ..source_estimate import VectorSourceEstimate _validate_type(stc, VectorSourceEstimate, "stc", "Vector Source Estimate") @@ -2365,12 +2373,20 @@ def plot_vector_source_estimates(stc, subject=None, hemi='lh', colormap='hot', smoothing_steps = 1 # Disable smoothing to save time. title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0]) + kwargs = { + "subject_id": subject, "hemi": hemi, "surf": 'white', + "title": title, "cortex": cortex, "size": size, + "background": background, "foreground": foreground, + "figure": figure, "subjects_dir": subjects_dir, + "views": views, "alpha": brain_alpha, + } + if _get_3d_backend() in ['pyvista', 'notebook']: + kwargs["show"] = not time_viewer + else: + kwargs.update(_check_pysurfer_antialias(Brain)) with warnings.catch_warnings(record=True): # traits warnings - brain = Brain(subject, hemi=hemi, surf='white', - title=title, cortex=cortex, size=size, - background=background, foreground=foreground, - figure=figure, subjects_dir=subjects_dir, - views=views, alpha=brain_alpha, **kwargs) + brain = Brain(**kwargs) + del kwargs if scale_factor is None: # Configure the glyphs scale directly width = np.mean([np.ptp(brain.geo[hemi].coords[:, 1]) diff --git a/mne/viz/_brain/_brain.py b/mne/viz/_brain/_brain.py index d3ecda8d6a8..15f931eb3bc 100644 --- a/mne/viz/_brain/_brain.py +++ b/mne/viz/_brain/_brain.py @@ -22,7 +22,8 @@ from ...surface import mesh_edges from ...morph import _hemi_morph from ...label import read_label, _read_annot -from ...utils import _check_option, logger, verbose, fill_doc, _validate_type +from ...utils import (_check_option, logger, verbose, fill_doc, _validate_type, + use_log_level) class _Brain(object): @@ -154,7 +155,7 @@ def __init__(self, subject_id, hemi, surf, title=None, views=['lateral'], offset=True, show_toolbar=False, offscreen=False, interaction=None, units='mm', show=True): - from ..backends.renderer import backend, _get_renderer + from ..backends.renderer import backend, _get_renderer, _get_3d_backend from matplotlib.colors import colorConverter if interaction is not None: @@ -195,6 +196,7 @@ def __init__(self, subject_id, hemi, surf, title=None, 'sequence 
of ints.') self._size = size if len(size) == 2 else size * 2 # 1-tuple to 2-tuple + self._notebook = (_get_3d_backend() == "notebook") self._hemi = hemi self._units = units self._subject_id = subject_id @@ -539,6 +541,8 @@ def add_data(self, array, fmin=None, fmid=None, fmax=None, self._renderer.set_camera(azimuth=views_dict[v].azim, elevation=views_dict[v].elev) + self._update() + def add_label(self, label, color=None, alpha=1, scalar_thresh=None, borders=False, hemi=None, subdir=None): """Add an ROI label to the image. @@ -666,6 +670,8 @@ def add_label(self, label, color=None, alpha=1, scalar_thresh=None, self._renderer.set_camera(azimuth=views_dict[v].azim, elevation=views_dict[v].elev) + self._update() + def add_foci(self, coords, coords_as_verts=False, map_surface=None, scale_factor=1, color="white", alpha=1, name=None, hemi=None): @@ -863,7 +869,7 @@ def add_annotation(self, annot, borders=True, alpha=1, hemi=None, rgb = np.round(np.multiply(colorConverter.to_rgb(color), 255)) cmap[:, :3] = rgb.astype(cmap.dtype) - ctable = cmap.astype(np.float) / 255. + ctable = cmap.astype(np.float64) / 255. mesh_data = self._renderer.mesh( x=self.geo[hemi].coords[:, 0], @@ -886,6 +892,8 @@ def add_annotation(self, annot, borders=True, alpha=1, hemi=None, None) self.resolve_coincident_topology(actor) + self._update() + def resolve_coincident_topology(self, actor): """Resolve z-fighting of overlapping surfaces.""" mapper = actor.GetMapper() @@ -915,6 +923,7 @@ def show_view(self, view=None, roll=None, distance=None, row=0, col=0, self._renderer.set_camera(azimuth=view.azim, elevation=view.elev) self._renderer.reset_camera() + self._update() def save_image(self, filename, mode='rgb'): """Save view from all panels to disk. @@ -995,10 +1004,11 @@ def set_data_smoothing(self, n_steps): % (len(hemi_data), self.geo[hemi].x.shape[0])) morph_n_steps = 'nearest' if n_steps == 0 else n_steps maps = sparse.eye(len(self.geo[hemi].coords), format='csr') - smooth_mat = _hemi_morph( - self.geo[hemi].faces, - np.arange(len(self.geo[hemi].coords)), - vertices, morph_n_steps, maps, warn=False) + with use_log_level(False): + smooth_mat = _hemi_morph( + self.geo[hemi].faces, + np.arange(len(self.geo[hemi].coords)), + vertices, morph_n_steps, maps, warn=False) self._data[hemi]['smooth_mat'] = smooth_mat self.set_time_point(self._data['time_idx']) self._data['smoothing_steps'] = n_steps @@ -1078,6 +1088,7 @@ def set_time_point(self, time_idx): self.update_glyphs(hemi, vectors) self._current_act_data = np.concatenate(current_act_data) self._data['time_idx'] = time_idx + self._update() def update_glyphs(self, hemi, vectors): from ..backends._pyvista import (_set_colormap_range, @@ -1438,7 +1449,7 @@ def _to_borders(self, label, hemi, borders, restrict_idx=None): edges = mesh_edges(self.geo[hemi].faces) edges = edges.tocoo() border_edges = label[edges.row] != label[edges.col] - show = np.zeros(n_vertices, dtype=np.int) + show = np.zeros(n_vertices, dtype=np.int64) keep_idx = np.unique(edges.row[border_edges]) if isinstance(borders, int): for _ in range(borders): @@ -1475,6 +1486,12 @@ def enable_depth_peeling(self): """Enable depth peeling.""" self._renderer.enable_depth_peeling() + def _update(self): + from ..backends import renderer + if renderer.get_3d_backend() in ['pyvista', 'notebook']: + if self._notebook and self._renderer.figure.display is not None: + self._renderer.figure.display.update() + def _safe_interp1d(x, y, kind='linear', axis=-1, assume_sorted=False): """Work around interp1d not liking singleton 
dimensions.""" diff --git a/mne/viz/_brain/_notebook.py b/mne/viz/_brain/_notebook.py new file mode 100644 index 00000000000..d7353df3f83 --- /dev/null +++ b/mne/viz/_brain/_notebook.py @@ -0,0 +1,51 @@ +# Authors: Guillaume Favelier +# +# License: Simplified BSD + +from ..backends._notebook \ + import _NotebookInteractor as _PyVistaNotebookInteractor + + +class _NotebookInteractor(_PyVistaNotebookInteractor): + def __init__(self, time_viewer): + self.time_viewer = time_viewer + self.brain = self.time_viewer.brain + super().__init__(self.brain._renderer) + + def configure_controllers(self): + from ipywidgets import IntSlider, FloatSlider, interactive + super().configure_controllers() + # time slider + max_time = len(self.brain._data['time']) - 1 + if max_time >= 1: + self.sliders["time"] = FloatSlider( + value=self.brain._data['time_idx'], + min=0, + max=max_time, + continuous_update=False + ) + self.controllers["time"] = interactive( + self.brain.set_time_point, + time_idx=self.sliders["time"], + ) + # orientation + self.controllers["orientation"] = interactive( + self.set_orientation, + orientation=self.time_viewer.orientation, + ) + # smoothing + self.sliders["smoothing"] = IntSlider( + value=self.brain._data['smoothing_steps'], + min=self.time_viewer.default_smoothing_range[0], + max=self.time_viewer.default_smoothing_range[1], + continuous_update=False + ) + self.controllers["smoothing"] = interactive( + self.brain.set_data_smoothing, + n_steps=self.sliders["smoothing"] + ) + + def set_orientation(self, orientation): + row, col = self.plotter.index_to_loc( + self.plotter._active_renderer_index) + self.brain.show_view(orientation, row=row, col=col) diff --git a/mne/viz/_brain/_timeviewer.py b/mne/viz/_brain/_timeviewer.py index 7c5e9e603c2..e2e8ddb7c01 100644 --- a/mne/viz/_brain/_timeviewer.py +++ b/mne/viz/_brain/_timeviewer.py @@ -86,6 +86,19 @@ def update_plot(self): framealpha=0.5, handlelength=1.) self.canvas.draw() + def set_color(self, bg_color, fg_color): + """Set the widget colors.""" + self.axes.set_facecolor(bg_color) + self.axes.xaxis.label.set_color(fg_color) + self.axes.yaxis.label.set_color(fg_color) + self.axes.spines['top'].set_color(fg_color) + self.axes.spines['bottom'].set_color(fg_color) + self.axes.spines['left'].set_color(fg_color) + self.axes.spines['right'].set_color(fg_color) + self.axes.tick_params(axis='x', colors=fg_color) + self.axes.tick_params(axis='y', colors=fg_color) + self.fig.patch.set_facecolor(bg_color) + def show(self): """Show the canvas.""" self.canvas.show() @@ -291,12 +304,33 @@ def __init__(self, brain, show_traces=False): from ..backends._pyvista import _require_minimum_version _require_minimum_version('0.24') + # shared configuration + self.brain = brain + self.orientation = [ + 'lateral', + 'medial', + 'rostral', + 'caudal', + 'dorsal', + 'ventral', + 'frontal', + 'parietal' + ] + self.default_smoothing_range = [0, 15] + + # detect notebook + if brain._notebook: + self.notebook = True + self.configure_notebook() + return + else: + self.notebook = False + # Default configuration self.playback = False self.visibility = False self.refresh_rate_ms = max(int(round(1000. / 60.)), 1) self.default_scaling_range = [0.2, 2.0] - self.default_smoothing_range = [0, 15] self.default_playback_speed_range = [0.01, 1] self.default_playback_speed_value = 0.05 self.default_status_bar_msg = "Press ? 
for help" @@ -307,16 +341,6 @@ def __init__(self, brain, show_traces=False): self.icons = dict() self.actions = dict() self.keys = ('fmin', 'fmid', 'fmax') - self.orientation = [ - 'lateral', - 'medial', - 'rostral', - 'caudal', - 'dorsal', - 'ventral', - 'frontal', - 'parietal' - ] self.slider_length = 0.02 self.slider_width = 0.04 self.slider_color = (0.43137255, 0.44313725, 0.45882353) @@ -324,7 +348,6 @@ def __init__(self, brain, show_traces=False): self.slider_tube_color = (0.69803922, 0.70196078, 0.70980392) # Direct access parameters: - self.brain = brain self.brain.time_viewer = self self.plotter = brain._renderer.plotter self.main_menu = self.plotter.main_menu @@ -563,6 +586,10 @@ def set_slider_style(self, slider, show_label=True, show_cap=False): if not show_label: slider_rep.ShowSliderLabelOff() + def configure_notebook(self): + from ._notebook import _NotebookInteractor + self.brain._renderer.figure.display = _NotebookInteractor(self) + def configure_time_label(self): self.time_actor = self.brain._data.get('time_actor') if self.time_actor is not None: @@ -805,6 +832,10 @@ def configure_point_picking(self): vlayout.addWidget(self.mpl_canvas.canvas) vlayout.setStretch(0, self.interactor_stretch) vlayout.setStretch(1, 1) + self.mpl_canvas.set_color( + bg_color=self.brain._bg_color, + fg_color=self.brain._fg_color, + ) self.mpl_canvas.show() # get brain data @@ -1067,7 +1098,7 @@ def plot_time_line(self): self.time_line = self.mpl_canvas.plot_time_line( x=current_time, label='time', - color='black', + color=self.brain._fg_color, lw=1, ) else: diff --git a/mne/viz/_brain/colormap.py b/mne/viz/_brain/colormap.py index a9ef3302fca..23f1fbdb5ab 100644 --- a/mne/viz/_brain/colormap.py +++ b/mne/viz/_brain/colormap.py @@ -12,7 +12,7 @@ def create_lut(cmap, n_colors=256, center=None): """Return a colormap suitable for setting as a LUT.""" from matplotlib import cm cmap = cm.get_cmap(cmap) - lut = np.round(cmap(np.linspace(0, 1, n_colors)) * 255.0).astype(np.int) + lut = np.round(cmap(np.linspace(0, 1, n_colors)) * 255.0).astype(np.int64) return lut @@ -169,5 +169,5 @@ def calculate_lut(lut_table, alpha, fmin, fmid, fmax, center=None, lut_table[:, chan]) lut_table = lut - lut_table = lut_table.astype(np.float) / 255.0 + lut_table = lut_table.astype(np.float64) / 255.0 return lut_table diff --git a/mne/viz/_brain/surface.py b/mne/viz/_brain/surface.py index 30f65b2c3b3..de5b9cfde03 100644 --- a/mne/viz/_brain/surface.py +++ b/mne/viz/_brain/surface.py @@ -151,7 +151,7 @@ def load_curvature(self): """Load in curvature values from the ?h.curv file.""" curv_path = path.join(self.data_path, 'surf', '%s.curv' % self.hemi) self.curv = read_curvature(curv_path, binary=False) - self.bin_curv = np.array(self.curv > 0, np.int) + self.bin_curv = np.array(self.curv > 0, np.int64) # morphometry (curvature) normalization in order to get gray cortex # TODO: delete self.grey_curv after cortex parameter # will be fully supported diff --git a/mne/viz/_brain/tests/test.ipynb b/mne/viz/_brain/tests/test.ipynb new file mode 100644 index 00000000000..6d1d334525f --- /dev/null +++ b/mne/viz/_brain/tests/test.ipynb @@ -0,0 +1,62 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import mne\n", + "from mne.datasets import testing\n", + "data_path = testing.data_path()\n", + "raw_fname = data_path + '/MEG/sample/sample_audvis_trunc_raw.fif'\n", + "subjects_dir = data_path + '/subjects'\n", + "subject = 'sample'\n", + "trans = data_path + 
'/MEG/sample/sample_audvis_trunc-trans.fif'\n", + "info = mne.io.read_info(raw_fname)\n", + "mne.viz.set_3d_backend('notebook')\n", + "mne.viz.plot_alignment(info, trans, subject=subject, dig=True,\n", + " meg=['helmet', 'sensors'], subjects_dir=subjects_dir,\n", + " surfaces=['head-dense'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import mne\n", + "from mne.datasets import testing\n", + "data_path = testing.data_path()\n", + "sample_dir = os.path.join(data_path, 'MEG', 'sample')\n", + "subjects_dir = os.path.join(data_path, 'subjects')\n", + "fname_stc = os.path.join(sample_dir, 'sample_audvis_trunc-meg')\n", + "stc = mne.read_source_estimate(fname_stc, subject='sample')\n", + "initial_time = 0.13\n", + "mne.viz.set_3d_backend('notebook')\n", + "brain = stc.plot(subjects_dir=subjects_dir, initial_time=initial_time,\n", + " clim=dict(kind='value', pos_lims=[3, 6, 9]),\n", + " time_viewer=True,\n", + " hemi='split')" + ] + } + ], + "metadata": { + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.5" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/mne/viz/_brain/tests/test_notebook.py b/mne/viz/_brain/tests/test_notebook.py new file mode 100644 index 00000000000..d8fa65e6f9b --- /dev/null +++ b/mne/viz/_brain/tests/test_notebook.py @@ -0,0 +1,22 @@ +import os +import pytest + +from mne.datasets import testing +from mne.utils import requires_version + +PATH = os.path.dirname(os.path.realpath(__file__)) + + +@pytest.mark.slowtest +@testing.requires_testing_data +@requires_version('nbformat') +def test_notebook_3d_backend(renderer_notebook): + """Test executing a notebook that should not fail.""" + import nbformat + + notebook = nbformat.read( + os.path.join(PATH, "test.ipynb"), as_version=4, + ) + exec_results = renderer_notebook.execute_notebook(notebook) + if exec_results.exec_error: + raise exec_results.exec_error diff --git a/mne/viz/backends/_notebook.py b/mne/viz/backends/_notebook.py new file mode 100644 index 00000000000..6e5c4b1d0a1 --- /dev/null +++ b/mne/viz/backends/_notebook.py @@ -0,0 +1,165 @@ +# Authors: Guillaume Favelier +# +# License: Simplified BSD + +import matplotlib.pyplot as plt +from contextlib import contextmanager +from ...fixes import nullcontext +from ._pyvista import _Renderer as _PyVistaRenderer + + +class _Renderer(_PyVistaRenderer): + def __init__(self, *args, **kwargs): + from IPython import get_ipython + ipython = get_ipython() + ipython.magic('matplotlib widget') + kwargs["notebook"] = True + super().__init__(*args, **kwargs) + + def show(self): + self.figure.display = _NotebookInteractor(self) + return self.scene() + + +class _NotebookInteractor(object): + def __init__(self, renderer): + from IPython import display + from ipywidgets import HBox, VBox + self.dpi = 90 + self.sliders = dict() + self.controllers = dict() + self.renderer = renderer + self.plotter = self.renderer.plotter + with self.disabled_interactivity(): + self.fig, self.dh = self.screenshot() + self.configure_controllers() + controllers = VBox(list(self.controllers.values())) + layout = HBox([self.fig.canvas, controllers]) + display.display(layout) + + @contextmanager + def disabled_interactivity(self): + state = plt.isinteractive() + plt.ioff() + try: + yield + 
finally: + if state: + plt.ion() + else: + plt.ioff() + + def screenshot(self): + width, height = self.renderer.figure.store['window_size'] + + fig = plt.figure() + fig.figsize = (width / self.dpi, height / self.dpi) + fig.dpi = self.dpi + fig.canvas.toolbar_visible = False + fig.canvas.header_visible = False + fig.canvas.resizable = False + fig.canvas.callbacks.callbacks.clear() + ax = plt.Axes(fig, [0., 0., 1., 1.]) + ax.set_axis_off() + fig.add_axes(ax) + + dh = ax.imshow(self.plotter.screenshot()) + return fig, dh + + def update(self): + self.dh.set_data(self.plotter.screenshot()) + self.fig.canvas.draw() + + def configure_controllers(self): + from ipywidgets import (interactive, Label, VBox, FloatSlider, + IntSlider, Checkbox) + # subplot + number_of_plots = len(self.plotter.renderers) + if number_of_plots > 1: + self.sliders["subplot"] = IntSlider( + value=number_of_plots - 1, + min=0, + max=number_of_plots - 1, + step=1, + continuous_update=False + ) + self.controllers["subplot"] = VBox([ + Label(value='Select the subplot'), + interactive( + self.set_subplot, + index=self.sliders["subplot"], + ) + ]) + # azimuth + default_azimuth = self.plotter.renderer._azimuth + self.sliders["azimuth"] = FloatSlider( + value=default_azimuth, + min=-180., + max=180., + step=10., + continuous_update=False + ) + # elevation + default_elevation = self.plotter.renderer._elevation + self.sliders["elevation"] = FloatSlider( + value=default_elevation, + min=-180., + max=180., + step=10., + continuous_update=False + ) + # distance + eps = 1e-5 + default_distance = self.plotter.renderer._distance + self.sliders["distance"] = FloatSlider( + value=default_distance, + min=eps, + max=2. * default_distance - eps, + step=default_distance / 10., + continuous_update=False + ) + # camera + self.controllers["camera"] = VBox([ + Label(value='Camera settings'), + interactive( + self.set_camera, + azimuth=self.sliders["azimuth"], + elevation=self.sliders["elevation"], + distance=self.sliders["distance"], + ) + ]) + # continuous update + self.continuous_update_button = Checkbox( + value=False, + description='Continuous update', + disabled=False, + indent=False, + ) + self.controllers["continuous_update"] = interactive( + self.set_continuous_update, + value=self.continuous_update_button + ) + + def set_camera(self, azimuth, elevation, distance): + focalpoint = self.plotter.camera.GetFocalPoint() + self.renderer.set_camera(azimuth, elevation, + distance, focalpoint) + self.update() + + def set_subplot(self, index): + row, col = self.plotter.index_to_loc(index) + self.renderer.subplot(row, col) + figure = self.renderer.figure + default_azimuth = figure.plotter.renderer._azimuth + default_elevation = figure.plotter.renderer._elevation + default_distance = figure.plotter.renderer._distance + self.sliders["azimuth"].value = default_azimuth + self.sliders["elevation"].value = default_elevation + self.sliders["distance"].value = default_distance + + def set_continuous_update(self, value): + for slider in self.sliders.values(): + slider.continuous_update = value + + +_testing_context = nullcontext diff --git a/mne/viz/backends/_pysurfer_mayavi.py b/mne/viz/backends/_pysurfer_mayavi.py index e7adc34c9ae..af1ae21b75c 100644 --- a/mne/viz/backends/_pysurfer_mayavi.py +++ b/mne/viz/backends/_pysurfer_mayavi.py @@ -124,11 +124,11 @@ def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False, l_m = surface.module_manager.scalar_lut_manager if colormap.dtype == np.uint8: l_m.lut.table = colormap - elif colormap.dtype == 
np.float: + elif colormap.dtype == np.float64: l_m.load_lut_from_list(colormap) else: raise TypeError('Expected type for colormap values are' - ' np.float or np.uint8: ' + ' np.float64 or np.uint8: ' '{} was given'.format(colormap.dtype)) surface.actor.property.shading = shading surface.actor.property.backface_culling = backface_culling diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index d7a8fd7ee12..a4289742125 100644 --- a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -137,13 +137,15 @@ class _Renderer(_BaseRenderer): """ def __init__(self, fig=None, size=(600, 600), bgcolor='black', - name="PyVista Scene", show=False, shape=(1, 1)): + name="PyVista Scene", show=False, shape=(1, 1), + notebook=None): from .renderer import MNE_3D_BACKEND_TESTING from .._3d import _get_3d_option figure = _Figure(show=show, title=name, size=size, shape=shape, - background_color=bgcolor, notebook=None) + background_color=bgcolor, notebook=notebook) self.font_family = "arial" self.tube_n_sides = 20 + self.shape = shape antialias = _get_3d_option('antialias') self.antialias = antialias and not MNE_3D_BACKEND_TESTING if isinstance(fig, int): @@ -196,6 +198,8 @@ def ensure_minimum_sizes(self): self.plotter.interactor.setMinimumSize(0, 0) def subplot(self, x, y): + x = np.max([0, np.min([x, self.shape[0] - 1])]) + y = np.max([0, np.min([y, self.shape[1] - 1])]) with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=FutureWarning) self.plotter.subplot(x, y) @@ -233,7 +237,7 @@ def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False, rgba = True if isinstance(colormap, np.ndarray): if colormap.dtype == np.uint8: - colormap = colormap.astype(np.float) / 255. + colormap = colormap.astype(np.float64) / 255. from matplotlib.colors import ListedColormap colormap = ListedColormap(colormap) @@ -590,10 +594,13 @@ def _set_3d_view(figure, azimuth, elevation, focalpoint, distance): r = distance else: r = max(bounds[1::2] - bounds[::2]) * 2.0 + distance = r - cen = (bounds[1::2] + bounds[::2]) * 0.5 if focalpoint is not None: cen = np.asarray(focalpoint) + else: + cen = (bounds[1::2] + bounds[::2]) * 0.5 + focalpoint = cen # Now calculate the view_up vector of the camera. 
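# [Editorial sketch, not part of the diff] The _azimuth/_elevation/_distance
# attributes stored in this hunk record the spherical view parameters from
# which the camera position is derived; roughly as follows (all names here
# are illustrative, not MNE internals):
import numpy as np


def camera_position(azimuth, elevation, distance, focalpoint):
    """Spherical view parameters to a Cartesian camera position (sketch)."""
    phi, theta = np.deg2rad(azimuth), np.deg2rad(elevation)
    # Offset from the focal point along the viewing direction.
    offset = distance * np.array([np.cos(phi) * np.sin(theta),
                                  np.sin(phi) * np.sin(theta),
                                  np.cos(theta)])
    return np.asarray(focalpoint) + offset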
If the view up is # close to the 'z' axis, the view plane normal is parallel to the @@ -610,6 +617,10 @@ def _set_3d_view(figure, azimuth, elevation, focalpoint, distance): figure.plotter.camera_position = [ position, cen, view_up] + figure.plotter.renderer._azimuth = azimuth + figure.plotter.renderer._elevation = elevation + figure.plotter.renderer._distance = distance + def _set_3d_title(figure, title, size=16): with warnings.catch_warnings(): diff --git a/mne/viz/backends/_utils.py b/mne/viz/backends/_utils.py index cd248d21d0f..fba8a0f9a46 100644 --- a/mne/viz/backends/_utils.py +++ b/mne/viz/backends/_utils.py @@ -10,7 +10,7 @@ import numpy as np import collections.abc -VALID_3D_BACKENDS = ['mayavi', 'pyvista'] +VALID_3D_BACKENDS = ['mayavi', 'pyvista', 'notebook'] def _get_colormap_from_array(colormap=None, normalized_colormap=False, @@ -36,15 +36,16 @@ def _check_color(color): np_color = np.array(color) if np_color.size % 3 != 0 and np_color.size % 4 != 0: raise ValueError("The expected valid format is RGB or RGBA.") - if np_color.dtype == np.int: + if np_color.dtype in (np.int64, np.int32): if (np_color < 0).any() or (np_color > 255).any(): raise ValueError("Values out of range [0, 255].") - elif np_color.dtype == np.float: + elif np_color.dtype == np.float64: if (np_color < 0.0).any() or (np_color > 1.0).any(): raise ValueError("Values out of range [0.0, 1.0].") else: - raise TypeError("Expected data type is `np.int` or `np.float` but " - "{} was given.".format(np_color.dtype)) + raise TypeError("Expected data type is `np.int64`, `np.int32`, or " + "`np.float64` but {} was given." + .format(np_color.dtype)) else: raise TypeError("Expected type is `str` or iterable but " "{} was given.".format(type(color))) diff --git a/mne/viz/backends/renderer.py b/mne/viz/backends/renderer.py index dbb3a038ca1..348b273fbec 100644 --- a/mne/viz/backends/renderer.py +++ b/mne/viz/backends/renderer.py @@ -17,7 +17,11 @@ MNE_3D_BACKEND_TESTING = False -_backend_name_map = dict(mayavi='._pysurfer_mayavi', pyvista='._pyvista') +_backend_name_map = dict( + mayavi='._pysurfer_mayavi', + pyvista='._pyvista', + notebook='._notebook', +) backend = None diff --git a/mne/viz/backends/tests/_utils.py b/mne/viz/backends/tests/_utils.py index f93259a2596..ec95297ef72 100644 --- a/mne/viz/backends/tests/_utils.py +++ b/mne/viz/backends/tests/_utils.py @@ -24,7 +24,25 @@ def has_mayavi(): """Check that mayavi is installed.""" try: with warnings.catch_warnings(record=True): # traits - from mayavi import mlab # noqa F401 + from mayavi import mlab # noqa: F401 + return True + except ImportError: + return False + + +def has_pyqt5(): + """Check if PyQt5 is installed.""" + try: + import PyQt5 # noqa: F401 + return True + except ImportError: + return False + + +def has_imageio_ffmpeg(): + """Check if imageio-ffmpeg is installed.""" + try: + import imageio_ffmpeg # noqa: F401 return True except ImportError: return False diff --git a/mne/viz/backends/tests/test_renderer.py b/mne/viz/backends/tests/test_renderer.py index ac7252d0dda..370235c3922 100644 --- a/mne/viz/backends/tests/test_renderer.py +++ b/mne/viz/backends/tests/test_renderer.py @@ -154,9 +154,8 @@ def test_3d_backend(renderer): rend.show() -def test_get_3d_backend(): +def test_get_3d_backend(renderer): """Test get_3d_backend function call for side-effects.""" - from mne.viz.backends import renderer # Test twice to ensure the first call had no side-effect orig_backend = renderer.MNE_3D_BACKEND assert renderer.get_3d_backend() == orig_backend diff --git 
a/mne/viz/circle.py b/mne/viz/circle.py index 840a2f3cf1b..ff00d92bc61 100644 --- a/mne/viz/circle.py +++ b/mne/viz/circle.py @@ -50,7 +50,7 @@ def circular_layout(node_names, node_order, start_pos=90, start_between=True, raise ValueError('node_order has to be the same length as node_names') if group_boundaries is not None: - boundaries = np.array(group_boundaries, dtype=np.int) + boundaries = np.array(group_boundaries, dtype=np.int64) if np.any(boundaries >= n_nodes) or np.any(boundaries < 0): raise ValueError('"group_boundaries" has to be between 0 and ' 'n_nodes - 1.') @@ -78,7 +78,7 @@ def circular_layout(node_names, node_order, start_pos=90, start_between=True, start_pos += group_sep / 2 boundaries = boundaries[1:] if n_group_sep > 1 else None - node_angles = np.ones(n_nodes, dtype=np.float) * node_sep + node_angles = np.ones(n_nodes, dtype=np.float64) * node_sep node_angles[0] = start_pos if boundaries is not None: node_angles[boundaries] += group_sep @@ -325,7 +325,7 @@ def plot_connectivity_circle(con, node_names, indices=None, n_lines=None, # edges: We modulate the noise with the number of connections of the # node and the connection strength, such that the strongest connections # are closer to the node center - nodes_n_con = np.zeros((n_nodes), dtype=np.int) + nodes_n_con = np.zeros((n_nodes), dtype=np.int64) for i, j in zip(indices[0], indices[1]): nodes_n_con[i] += 1 nodes_n_con[j] += 1 diff --git a/mne/viz/evoked.py b/mne/viz/evoked.py index 40bd6216cdf..fad1b99946d 100644 --- a/mne/viz/evoked.py +++ b/mne/viz/evoked.py @@ -290,7 +290,7 @@ def _plot_evoked(evoked, picks, exclude, unit, show, ylim, proj, xlim, hline, picks = np.array([pick for pick in picks if pick not in exclude]) - types = np.array(_get_channel_types(info, picks), np.unicode) + types = np.array(_get_channel_types(info, picks), str) ch_types_used = list() for this_type in _VALID_CHANNEL_TYPES: if this_type in types: @@ -2317,7 +2317,7 @@ def click_func( invert_y, vlines, tmin, tmax, units, skip_axlabel) # add inset scalp plot showing location of sensors picked if show_sensors: - _validate_type(show_sensors, (np.int, bool, str, type(None)), + _validate_type(show_sensors, (np.int64, bool, str, type(None)), 'show_sensors', 'numeric, str, None or bool') if not _check_ch_locs(np.array(one_evoked.info['chs'])[pos_picks]): warn('Cannot find channel coordinates in the supplied Evokeds. ' diff --git a/mne/viz/tests/test_3d.py b/mne/viz/tests/test_3d.py index 7cca0d77f61..a30aeb2111e 100644 --- a/mne/viz/tests/test_3d.py +++ b/mne/viz/tests/test_3d.py @@ -105,6 +105,7 @@ def test_plot_head_positions(): @testing.requires_testing_data @requires_pysurfer @traits_test +@pytest.mark.slowtest def test_plot_sparse_source_estimates(renderer_interactive): """Test plotting of (sparse) source estimates.""" sample_src = read_source_spaces(src_fname) @@ -135,7 +136,7 @@ def test_plot_sparse_source_estimates(renderer_interactive): stc_data = np.zeros((len(inds), n_time)) stc_data[0, 1] = 1. stc_data[1, 4] = 2. 
- vertices = [vertices[inds], np.empty(0, dtype=np.int)] + vertices = [vertices[inds], np.empty(0, dtype=np.int64)] stc = SourceEstimate(stc_data, vertices, 1, 1) surf = plot_sparse_source_estimates(sample_src, stc, bgcolor=(1, 1, 1), opacity=0.5, high_resolution=False) @@ -146,6 +147,7 @@ def test_plot_sparse_source_estimates(renderer_interactive): @testing.requires_testing_data @traits_test +@pytest.mark.slowtest def test_plot_evoked_field(renderer): """Test plotting evoked field.""" evoked = read_evokeds(evoked_fname, condition='Left Auditory', @@ -603,13 +605,14 @@ def test_snapshot_brain_montage(renderer): @requires_dipy() @requires_nibabel() @requires_version('nilearn', '0.4') -@pytest.mark.parametrize('mode, stype, init_t, want_t, init_p, want_p', [ - ('glass_brain', 's', None, 2, None, (-30.9, 18.4, 56.7)), - ('stat_map', 'vec', 1, 1, None, (15.7, 16.0, -6.3)), - ('glass_brain', 'vec', None, 1, (10, -10, 20), (6.6, -9.0, 19.9)), - ('stat_map', 's', 1, 1, (-10, 5, 10), (-12.3, 2.0, 7.7))]) +@pytest.mark.parametrize( + 'mode, stype, init_t, want_t, init_p, want_p, bg_img', [ + ('glass_brain', 's', None, 2, None, (-30.9, 18.4, 56.7), None), + ('stat_map', 'vec', 1, 1, None, (15.7, 16.0, -6.3), None), + ('glass_brain', 'vec', None, 1, (10, -10, 20), (6.6, -9., 19.9), None), + ('stat_map', 's', 1, 1, (-10, 5, 10), (-12.3, 2.0, 7.7), 'brain.mgz')]) def test_plot_volume_source_estimates(mode, stype, init_t, want_t, - init_p, want_p): + init_p, want_p, bg_img): """Test interactive plotting of volume source estimates.""" forward = read_forward_solution(fwd_fname) sample_src = forward['src'] @@ -632,7 +635,7 @@ def test_plot_volume_source_estimates(mode, stype, init_t, want_t, fig = stc.plot( sample_src, subject='sample', subjects_dir=subjects_dir, mode=mode, initial_time=init_t, initial_pos=init_p, - verbose=True) + bg_img=bg_img, verbose=True) log = log.getvalue() want_str = 't = %0.3f s' % want_t assert want_str in log, (want_str, init_t) @@ -642,6 +645,10 @@ def test_plot_volume_source_estimates(mode, stype, init_t, want_t, _fake_click(fig, fig.axes[ax_idx], (0.3, 0.5)) fig.canvas.key_press_event('left') fig.canvas.key_press_event('shift+right') + if bg_img is not None: + with pytest.raises(FileNotFoundError, match='MRI file .* not found'): + stc.plot(sample_src, subject='sample', subjects_dir=subjects_dir, + mode='stat_map', bg_img='junk.mgz') @pytest.mark.slowtest # can be slow on OSX diff --git a/mne/viz/tests/test_epochs.py b/mne/viz/tests/test_epochs.py index 8c9963823bb..3c02346bad2 100644 --- a/mne/viz/tests/test_epochs.py +++ b/mne/viz/tests/test_epochs.py @@ -282,7 +282,7 @@ def test_plot_butterfly(): sfreq = 1000. 
data = np.sin(rng.randn(n_epochs, n_channels, n_times)) events = np.array([np.arange(n_epochs), [0] * n_epochs, np.ones([n_epochs], - dtype=np.int)]).T + dtype=np.int64)]).T chanlist = ['eeg' if chan < n_channels // 3 else 'ecog' if chan < n_channels // 2 else 'seeg' for chan in range(n_channels)] diff --git a/mne/viz/tests/test_ica.py b/mne/viz/tests/test_ica.py index 08fc5aa478f..26a5fd6ce3e 100644 --- a/mne/viz/tests/test_ica.py +++ b/mne/viz/tests/test_ica.py @@ -106,6 +106,7 @@ def test_plot_ica_components(): plt.close('all') +@pytest.mark.slowtest @requires_sklearn def test_plot_ica_properties(): """Test plotting of ICA properties.""" @@ -199,7 +200,7 @@ def test_plot_ica_sources(): assert_array_equal(ica.exclude, [1]) plt.close('all') - # dtype can change int->np.int after load, test it explicitly + # dtype can change int->np.int64 after load, test it explicitly ica.n_components_ = np.int64(ica.n_components_) fig = ica.plot_sources(raw) # also test mouse clicks @@ -247,6 +248,7 @@ def test_plot_ica_sources(): plt.close('all') +@pytest.mark.slowtest @requires_sklearn def test_plot_ica_overlay(): """Test plotting of ICA cleaning.""" diff --git a/mne/viz/utils.py b/mne/viz/utils.py index b03b64d9ab2..b6f29af6a03 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -2963,7 +2963,7 @@ def _plot_masked_image(ax, data, times, mask=None, yvals=None, if mask.all(): t_end = ", all points masked)" else: - fraction = 1 - (np.float(mask.sum()) / np.float(mask.size)) + fraction = 1 - (np.float64(mask.sum()) / np.float64(mask.size)) t_end = ", %0.3g%% of points masked)" % (fraction * 100,) else: t_end = ")" diff --git a/requirements.txt b/requirements.txt index 3942cf53428..07f9edc036e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ numpy scipy matplotlib -pyqt5<5.14 +pyqt5 pyqt5-sip sip scikit-learn @@ -33,5 +33,5 @@ flake8 https://github.com/mcmtroffaes/sphinxcontrib-bibtex/archive/29694f215b39d64a31b845aafd9ff2ae9329494f.zip imageio>=2.6.1 imageio-ffmpeg>=0.4.1 -pyvista==0.24 +pyvista==0.24.3 tqdm diff --git a/server_environment.yml b/server_environment.yml new file mode 100644 index 00000000000..9c27f9e6266 --- /dev/null +++ b/server_environment.yml @@ -0,0 +1,23 @@ +name: base +channels: +- conda-forge/label/vtk_dev +- conda-forge +- defaults +dependencies: +- python>=3.7 +- pip +- mesalib +- ffmpeg +- vtk +- numpy +- scipy +- traits +- pip: + - mne + - matplotlib + - ipympl + - jupyter + - pyvista==0.24 + - ipywidgets + - nbformat + - pytest-notebook diff --git a/tutorials/machine-learning/plot_sensors_decoding.py b/tutorials/machine-learning/plot_sensors_decoding.py index 8534cb6b00c..6e4261eb3c7 100644 --- a/tutorials/machine-learning/plot_sensors_decoding.py +++ b/tutorials/machine-learning/plot_sensors_decoding.py @@ -40,6 +40,7 @@ data_path = sample.data_path() +subjects_dir = data_path + '/subjects' raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif' tmin, tmax = -0.200, 0.500 event_id = {'Auditory/Left': 1, 'Visual/Left': 3} # just use two @@ -393,7 +394,8 @@ inv = mne.minimum_norm.make_inverse_operator( evoked_time_gen.info, fwd, cov, loose=0.) stc = mne.minimum_norm.apply_inverse(evoked_time_gen, inv, 1. 
/ 9., 'dSPM') -brain = stc.plot(hemi='split', views=('lat', 'med'), initial_time=0.1) +brain = stc.plot(hemi='split', views=('lat', 'med'), initial_time=0.1, + subjects_dir=subjects_dir) ############################################################################### # Source-space decoding
diff --git a/tutorials/preprocessing/plot_60_maxwell_filtering_sss.py b/tutorials/preprocessing/plot_60_maxwell_filtering_sss.py index 539000e8ef8..25c04716f1f 100644 --- a/tutorials/preprocessing/plot_60_maxwell_filtering_sss.py +++ b/tutorials/preprocessing/plot_60_maxwell_filtering_sss.py
@@ -16,7 +16,12 @@ """ import os +import matplotlib.pyplot as plt +import seaborn as sns +import pandas as pd +import numpy as np import mne +from mne.preprocessing import find_bad_channels_maxwell sample_data_folder = mne.datasets.sample.data_path() sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
@@ -106,18 +111,100 @@ raw.info['bads'] = [] raw_check = raw.copy() raw_check.pick_types(meg=True, exclude=()).load_data().filter(None, 40) -auto_noisy_chs, auto_flat_chs = mne.preprocessing.find_bad_channels_maxwell( +auto_noisy_chs, auto_flat_chs, auto_scores = find_bad_channels_maxwell( raw_check, cross_talk=crosstalk_file, calibration=fine_cal_file, - verbose=True) + return_scores=True, verbose=True) print(auto_noisy_chs) # we should find them! print(auto_flat_chs) # none for this dataset -raw.info['bads'].extend(auto_noisy_chs + auto_flat_chs) ############################################################################### -# But this algorithm is not perfect. For example, it misses ``MEG 2313``, -# which has some flux jumps, because there are not enough flux jumps in the -# recording. So it can still be useful to manually inspect and mark bad -# channels: +# Now we can update the list of bad channels in the dataset. + +bads = [*raw.info['bads'], *auto_noisy_chs, *auto_flat_chs] +bads = sorted(bads) +raw.info['bads'] = bads + +############################################################################### +# We called `~mne.preprocessing.find_bad_channels_maxwell` with the optional +# keyword argument ``return_scores=True``, causing the function to return a +# dictionary of all data related to the scoring used to classify channels as +# noisy or flat. This information can be used to produce diagnostic figures. +# +# In the following, we will generate such visualizations for +# the automated detection of *noisy* gradiometer channels. + +# Only select the data for gradiometer channels. +ch_type = 'grad' +ch_subset = auto_scores['ch_types'] == ch_type +ch_names = auto_scores['ch_names'][ch_subset] +scores = auto_scores['scores_noisy'][ch_subset] +limits = auto_scores['limits_noisy'][ch_subset] +bins = auto_scores['bins'] # The time windows that were evaluated. +# We will label each segment by its start and stop time, with up to 3 +# digits before and 3 digits after the decimal place (1 ms precision). +bin_labels = [f'{start:3.3f} – {stop:3.3f}' + for start, stop in bins] + +# We store the data in a Pandas DataFrame. The seaborn heatmap function +# we will call below will then be able to automatically assign the correct +# labels to all axes. +data_to_plot = pd.DataFrame(data=scores, + columns=pd.Index(bin_labels, name='Time (s)'), + index=pd.Index(ch_names, name='Channel')) + +# First, plot the "raw" scores. 
+fig, ax = plt.subplots(1, 2, figsize=(12, 8)) +fig.suptitle(f'Automated noisy channel detection: {ch_type}', + fontsize=16, fontweight='bold') +sns.heatmap(data=data_to_plot, cmap='Reds', cbar_kws=dict(label='Score'), + ax=ax[0]) +[ax[0].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray') + for x in range(1, len(bins))] +ax[0].set_title('All Scores', fontweight='bold') + +# Now, adjust the color range to highlight segments that exceeded the limit. +sns.heatmap(data=data_to_plot, + vmin=np.nanmin(limits), # bads in input data have NaN limits + cmap='Reds', cbar_kws=dict(label='Score'), ax=ax[1]) +[ax[1].axvline(x, ls='dashed', lw=0.25, dashes=(25, 15), color='gray') + for x in range(1, len(bins))] +ax[1].set_title('Scores > Limit', fontweight='bold') + +# The figure title should not overlap with the subplots. +fig.tight_layout(rect=[0, 0.03, 1, 0.95]) +
+############################################################################### +# +# .. note:: You can use the very same code as above to produce figures for +# *flat* channel detection. Simply replace the word "noisy" with +# "flat", and replace ``vmin=np.nanmin(limits)`` with +# ``vmax=np.nanmax(limits)``. +# +# You can see the unaltered scores for each channel and time segment in the +# left subplots, and thresholded scores – those which exceeded a certain limit +# of noisiness – in the right subplots. While the right subplot is entirely +# white for the magnetometers, we can see a horizontal line extending all the +# way from left to right for the gradiometers. This line corresponds to channel +# ``MEG 2443``, which was reported as an auto-detected noisy channel in the +# step above. But we can also see another channel exceeding the limits, +# apparently in a more transient fashion. It was therefore *not* detected as +# bad, because the number of segments in which it exceeded the limits was less +# than 5, the minimum count MNE-Python requires by default. +# +# .. note:: You can request a different number of segments that must be +# found to be problematic before +# `~mne.preprocessing.find_bad_channels_maxwell` reports a channel +# as bad. To do this, pass the keyword argument ``min_count`` to the +# function, as sketched just after this tutorial's diff. + +############################################################################### +# Obviously, this algorithm is not perfect. Specifically, on closer inspection +# of the raw data after looking at the diagnostic plots above, it becomes clear +# that the channel that exceeded the "noise" limits in some segments without +# qualifying as "bad" does in fact contain some flux jumps. There were just not +# *enough* flux jumps in the recording for our automated procedure to report +# the channel as bad. So it can still be useful to manually inspect and mark +# bad channels. The channel in question is ``MEG 2313``. Let's mark it as bad: raw.info['bads'] += ['MEG 2313'] # from manual inspection
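To make the ``min_count`` note above concrete, a short sketch (``min_count`` is the keyword argument the note refers to; the value ``3`` here is purely illustrative, the default being 5):

# Flag a channel as bad after only 3 problematic segments instead of 5.
auto_noisy_chs, auto_flat_chs, auto_scores = find_bad_channels_maxwell(
    raw_check, cross_talk=crosstalk_file, calibration=fine_cal_file,
    min_count=3, return_scores=True, verbose=True)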
diff --git a/tutorials/source-modeling/plot_beamformer_lcmv.py b/tutorials/source-modeling/plot_beamformer_lcmv.py index 6b3ad76578f..6ce86b0208b 100644 --- a/tutorials/source-modeling/plot_beamformer_lcmv.py +++ b/tutorials/source-modeling/plot_beamformer_lcmv.py
@@ -9,6 +9,7 @@ .. contents:: Page contents :local: :depth: 2 + """ # Author: Britta Westner #
diff --git a/tutorials/source-modeling/plot_fix_bem_in_blender.py b/tutorials/source-modeling/plot_fix_bem_in_blender.py new file mode 100644 index 00000000000..61e9bc483e5 --- /dev/null +++ b/tutorials/source-modeling/plot_fix_bem_in_blender.py
@@ -0,0 +1,137 @@ +""" +Editing BEM surfaces in Blender +=============================== + +Sometimes when creating a BEM model the surfaces need manual correction because +of a series of problems that can arise (e.g. intersection between surfaces). +Here, we will see how this can be achieved by exporting the surfaces to the 3D +modeling program `Blender <https://www.blender.org>`_, editing them, and +re-importing them. + +This tutorial is based on https://github.com/ezemikulan/blender_freesurfer by +Ezequiel Mikulan. +""" + +# Authors: Marijn van Vliet +# Ezequiel Mikulan +# +# License: BSD (3-clause) + +# sphinx_gallery_thumbnail_path = '_static/blender_import_obj/blender_import_obj2.jpg' # noqa + +import os +import os.path as op +import shutil +import mne + +data_path = mne.datasets.sample.data_path() +subjects_dir = op.join(data_path, 'subjects') +bem_dir = op.join(subjects_dir, 'sample', 'bem') + +############################################################################### +# Exporting surfaces to Blender +# ----------------------------- +# +# In this tutorial, we are working with the MNE-Sample set, for which the +# surfaces have no issues. To demonstrate how to fix problematic surfaces, we +# are going to manually place one of the inner-skull vertices outside the +# outer-skull mesh. +# +# We then convert the surfaces to `.obj +# <https://en.wikipedia.org/wiki/Wavefront_.obj_file>`_ files and create a new +# folder called ``conv`` inside the FreeSurfer subject folder to keep them in. + +# Put the converted surfaces in a separate 'conv' folder +conv_dir = op.join(subjects_dir, 'sample', 'conv') +os.makedirs(conv_dir, exist_ok=True) + +# Load the inner skull surface and create a problem +coords, faces = mne.read_surface(op.join(bem_dir, 'inner_skull.surf')) +coords[0] *= 1.1 # Move the first vertex outside the skull + +# Write the inner skull surface as an .obj file that can be imported by +# Blender. +mne.write_surface(op.join(conv_dir, 'inner_skull.obj'), coords, faces, + overwrite=True) + +# Also convert the outer skull surface. +coords, faces = mne.read_surface(op.join(bem_dir, 'outer_skull.surf')) +mne.write_surface(op.join(conv_dir, 'outer_skull.obj'), coords, faces, + overwrite=True) + +############################################################################### +# Editing in Blender +# ------------------ +# +# We can now open Blender and import the surfaces. Go to *File > Import > +# Wavefront (.obj)*. Navigate to the ``conv`` folder and select the file you +# want to import. Make sure to select the *Keep Vert Order* option. You can +# also select the *Y Forward* option to load the axes in the correct direction +# (RAS): +# +# .. image:: ../../_static/blender_import_obj/blender_import_obj1.jpg +# :width: 800 +# :alt: Importing .obj files in Blender +# +# For convenience, you can save these settings by pressing the ``+`` button +# next to *Operator Presets*. +# +# Repeat the procedure for all surfaces you want to import (e.g. inner_skull +# and outer_skull). +# +# You can now edit the surfaces any way you like. See the +# `Beginner Blender Tutorial Series +# `_ +# to learn how to use Blender. Specifically, `part 2 +# `_ will teach you how to +# use the basic editing tools you need to fix the surface. +# +# .. 
image:: ../../_static/blender_import_obj/blender_import_obj2.jpg +# :width: 800 +# :alt: Editing surfaces in Blender +# +# Using the fixed surfaces in MNE-Python +# -------------------------------------- +# +# In Blender, you can export a surface as an .obj file by selecting it and +# going to *File > Export > Wavefront (.obj)*. You need to select the +# *Y Forward* option again and check the *Keep Vertex Order* box. +# +# .. image:: ../../_static/blender_import_obj/blender_import_obj3.jpg +# :width: 200 +# :alt: Exporting .obj files in Blender +# +# +# Each surface needs to be exported as a separate file. We recommend saving +# them in the ``conv`` folder and ending the file name with ``_fixed.obj``, +# although this is not strictly necessary. +# +# In order to be able to run this tutorial script top to bottom, here we +# simulate the edits you did manually in Blender using Python code: + +coords, faces = mne.read_surface(op.join(conv_dir, 'inner_skull.obj')) +coords[0] /= 1.1 # Move the first vertex back inside the skull +mne.write_surface(op.join(conv_dir, 'inner_skull_fixed.obj'), coords, faces, + overwrite=True) + +############################################################################### +# Back in Python, you can read the fixed .obj files and save them as +# FreeSurfer .surf files. For the :func:`mne.make_bem_model` function to find +# them, they need to be saved using their original names in the ``surf`` +# folder, e.g. ``surf/inner_skull.surf``. Be sure to first back up the +# original surfaces in case you make a mistake! + +# Read the fixed surface +coords, faces = mne.read_surface(op.join(conv_dir, 'inner_skull_fixed.obj')) + +# Back up the original surface +shutil.copy(op.join(bem_dir, 'inner_skull.surf'), + op.join(bem_dir, 'inner_skull_orig.surf')) + +# Overwrite the original surface with the fixed version +mne.write_surface(op.join(bem_dir, 'inner_skull.surf'), coords, faces, + overwrite=True) + +############################################################################### +# That's it! You are ready to continue with your analysis pipeline (e.g. +# running :func:`mne.make_bem_model`).
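As a sketch of that next step (editorial addition; ``make_bem_model`` and ``make_bem_solution`` are MNE-Python public API, and ``subjects_dir`` is the variable defined at the top of this tutorial):

# Build and solve the BEM model using the repaired surfaces.
model = mne.make_bem_model(subject='sample', subjects_dir=subjects_dir)
bem = mne.make_bem_solution(model)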