From 432249ee6304b07a8ecece2b310a65c594c59ae9 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 6 Dec 2023 12:53:49 -0500 Subject: [PATCH 01/37] ENH: Add polyphase resampling (#12268) Co-authored-by: Daniel McCloy --- doc/Makefile | 2 +- doc/changes/devel.rst | 1 + ...dataset_sgskip.py => spm_faces_dataset.py} | 107 +++-------- examples/decoding/receptive_field_mtrf.py | 11 +- .../source_power_spectrum_opm.py | 8 +- mne/cuda.py | 2 +- mne/filter.py | 166 ++++++++++++------ mne/io/base.py | 22 ++- mne/io/fiff/tests/test_raw_fiff.py | 66 +++---- mne/source_estimate.py | 27 ++- mne/tests/test_filter.py | 39 ++-- mne/tests/test_source_estimate.py | 116 ++++++------ mne/utils/docs.py | 58 ++++-- .../preprocessing/30_filtering_resampling.py | 53 +++++- 14 files changed, 409 insertions(+), 269 deletions(-) rename examples/datasets/{spm_faces_dataset_sgskip.py => spm_faces_dataset.py} (60%) diff --git a/doc/Makefile b/doc/Makefile index 70d7429f4ad..3c251069045 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -76,6 +76,6 @@ doctest: "results in _build/doctest/output.txt." view: - @python -c "import webbrowser; webbrowser.open_new_tab('file://$(PWD)/_build/html/index.html')" + @python -c "import webbrowser; webbrowser.open_new_tab('file://$(PWD)/_build/html/sg_execution_times.html')" show: view diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index eaf1cb881ad..fadd872e621 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -36,6 +36,7 @@ Enhancements ~~~~~~~~~~~~ - Speed up export to .edf in :func:`mne.export.export_raw` by using ``edfio`` instead of ``EDFlib-Python`` (:gh:`12218` by :newcontrib:`Florian Hofer`) - We added type hints for the return values of :func:`mne.read_evokeds` and :func:`mne.io.read_raw`. Development environments like VS Code or PyCharm will now provide more help when using these functions in your code. (:gh:`12250` by `Richard Höchenberger`_ and `Eric Larson`_) +- Add ``method="polyphase"`` to :meth:`mne.io.Raw.resample` and related functions to allow resampling using :func:`scipy.signal.upfirdn` (:gh:`12268` by `Eric Larson`_) Bugs ~~~~ diff --git a/examples/datasets/spm_faces_dataset_sgskip.py b/examples/datasets/spm_faces_dataset.py similarity index 60% rename from examples/datasets/spm_faces_dataset_sgskip.py rename to examples/datasets/spm_faces_dataset.py index 1357fc513b6..32df7d1a9ed 100644 --- a/examples/datasets/spm_faces_dataset_sgskip.py +++ b/examples/datasets/spm_faces_dataset.py @@ -5,15 +5,8 @@ From raw data to dSPM on SPM Faces dataset ========================================== -Runs a full pipeline using MNE-Python: - - - artifact removal - - averaging Epochs - - forward model computation - - source reconstruction using dSPM on the contrast : "faces - scrambled" - -.. note:: This example does quite a bit of processing, so even on a - fast machine it can take several minutes to complete. +Runs a full pipeline using MNE-Python. This example does quite a bit of processing, so +even on a fast machine it can take several minutes to complete. """ # Authors: Alexandre Gramfort # Denis Engemann @@ -21,12 +14,6 @@ # License: BSD-3-Clause # Copyright the MNE-Python contributors. -# %% - -# sphinx_gallery_thumbnail_number = 10 - -import matplotlib.pyplot as plt - import mne from mne import combine_evoked, io from mne.datasets import spm_face @@ -40,109 +27,72 @@ spm_path = data_path / "MEG" / "spm" # %% -# Load and filter data, set up epochs +# Load data, filter it, and fit ICA. 
raw_fname = spm_path / "SPM_CTF_MEG_example_faces1_3D.ds" - raw = io.read_raw_ctf(raw_fname, preload=True) # Take first run # Here to save memory and time we'll downsample heavily -- this is not # advised for real data as it can effectively jitter events! -raw.resample(120.0, npad="auto") - -picks = mne.pick_types(raw.info, meg=True, exclude="bads") -raw.filter(1, 30, method="fir", fir_design="firwin") +raw.resample(100) +raw.filter(1.0, None) # high-pass +reject = dict(mag=5e-12) +ica = ICA(n_components=0.95, max_iter="auto", random_state=0) +ica.fit(raw, reject=reject) +# compute correlation scores, get bad indices sorted by score +eog_epochs = create_eog_epochs(raw, ch_name="MRT31-2908", reject=reject) +eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name="MRT31-2908") +ica.plot_scores(eog_scores, eog_inds) # see scores the selection is based on +ica.plot_components(eog_inds) # view topographic sensitivity of components +ica.exclude += eog_inds[:1] # we saw the 2nd ECG component looked too dipolar +ica.plot_overlay(eog_epochs.average()) # inspect artifact removal +# %% +# Epoch data and apply ICA. events = mne.find_events(raw, stim_channel="UPPT001") - -# plot the events to get an idea of the paradigm -mne.viz.plot_events(events, raw.info["sfreq"]) - event_ids = {"faces": 1, "scrambled": 2} - tmin, tmax = -0.2, 0.6 -baseline = None # no baseline as high-pass is applied -reject = dict(mag=5e-12) - epochs = mne.Epochs( raw, events, event_ids, tmin, tmax, - picks=picks, - baseline=baseline, + picks="meg", + baseline=None, preload=True, reject=reject, ) - -# Fit ICA, find and remove major artifacts -ica = ICA(n_components=0.95, max_iter="auto", random_state=0) -ica.fit(raw, decim=1, reject=reject) - -# compute correlation scores, get bad indices sorted by score -eog_epochs = create_eog_epochs(raw, ch_name="MRT31-2908", reject=reject) -eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name="MRT31-2908") -ica.plot_scores(eog_scores, eog_inds) # see scores the selection is based on -ica.plot_components(eog_inds) # view topographic sensitivity of components -ica.exclude += eog_inds[:1] # we saw the 2nd ECG component looked too dipolar -ica.plot_overlay(eog_epochs.average()) # inspect artifact removal +del raw ica.apply(epochs) # clean data, default in place - evoked = [epochs[k].average() for k in event_ids] - contrast = combine_evoked(evoked, weights=[-1, 1]) # Faces - scrambled - evoked.append(contrast) - for e in evoked: e.plot(ylim=dict(mag=[-400, 400])) -plt.show() - -# estimate noise covarariance -noise_cov = mne.compute_covariance(epochs, tmax=0, method="shrunk", rank=None) - # %% -# Visualize fields on MEG helmet - -# The transformation here was aligned using the dig-montage. It's included in -# the spm_faces dataset and is named SPM_dig_montage.fif. 
-trans_fname = spm_path / "SPM_CTF_MEG_example_faces1_3D_raw-trans.fif" - -maps = mne.make_field_map( - evoked[0], trans_fname, subject="spm", subjects_dir=subjects_dir, n_jobs=None -) - -evoked[0].plot_field(maps, time=0.170, time_viewer=False) - -# %% -# Look at the whitened evoked daat +# Estimate noise covariance and look at the whitened evoked data +noise_cov = mne.compute_covariance(epochs, tmax=0, method="shrunk", rank=None) evoked[0].plot_white(noise_cov) # %% # Compute forward model +trans_fname = spm_path / "SPM_CTF_MEG_example_faces1_3D_raw-trans.fif" src = subjects_dir / "spm" / "bem" / "spm-oct-6-src.fif" bem = subjects_dir / "spm" / "bem" / "spm-5120-5120-5120-bem-sol.fif" forward = mne.make_forward_solution(contrast.info, trans_fname, src, bem) # %% -# Compute inverse solution +# Compute inverse solution and plot + +# sphinx_gallery_thumbnail_number = 8 snr = 3.0 lambda2 = 1.0 / snr**2 -method = "dSPM" - -inverse_operator = make_inverse_operator( - contrast.info, forward, noise_cov, loose=0.2, depth=0.8 -) - -# Compute inverse solution on contrast -stc = apply_inverse(contrast, inverse_operator, lambda2, method, pick_ori=None) -# stc.save('spm_%s_dSPM_inverse' % contrast.comment) - -# Plot contrast in 3D with mne.viz.Brain if available +inverse_operator = make_inverse_operator(contrast.info, forward, noise_cov) +stc = apply_inverse(contrast, inverse_operator, lambda2, method="dSPM", pick_ori=None) brain = stc.plot( hemi="both", subjects_dir=subjects_dir, @@ -150,4 +100,3 @@ views=["ven"], clim={"kind": "value", "lims": [3.0, 6.0, 9.0]}, ) -# brain.save_image('dSPM_map.png') diff --git a/examples/decoding/receptive_field_mtrf.py b/examples/decoding/receptive_field_mtrf.py index 24b459f192f..8dc04630753 100644 --- a/examples/decoding/receptive_field_mtrf.py +++ b/examples/decoding/receptive_field_mtrf.py @@ -17,7 +17,7 @@ .. _figure 1: https://www.frontiersin.org/articles/10.3389/fnhum.2016.00604/full#F1 .. _figure 2: https://www.frontiersin.org/articles/10.3389/fnhum.2016.00604/full#F2 .. _figure 5: https://www.frontiersin.org/articles/10.3389/fnhum.2016.00604/full#F5 -""" # noqa: E501 +""" # Authors: Chris Holdgraf # Eric Larson @@ -26,9 +26,6 @@ # License: BSD-3-Clause # Copyright the MNE-Python contributors. -# %% -# sphinx_gallery_thumbnail_number = 3 - from os.path import join import matplotlib.pyplot as plt @@ -58,8 +55,8 @@ speech = data["envelope"].T sfreq = float(data["Fs"].item()) sfreq /= decim -speech = mne.filter.resample(speech, down=decim, npad="auto") -raw = mne.filter.resample(raw, down=decim, npad="auto") +speech = mne.filter.resample(speech, down=decim, method="polyphase") +raw = mne.filter.resample(raw, down=decim, method="polyphase") # Read in channel positions and create our MNE objects from the raw data montage = mne.channels.make_standard_montage("biosemi128") @@ -131,6 +128,8 @@ # across the scalp. We will recreate `figure 1`_ and `figure 2`_ from # :footcite:`CrosseEtAl2016`. +# sphinx_gallery_thumbnail_number = 3 + # Print mean coefficients across all time delays / channels (see Fig 1) time_plot = 0.180 # For highlighting a specific time. 
fig, ax = plt.subplots(figsize=(4, 8), layout="constrained") diff --git a/examples/time_frequency/source_power_spectrum_opm.py b/examples/time_frequency/source_power_spectrum_opm.py index dd142138784..11168cc08a5 100644 --- a/examples/time_frequency/source_power_spectrum_opm.py +++ b/examples/time_frequency/source_power_spectrum_opm.py @@ -58,16 +58,16 @@ raw_erms = dict() new_sfreq = 60.0 # Nyquist frequency (30 Hz) < line noise freq (50 Hz) raws["vv"] = mne.io.read_raw_fif(vv_fname, verbose="error") # ignore naming -raws["vv"].load_data().resample(new_sfreq) +raws["vv"].load_data().resample(new_sfreq, method="polyphase") raws["vv"].info["bads"] = ["MEG2233", "MEG1842"] raw_erms["vv"] = mne.io.read_raw_fif(vv_erm_fname, verbose="error") -raw_erms["vv"].load_data().resample(new_sfreq) +raw_erms["vv"].load_data().resample(new_sfreq, method="polyphase") raw_erms["vv"].info["bads"] = ["MEG2233", "MEG1842"] raws["opm"] = mne.io.read_raw_fif(opm_fname) -raws["opm"].load_data().resample(new_sfreq) +raws["opm"].load_data().resample(new_sfreq, method="polyphase") raw_erms["opm"] = mne.io.read_raw_fif(opm_erm_fname) -raw_erms["opm"].load_data().resample(new_sfreq) +raw_erms["opm"].load_data().resample(new_sfreq, method="polyphase") # Make sure our assumptions later hold assert raws["opm"].info["sfreq"] == raws["vv"].info["sfreq"] diff --git a/mne/cuda.py b/mne/cuda.py index b4aa7c37bf3..7d7634a6e4e 100644 --- a/mne/cuda.py +++ b/mne/cuda.py @@ -330,7 +330,7 @@ def _fft_resample(x, new_len, npads, to_removes, cuda_dict=None, pad="reflect_li Number of samples to remove after resampling. cuda_dict : dict Dictionary constructed using setup_cuda_multiply_repeated(). - %(pad)s + %(pad_resample)s The default is ``'reflect_limited'``. .. versionadded:: 0.15 diff --git a/mne/filter.py b/mne/filter.py index 528128822b8..3d9b3ecc7da 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -5,6 +5,7 @@ from collections import Counter from copy import deepcopy from functools import partial +from math import gcd import numpy as np from scipy import fft, signal @@ -1898,12 +1899,13 @@ def resample( x, up=1.0, down=1.0, - npad=100, + *, axis=-1, - window="boxcar", + window="auto", n_jobs=None, - pad="reflect_limited", - *, + pad="auto", + npad=100, + method="fft", verbose=None, ): """Resample an array. @@ -1918,15 +1920,18 @@ def resample( Factor to upsample by. down : float Factor to downsample by. - %(npad)s axis : int Axis along which to resample (default is the last axis). %(window_resample)s %(n_jobs_cuda)s - %(pad)s - The default is ``'reflect_limited'``. + ``n_jobs='cuda'`` is only supported when ``method="fft"``. + %(pad_resample_auto)s .. versionadded:: 0.15 + %(npad_resample)s + %(method_resample)s + + .. versionadded:: 1.7 %(verbose)s Returns @@ -1936,26 +1941,16 @@ def resample( Notes ----- - This uses (hopefully) intelligent edge padding and frequency-domain - windowing improve scipy.signal.resample's resampling method, which + When using ``method="fft"`` (default), + this uses (hopefully) intelligent edge padding and frequency-domain + windowing improve :func:`scipy.signal.resample`'s resampling method, which we have adapted for our use here. Choices of npad and window have important consequences, and the default choices should work well for most natural signals. - - Resampling arguments are broken into "up" and "down" components for future - compatibility in case we decide to use an upfirdn implementation. The - current implementation is functionally equivalent to passing - up=up/down and down=1. 
""" - # check explicitly for backwards compatibility - if not isinstance(axis, int): - err = ( - "The axis parameter needs to be an integer (got %s). " - "The axis parameter was missing from this function for a " - "period of time, you might be intending to specify the " - "subsequent window parameter." % repr(axis) - ) - raise TypeError(err) + _validate_type(method, str, "method") + _validate_type(pad, str, "pad") + _check_option("method", method, ("fft", "polyphase")) # make sure our arithmetic will work x = _check_filterable(x, "resampled", "resample") @@ -1963,31 +1958,88 @@ def resample( del up, down if axis < 0: axis = x.ndim + axis - orig_last_axis = x.ndim - 1 - if axis != orig_last_axis: - x = x.swapaxes(axis, orig_last_axis) - orig_shape = x.shape - x_len = orig_shape[-1] - if x_len == 0: - warn("x has zero length along last axis, returning a copy of x") + if x.shape[axis] == 0: + warn(f"x has zero length along axis={axis}, returning a copy of x") return x.copy() - bad_msg = 'npad must be "auto" or an integer' + + # prep for resampling along the last axis (swap axis with last then reshape) + out_shape = list(x.shape) + out_shape.pop(axis) + out_shape.append(final_len) + x = np.atleast_2d(x.swapaxes(axis, -1).reshape((-1, x.shape[axis]))) + + # do the resampling using FFT or polyphase methods + kwargs = dict(pad=pad, window=window, n_jobs=n_jobs) + if method == "fft": + y = _resample_fft(x, npad=npad, ratio=ratio, final_len=final_len, **kwargs) + else: + up, down, kwargs["window"] = _prep_polyphase( + ratio, x.shape[-1], final_len, window + ) + half_len = len(window) // 2 + logger.info( + f"Polyphase resampling locality: ±{half_len} input sample{_pl(half_len)}" + ) + y = _resample_polyphase(x, up=up, down=down, **kwargs) + assert y.shape[-1] == final_len + + # restore dimensions (reshape then swap axis with last) + y = y.reshape(out_shape).swapaxes(axis, -1) + + return y + + +def _prep_polyphase(ratio, x_len, final_len, window): + if isinstance(window, str) and window == "auto": + window = ("kaiser", 5.0) # SciPy default + up = final_len + down = x_len + g_ = gcd(up, down) + up = up // g_ + down = down // g_ + # Figure out our signal locality and design window (adapted from SciPy) + if not isinstance(window, (list, np.ndarray)): + # Design a linear-phase low-pass FIR filter + max_rate = max(up, down) + f_c = 1.0 / max_rate # cutoff of FIR filter (rel. 
to Nyquist) + half_len = 10 * max_rate # reasonable cutoff for sinc-like function + window = signal.firwin(2 * half_len + 1, f_c, window=window) + return up, down, window + + +def _resample_polyphase(x, *, up, down, pad, window, n_jobs): + if pad == "auto": + pad = "reflect" + kwargs = dict(padtype=pad, window=window, up=up, down=down) + _validate_type( + n_jobs, (None, "int-like"), "n_jobs", extra="when method='polyphase'" + ) + parallel, p_fun, n_jobs = parallel_func(signal.resample_poly, n_jobs) + if n_jobs == 1: + y = signal.resample_poly(x, axis=-1, **kwargs) + else: + y = np.array(parallel(p_fun(x_, **kwargs) for x_ in x)) + return y + + +def _resample_fft(x_flat, *, ratio, final_len, pad, window, npad, n_jobs): + x_len = x_flat.shape[-1] + pad = "reflect_limited" if pad == "auto" else pad + if (isinstance(window, str) and window == "auto") or window is None: + window = "boxcar" if isinstance(npad, str): - if npad != "auto": - raise ValueError(bad_msg) + _check_option("npad", npad, ("auto",), extra="when a string") # Figure out reasonable pad that gets us to a power of 2 min_add = min(x_len // 8, 100) * 2 npad = 2 ** int(np.ceil(np.log2(x_len + min_add))) - x_len npad, extra = divmod(npad, 2) npads = np.array([npad, npad + extra], int) else: - if npad != int(npad): - raise ValueError(bad_msg) + npad = _ensure_int(npad, "npad", extra="or 'auto'") npads = np.array([npad, npad], int) del npad # prep for resampling now - x_flat = x.reshape((-1, x_len)) orig_len = x_len + npads.sum() # length after padding new_len = max(int(round(ratio * orig_len)), 1) # length after resampling to_removes = [int(round(ratio * npads[0]))] @@ -1997,15 +2049,12 @@ def resample( # assert np.abs(to_removes[1] - to_removes[0]) <= int(np.ceil(ratio)) # figure out windowing function - if window is not None: - if callable(window): - W = window(fft.fftfreq(orig_len)) - elif isinstance(window, np.ndarray) and window.shape == (orig_len,): - W = window - else: - W = fft.ifftshift(signal.get_window(window, orig_len)) + if callable(window): + W = window(fft.fftfreq(orig_len)) + elif isinstance(window, np.ndarray) and window.shape == (orig_len,): + W = window else: - W = np.ones(orig_len) + W = fft.ifftshift(signal.get_window(window, orig_len)) W *= float(new_len) / float(orig_len) # figure out if we should use CUDA @@ -2015,7 +2064,7 @@ def resample( # use of the 'flat' window is recommended for minimal ringing parallel, p_fun, n_jobs = parallel_func(_fft_resample, n_jobs) if n_jobs == 1: - y = np.zeros((len(x_flat), new_len - to_removes.sum()), dtype=x.dtype) + y = np.zeros((len(x_flat), new_len - to_removes.sum()), dtype=x_flat.dtype) for xi, x_ in enumerate(x_flat): y[xi] = _fft_resample(x_, new_len, npads, to_removes, cuda_dict, pad) else: @@ -2024,12 +2073,6 @@ def resample( ) y = np.array(y) - # Restore the original array shape (modified for resampling) - y.shape = orig_shape[:-1] + (y.shape[1],) - if axis != orig_last_axis: - y = y.swapaxes(axis, orig_last_axis) - assert y.shape[axis] == final_len - return y @@ -2635,11 +2678,12 @@ def filter( def resample( self, sfreq, + *, npad="auto", - window="boxcar", + window="auto", n_jobs=None, pad="edge", - *, + method="fft", verbose=None, ): """Resample data. @@ -2656,11 +2700,12 @@ def resample( %(npad)s %(window_resample)s %(n_jobs_cuda)s - %(pad)s - The default is ``'edge'``, which pads with the edge values of each - vector. + %(pad_resample)s .. versionadded:: 0.15 + %(method_resample)s + + .. 
versionadded:: 1.7
        %(verbose)s
 
        Returns
@@ -2691,7 +2736,14 @@ def resample(
        _check_preload(self, "inst.resample")
 
        self._data = resample(
-            self._data, sfreq, o_sfreq, npad, window=window, n_jobs=n_jobs, pad=pad
+            self._data,
+            sfreq,
+            o_sfreq,
+            npad=npad,
+            window=window,
+            n_jobs=n_jobs,
+            pad=pad,
+            method=method,
        )
        lowpass = self.info.get("lowpass")
        lowpass = np.inf if lowpass is None else lowpass
diff --git a/mne/io/base.py b/mne/io/base.py
index 95ba7038865..652b747a8ac 100644
--- a/mne/io/base.py
+++ b/mne/io/base.py
@@ -1260,12 +1260,14 @@ def notch_filter(
     def resample(
         self,
         sfreq,
+        *,
         npad="auto",
-        window="boxcar",
+        window="auto",
         stim_picks=None,
         n_jobs=None,
         events=None,
-        pad="reflect_limited",
+        pad="auto",
+        method="fft",
         verbose=None,
     ):
         """Resample all channels.
@@ -1294,7 +1296,7 @@ def resample(
         ----------
         sfreq : float
             New sample rate to use.
-        %(npad)s
+        %(npad_resample)s
         %(window_resample)s
         stim_picks : list of int | None
             Stim channels. These channels are simply subsampled or
@@ -1307,10 +1309,12 @@ def resample(
             An optional event matrix. When specified, the onsets of the events
             are resampled jointly with the data. NB: The input events are not
             modified, but a new array is returned with the raw instead.
-        %(pad)s
-            The default is ``'reflect_limited'``.
+        %(pad_resample_auto)s
 
             .. versionadded:: 0.15
+        %(method_resample)s
+
+        .. versionadded:: 1.7
         %(verbose)s
 
         Returns
@@ -1364,7 +1368,13 @@ def resample(
         )
 
         kwargs = dict(
-            up=sfreq, down=o_sfreq, npad=npad, window=window, n_jobs=n_jobs, pad=pad
+            up=sfreq,
+            down=o_sfreq,
+            npad=npad,
+            window=window,
+            n_jobs=n_jobs,
+            pad=pad,
+            method=method,
         )
         ratio, n_news = zip(
             *(
diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py
index 5c760735800..2c302eac3ad 100644
--- a/mne/io/fiff/tests/test_raw_fiff.py
+++ b/mne/io/fiff/tests/test_raw_fiff.py
@@ -42,6 +42,7 @@
     _record_warnings,
     assert_and_remove_boundary_annot,
     assert_object_equal,
+    catch_logging,
     requires_mne,
     run_subprocess,
 )
@@ -1290,23 +1291,28 @@ def test_resample_equiv():
 @pytest.mark.slowtest
 @testing.requires_testing_data
 @pytest.mark.parametrize(
-    "preload, n, npad",
+    "preload, n, npad, method",
     [
-        (True, 512, "auto"),
-        (False, 512, 0),
+        (True, 512, "auto", "fft"),
+        (True, 512, "auto", "polyphase"),
+        (False, 512, 0, "fft"),  # only test one with non-preload because it's slow
     ],
 )
-def test_resample(tmp_path, preload, n, npad):
+def test_resample(tmp_path, preload, n, npad, method):
     """Test resample (with I/O and multiple files)."""
+    kwargs = dict(npad=npad, method=method)
     raw = read_raw_fif(fif_fname)
     raw.crop(0, raw.times[n - 1])
+    # Reduce to a few MEG channels and a few stim channels to speed up
+    n_meg = 5
+    raw.pick(raw.ch_names[:n_meg] + raw.ch_names[312:320])  # 5 MEG + 3 STIM + 5 EEG
     assert len(raw.times) == n
     if preload:
         raw.load_data()
     raw_resamp = raw.copy()
     sfreq = raw.info["sfreq"]
     # test parallel on upsample
-    raw_resamp.resample(sfreq * 2, n_jobs=2, npad=npad)
+    raw_resamp.resample(sfreq * 2, n_jobs=2, **kwargs)
     assert raw_resamp.n_times == len(raw_resamp.times)
     raw_resamp.save(tmp_path / "raw_resamp-raw.fif")
     raw_resamp = read_raw_fif(tmp_path / "raw_resamp-raw.fif", preload=True)
@@ -1315,7 +1321,13 @@ def test_resample(tmp_path, preload, n, npad):
     assert raw_resamp.get_data().shape[1] == raw_resamp.n_times
     assert raw.get_data().shape[0] == raw_resamp._data.shape[0]
     # test non-parallel on downsample
-    raw_resamp.resample(sfreq, n_jobs=None, npad=npad)
+    with catch_logging() as log:
+ 
raw_resamp.resample(sfreq, n_jobs=None, verbose=True, **kwargs) + log = log.getvalue() + if method == "fft": + assert "locality" not in log + else: + assert "locality" in log assert raw_resamp.info["sfreq"] == sfreq assert raw.get_data().shape == raw_resamp._data.shape assert raw.first_samp == raw_resamp.first_samp @@ -1324,18 +1336,12 @@ def test_resample(tmp_path, preload, n, npad): # works (hooray). Note that the stim channels had to be sub-sampled # without filtering to be accurately preserved # note we have to treat MEG and EEG+STIM channels differently (tols) - assert_allclose( - raw.get_data()[:306, 200:-200], - raw_resamp._data[:306, 200:-200], - rtol=1e-2, - atol=1e-12, - ) - assert_allclose( - raw.get_data()[306:, 200:-200], - raw_resamp._data[306:, 200:-200], - rtol=1e-2, - atol=1e-7, - ) + want_meg = raw.get_data()[:n_meg, 200:-200] + got_meg = raw_resamp._data[:n_meg, 200:-200] + want_non_meg = raw.get_data()[n_meg:, 200:-200] + got_non_meg = raw_resamp._data[n_meg:, 200:-200] + assert_allclose(got_meg, want_meg, rtol=1e-2, atol=1e-12) + assert_allclose(want_non_meg, got_non_meg, rtol=1e-2, atol=1e-7) # now check multiple file support w/resampling, as order of operations # (concat, resample) should not affect our data @@ -1344,9 +1350,9 @@ def test_resample(tmp_path, preload, n, npad): raw3 = raw.copy() raw4 = raw.copy() raw1 = concatenate_raws([raw1, raw2]) - raw1.resample(10.0, npad=npad) - raw3.resample(10.0, npad=npad) - raw4.resample(10.0, npad=npad) + raw1.resample(10.0, **kwargs) + raw3.resample(10.0, **kwargs) + raw4.resample(10.0, **kwargs) raw3 = concatenate_raws([raw3, raw4]) assert_array_equal(raw1._data, raw3._data) assert_array_equal(raw1._first_samps, raw3._first_samps) @@ -1364,12 +1370,12 @@ def test_resample(tmp_path, preload, n, npad): # basic decimation stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ["stim"])) - assert_allclose(raw.resample(8.0, npad=npad)._data, [[1, 1, 0, 0, 1, 1, 0, 0]]) + assert_allclose(raw.resample(8.0, **kwargs)._data, [[1, 1, 0, 0, 1, 1, 0, 0]]) # decimation of multiple stim channels raw = RawArray(2 * [stim], create_info(2, len(stim), 2 * ["stim"])) assert_allclose( - raw.resample(8.0, npad=npad, verbose="error")._data, + raw.resample(8.0, **kwargs, verbose="error")._data, [[1, 1, 0, 0, 1, 1, 0, 0], [1, 1, 0, 0, 1, 1, 0, 0]], ) @@ -1377,19 +1383,19 @@ def test_resample(tmp_path, preload, n, npad): # done naively stim = [0, 0, 0, 1, 1, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ["stim"])) - assert_allclose(raw.resample(4.0, npad=npad)._data, [[0, 1, 1, 0]]) + assert_allclose(raw.resample(4.0, **kwargs)._data, [[0, 1, 1, 0]]) # two events are merged in this case (warning) stim = [0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ["stim"])) with pytest.warns(RuntimeWarning, match="become unreliable"): - raw.resample(8.0, npad=npad) + raw.resample(8.0, **kwargs) # events are dropped in this case (warning) stim = [0, 1, 1, 0, 0, 1, 1, 0] raw = RawArray([stim], create_info(1, len(stim), ["stim"])) with pytest.warns(RuntimeWarning, match="become unreliable"): - raw.resample(4.0, npad=npad) + raw.resample(4.0, **kwargs) # test resampling events: this should no longer give a warning # we often have first_samp != 0, include it here too @@ -1400,7 +1406,7 @@ def test_resample(tmp_path, preload, n, npad): first_samp = len(stim) // 2 raw = RawArray([stim], create_info(1, o_sfreq, ["stim"]), first_samp=first_samp) events = 
find_events(raw) - raw, events = raw.resample(n_sfreq, events=events, npad=npad) + raw, events = raw.resample(n_sfreq, events=events, **kwargs) # Try index into raw.times with resampled events: raw.times[events[:, 0] - raw.first_samp] n_fsamp = int(first_samp * sfreq_ratio) # how it's calc'd in base.py @@ -1425,16 +1431,16 @@ def test_resample(tmp_path, preload, n, npad): # test copy flag stim = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0] raw = RawArray([stim], create_info(1, len(stim), ["stim"])) - raw_resampled = raw.copy().resample(4.0, npad=npad) + raw_resampled = raw.copy().resample(4.0, **kwargs) assert raw_resampled is not raw - raw_resampled = raw.resample(4.0, npad=npad) + raw_resampled = raw.resample(4.0, **kwargs) assert raw_resampled is raw # resample should still work even when no stim channel is present raw = RawArray(np.random.randn(1, 100), create_info(1, 100, ["eeg"])) with raw.info._unlock(): raw.info["lowpass"] = 50.0 - raw.resample(10, npad=npad) + raw.resample(10, **kwargs) assert raw.info["lowpass"] == 5.0 assert len(raw) == 10 diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 213d00e5baa..50734817431 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -819,7 +819,17 @@ def crop(self, tmin=None, tmax=None, include_tmax=True): return self # return self for chaining methods @verbose - def resample(self, sfreq, npad="auto", window="boxcar", n_jobs=None, verbose=None): + def resample( + self, + sfreq, + *, + npad=100, + method="fft", + window="auto", + pad="auto", + n_jobs=None, + verbose=None, + ): """Resample data. If appropriate, an anti-aliasing filter is applied before resampling. @@ -833,8 +843,15 @@ def resample(self, sfreq, npad="auto", window="boxcar", n_jobs=None, verbose=Non Amount to pad the start and end of the data. Can also be "auto" to use a padding that will result in a power-of-two size (can be much faster). - window : str | tuple - Window to use in resampling. See :func:`scipy.signal.resample`. + %(method_resample)s + + .. versionadded:: 1.7 + %(window_resample)s + + .. versionadded:: 1.7 + %(pad_resample_auto)s + + .. 
versionadded:: 1.7 %(n_jobs)s %(verbose)s @@ -863,7 +880,9 @@ def resample(self, sfreq, npad="auto", window="boxcar", n_jobs=None, verbose=Non data = self.data if data.dtype == np.float32: data = data.astype(np.float64) - self.data = resample(data, sfreq, o_sfreq, npad, n_jobs=n_jobs) + self.data = resample( + data, sfreq, o_sfreq, npad=npad, window=window, n_jobs=n_jobs, method=method + ) # adjust indirectly affected variables self.tstep = 1.0 / sfreq diff --git a/mne/tests/test_filter.py b/mne/tests/test_filter.py index 110a8f136c3..3ab60dba055 100644 --- a/mne/tests/test_filter.py +++ b/mne/tests/test_filter.py @@ -32,6 +32,8 @@ from mne.io import RawArray, read_raw_fif from mne.utils import catch_logging, requires_mne, run_subprocess, sum_squared +resample_method_parametrize = pytest.mark.parametrize("method", ("fft", "polyphase")) + def test_filter_array(): """Test filtering an array.""" @@ -372,20 +374,27 @@ def test_notch_filters(method, filter_length, line_freq, tol): assert_almost_equal(new_power, orig_power, tol) -def test_resample(): +@resample_method_parametrize +def test_resample(method): """Test resampling.""" rng = np.random.RandomState(0) x = rng.normal(0, 1, (10, 10, 10)) - x_rs = resample(x, 1, 2, 10) + with catch_logging() as log: + x_rs = resample(x, 1, 2, npad=10, method=method, verbose=True) + log = log.getvalue() + if method == "fft": + assert "locality" not in log + else: + assert "locality" in log assert x.shape == (10, 10, 10) assert x_rs.shape == (10, 10, 5) x_2 = x.swapaxes(0, 1) - x_2_rs = resample(x_2, 1, 2, 10) + x_2_rs = resample(x_2, 1, 2, npad=10, method=method) assert_array_equal(x_2_rs.swapaxes(0, 1), x_rs) x_3 = x.swapaxes(0, 2) - x_3_rs = resample(x_3, 1, 2, 10, 0) + x_3_rs = resample(x_3, 1, 2, npad=10, axis=0, method=method) assert_array_equal(x_3_rs.swapaxes(0, 2), x_rs) # make sure we cast to array if necessary @@ -401,12 +410,12 @@ def test_resample_scipy(): err_msg = "%s: %s" % (N, window) x_2_sp = sp_resample(x, 2 * N, window=window) for n_jobs in n_jobs_test: - x_2 = resample(x, 2, 1, 0, window=window, n_jobs=n_jobs) + x_2 = resample(x, 2, 1, npad=0, window=window, n_jobs=n_jobs) assert_allclose(x_2, x_2_sp, atol=1e-12, err_msg=err_msg) new_len = int(round(len(x) * (1.0 / 2.0))) x_p5_sp = sp_resample(x, new_len, window=window) for n_jobs in n_jobs_test: - x_p5 = resample(x, 1, 2, 0, window=window, n_jobs=n_jobs) + x_p5 = resample(x, 1, 2, npad=0, window=window, n_jobs=n_jobs) assert_allclose(x_p5, x_p5_sp, atol=1e-12, err_msg=err_msg) @@ -450,23 +459,25 @@ def test_resamp_stim_channel(): assert new_data.shape[1] == new_data_len -def test_resample_raw(): +@resample_method_parametrize +def test_resample_raw(method): """Test resampling using RawArray.""" x = np.zeros((1, 1001)) sfreq = 2048.0 raw = RawArray(x, create_info(1, sfreq, "eeg")) - raw.resample(128, npad=10) + raw.resample(128, npad=10, method=method) data = raw.get_data() assert data.shape == (1, 63) -def test_resample_below_1_sample(): +@resample_method_parametrize +def test_resample_below_1_sample(method): """Test resampling doesn't yield datapoints.""" # Raw x = np.zeros((1, 100)) sfreq = 1000.0 raw = RawArray(x, create_info(1, sfreq, "eeg")) - raw.resample(5) + raw.resample(5, method=method) assert len(raw.times) == 1 assert raw.get_data().shape[1] == 1 @@ -487,7 +498,13 @@ def test_resample_below_1_sample(): preload=True, verbose=False, ) - epochs.resample(1) + with catch_logging() as log: + epochs.resample(1, method=method, verbose=True) + log = log.getvalue() + if method == 
"fft": + assert "locality" not in log + else: + assert "locality" in log assert len(epochs.times) == 1 assert epochs.get_data(copy=False).shape[2] == 1 diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index 8c9e7df9389..be31fd1501b 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -558,61 +558,73 @@ def test_stc_arithmetic(): @pytest.mark.slowtest @testing.requires_testing_data -def test_stc_methods(): +@pytest.mark.parametrize("kind", ("scalar", "vector")) +@pytest.mark.parametrize("method", ("fft", "polyphase")) +def test_stc_methods(kind, method): """Test stc methods lh_data, rh_data, bin(), resample().""" - stc_ = read_source_estimate(fname_stc) + stc = read_source_estimate(fname_stc) - # Make a vector version of the above source estimate - x = stc_.data[:, np.newaxis, :] - yz = np.zeros((x.shape[0], 2, x.shape[2])) - vec_stc_ = VectorSourceEstimate( - np.concatenate((x, yz), 1), stc_.vertices, stc_.tmin, stc_.tstep, stc_.subject - ) + if kind == "vector": + # Make a vector version of the above source estimate + x = stc.data[:, np.newaxis, :] + yz = np.zeros((x.shape[0], 2, x.shape[2])) + stc = VectorSourceEstimate( + np.concatenate((x, yz), 1), + stc.vertices, + stc.tmin, + stc.tstep, + stc.subject, + ) - for stc in [stc_, vec_stc_]: - # lh_data / rh_data - assert_array_equal(stc.lh_data, stc.data[: len(stc.lh_vertno)]) - assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno) :]) - - # bin - binned = stc.bin(0.12) - a = np.mean(stc.data[..., : np.searchsorted(stc.times, 0.12)], axis=-1) - assert_array_equal(a, binned.data[..., 0]) - - stc = read_source_estimate(fname_stc) - stc.subject = "sample" - label_lh = read_labels_from_annot( - "sample", "aparc", "lh", subjects_dir=subjects_dir - )[0] - label_rh = read_labels_from_annot( - "sample", "aparc", "rh", subjects_dir=subjects_dir - )[0] - label_both = label_lh + label_rh - for label in (label_lh, label_rh, label_both): - assert isinstance(stc.shape, tuple) and len(stc.shape) == 2 - stc_label = stc.in_label(label) - if label.hemi != "both": - if label.hemi == "lh": - verts = stc_label.vertices[0] - else: # label.hemi == 'rh': - verts = stc_label.vertices[1] - n_vertices_used = len(label.get_vertices_used(verts)) - assert_equal(len(stc_label.data), n_vertices_used) - stc_lh = stc.in_label(label_lh) - pytest.raises(ValueError, stc_lh.in_label, label_rh) - label_lh.subject = "foo" - pytest.raises(RuntimeError, stc.in_label, label_lh) - - stc_new = deepcopy(stc) - o_sfreq = 1.0 / stc.tstep - # note that using no padding for this STC reduces edge ringing... 
- stc_new.resample(2 * o_sfreq, npad=0) - assert stc_new.data.shape[1] == 2 * stc.data.shape[1] - assert stc_new.tstep == stc.tstep / 2 - stc_new.resample(o_sfreq, npad=0) - assert stc_new.data.shape[1] == stc.data.shape[1] - assert stc_new.tstep == stc.tstep - assert_array_almost_equal(stc_new.data, stc.data, 5) + # lh_data / rh_data + assert_array_equal(stc.lh_data, stc.data[: len(stc.lh_vertno)]) + assert_array_equal(stc.rh_data, stc.data[len(stc.lh_vertno) :]) + + # bin + binned = stc.bin(0.12) + a = np.mean(stc.data[..., : np.searchsorted(stc.times, 0.12)], axis=-1) + assert_array_equal(a, binned.data[..., 0]) + + stc = read_source_estimate(fname_stc) + stc.subject = "sample" + label_lh = read_labels_from_annot( + "sample", "aparc", "lh", subjects_dir=subjects_dir + )[0] + label_rh = read_labels_from_annot( + "sample", "aparc", "rh", subjects_dir=subjects_dir + )[0] + label_both = label_lh + label_rh + for label in (label_lh, label_rh, label_both): + assert isinstance(stc.shape, tuple) and len(stc.shape) == 2 + stc_label = stc.in_label(label) + if label.hemi != "both": + if label.hemi == "lh": + verts = stc_label.vertices[0] + else: # label.hemi == 'rh': + verts = stc_label.vertices[1] + n_vertices_used = len(label.get_vertices_used(verts)) + assert_equal(len(stc_label.data), n_vertices_used) + stc_lh = stc.in_label(label_lh) + pytest.raises(ValueError, stc_lh.in_label, label_rh) + label_lh.subject = "foo" + pytest.raises(RuntimeError, stc.in_label, label_lh) + + stc_new = deepcopy(stc) + o_sfreq = 1.0 / stc.tstep + # note that using no padding for this STC reduces edge ringing... + stc_new.resample(2 * o_sfreq, npad=0, method=method) + assert stc_new.data.shape[1] == 2 * stc.data.shape[1] + assert stc_new.tstep == stc.tstep / 2 + stc_new.resample(o_sfreq, npad=0, method=method) + assert stc_new.data.shape[1] == stc.data.shape[1] + assert stc_new.tstep == stc.tstep + if method == "fft": + # no low-passing so survives round-trip + assert_allclose(stc_new.data, stc.data, atol=1e-5) + else: + # low-passing means we need something more flexible + corr = np.corrcoef(stc_new.data.ravel(), stc.data.ravel())[0, 1] + assert 0.99 < corr < 1 @testing.requires_testing_data diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 806d774f221..6d26d01dc40 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -2245,6 +2245,13 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): docdict["method_psd"] = _method_psd.format("", "") docdict["method_psd_auto"] = _method_psd.format(" | ``'auto'``", "") +docdict["method_resample"] = """ +method : str + Resampling method to use. Can be ``"fft"`` (default) or ``"polyphase"`` + to use FFT-based on polyphase FIR resampling, respectively. These wrap to + :func:`scipy.signal.resample` and :func:`scipy.signal.resample_poly`, respectively. +""" + docdict["mode_eltc"] = """ mode : str Extraction mode, see Notes. @@ -2488,11 +2495,16 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): docdict["npad"] = """ npad : int | str - Amount to pad the start and end of the data. - Can also be ``"auto"`` to use a padding that will result in - a power-of-two size (can be much faster). + Amount to pad the start and end of the data. Can also be ``"auto"`` to use a padding + that will result in a power-of-two size (can be much faster). """ +docdict["npad_resample"] = ( + docdict["npad"] + + """ + Only used when ``method="fft"``. 
+""" +) docdict["nrows_ncols_ica_components"] = """ nrows, ncols : int | 'auto' The number of rows and columns of topographies to plot. If both ``nrows`` @@ -2698,22 +2710,38 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): # P _pad_base = """ -pad : str - The type of padding to use. Supports all :func:`numpy.pad` ``mode`` - options. Can also be ``"reflect_limited"``, which pads with a - reflected version of each vector mirrored on the first and last values + all :func:`numpy.pad` ``mode`` options. Can also be ``"reflect_limited"``, which + pads with a reflected version of each vector mirrored on the first and last values of the vector, followed by zeros. """ -docdict["pad"] = _pad_base - docdict["pad_fir"] = ( - _pad_base - + """ + """ +pad : str + The type of padding to use. Supports """ + + _pad_base + + """\ Only used for ``method='fir'``. """ ) +docdict["pad_resample"] = ( # used when default is not "auto" + """ +pad : str + The type of padding to use. When ``method="fft"``, supports """ + + _pad_base + + """\ + When ``method="polyphase"``, supports all modes of :func:`scipy.signal.upfirdn`. +""" +) + +docdict["pad_resample_auto"] = ( # used when default is "auto" + docdict["pad_resample"] + + """\ + The default ("auto") means ``'reflect_limited'`` for ``method='fft'`` and + ``'reflect'`` for ``method='polyphase'``. +""" +) docdict["pca_vars_pctf"] = """ pca_vars : array, shape (n_comp,) | list of array The explained variances of the first n_comp SVD components across the @@ -4331,8 +4359,12 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): docdict["window_resample"] = """ window : str | tuple - Frequency-domain window to use in resampling. - See :func:`scipy.signal.resample`. + When ``method="fft"``, this is the *frequency-domain* window to use in resampling, + and should be the same length as the signal; see :func:`scipy.signal.resample` + for details. When ``method="polyphase"``, this is the *time-domain* linear-phase + window to use after upsampling the signal; see :func:`scipy.signal.resample_poly` + for details. The default ``"auto"`` will use ``"boxcar"`` for ``method="fft"`` and + ``("kaiser", 5.0)`` for ``method="polyphase"``. """ # %% diff --git a/tutorials/preprocessing/30_filtering_resampling.py b/tutorials/preprocessing/30_filtering_resampling.py index 530b92741f6..6c118c99180 100644 --- a/tutorials/preprocessing/30_filtering_resampling.py +++ b/tutorials/preprocessing/30_filtering_resampling.py @@ -206,16 +206,59 @@ def add_arrows(axes): # frequency`_ of the desired new sampling rate. 
This can be clearly seen in the # PSD plot, where a dashed vertical line indicates the filter cutoff; the # original data had an existing lowpass at around 172 Hz (see -# ``raw.info['lowpass']``), and the data resampled from 600 Hz to 200 Hz gets +# ``raw.info['lowpass']``), and the data resampled from ~600 Hz to 200 Hz gets # automatically lowpass filtered at 100 Hz (the `Nyquist frequency`_ for a # target rate of 200 Hz): raw_downsampled = raw.copy().resample(sfreq=200) +# choose n_fft for Welch PSD to make frequency axes similar resolution +n_ffts = [4096, int(round(4096 * 200 / raw.info["sfreq"]))] +fig, axes = plt.subplots(2, 1, sharey=True, layout="constrained", figsize=(10, 6)) +for ax, data, title, n_fft in zip( + axes, [raw, raw_downsampled], ["Original", "Downsampled"], n_ffts +): + fig = data.compute_psd(n_fft=n_fft).plot( + average=True, picks="data", exclude="bads", axes=ax + ) + ax.set(title=title, xlim=(0, 300)) -for data, title in zip([raw, raw_downsampled], ["Original", "Downsampled"]): - fig = data.compute_psd().plot(average=True, picks="data", exclude="bads") - fig.suptitle(title) - plt.setp(fig.axes, xlim=(0, 300)) +# %% +# By default, MNE-Python resamples using ``method="fft"``, which performs FFT-based +# resampling via :func:`scipy.signal.resample`. While efficient and good for most +# biological signals, it has two main potential drawbacks: +# +# 1. It assumes periodicity of the signal. We try to overcome this with appropriate +# signal padding, but some signal leakage may still occur. +# 2. It treats the entire signal as a single block. This means that in general effects +# are not guaranteed to be localized in time, though in practice they often are. +# +# Alternatively, resampling can be performed using ``method="polyphase"`` instead. +# This uses :func:`scipy.signal.resample_poly` under the hood, which in turn utilizes +# a three-step process to resample signals (see :func:`scipy.signal.upfirdn` for +# details). This process guarantees that each resampled output value is only affected by +# input values within a limited range. In other words, output values are guaranteed to +# be a result of a specific set of input values. +# +# In general, using ``method="polyphase"`` can also be faster than ``method="fft"`` in +# cases where the desired sampling rate is an integer factor different from the input +# sampling rate. 
For example: + +# sphinx_gallery_thumbnail_number = 11 + +n_ffts = [4096, 2048] # factor of 2 smaller n_fft +raw_downsampled_poly = raw.copy().resample( + sfreq=raw.info["sfreq"] / 2.0, + method="polyphase", + verbose=True, +) +fig, axes = plt.subplots(2, 1, sharey=True, layout="constrained", figsize=(10, 6)) +for ax, data, title, n_fft in zip( + axes, [raw, raw_downsampled_poly], ["Original", "Downsampled (polyphase)"], n_ffts +): + data.compute_psd(n_fft=n_fft).plot( + average=True, picks="data", exclude="bads", axes=ax + ) + ax.set(title=title, xlim=(0, 300)) # %% # Because resampling involves filtering, there are some pitfalls to resampling From 854c0eb018beafa2841663bcbbdec3af1b35e73a Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 7 Dec 2023 15:34:20 -0500 Subject: [PATCH 02/37] MAINT: Fix for latest PyVista (#12275) --- mne/filter.py | 5 +++-- mne/io/fiff/tests/test_raw_fiff.py | 4 ++-- mne/tests/test_filter.py | 8 ++++---- mne/viz/backends/_pyvista.py | 9 +++++---- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/mne/filter.py b/mne/filter.py index 3d9b3ecc7da..b9bc92aa9ce 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -1978,7 +1978,8 @@ def resample( ) half_len = len(window) // 2 logger.info( - f"Polyphase resampling locality: ±{half_len} input sample{_pl(half_len)}" + f"Polyphase resampling neighborhood: ±{half_len} " + f"input sample{_pl(half_len)}" ) y = _resample_polyphase(x, up=up, down=down, **kwargs) assert y.shape[-1] == final_len @@ -1997,7 +1998,7 @@ def _prep_polyphase(ratio, x_len, final_len, window): g_ = gcd(up, down) up = up // g_ down = down // g_ - # Figure out our signal locality and design window (adapted from SciPy) + # Figure out our signal neighborhood and design window (adapted from SciPy) if not isinstance(window, (list, np.ndarray)): # Design a linear-phase low-pass FIR filter max_rate = max(up, down) diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py index 2c302eac3ad..bb249809f19 100644 --- a/mne/io/fiff/tests/test_raw_fiff.py +++ b/mne/io/fiff/tests/test_raw_fiff.py @@ -1325,9 +1325,9 @@ def test_resample(tmp_path, preload, n, npad, method): raw_resamp.resample(sfreq, n_jobs=None, verbose=True, **kwargs) log = log.getvalue() if method == "fft": - assert "locality" not in log + assert "neighborhood" not in log else: - assert "locality" in log + assert "neighborhood" in log assert raw_resamp.info["sfreq"] == sfreq assert raw.get_data().shape == raw_resamp._data.shape assert raw.first_samp == raw_resamp.first_samp diff --git a/mne/tests/test_filter.py b/mne/tests/test_filter.py index 3ab60dba055..36f2da736c3 100644 --- a/mne/tests/test_filter.py +++ b/mne/tests/test_filter.py @@ -383,9 +383,9 @@ def test_resample(method): x_rs = resample(x, 1, 2, npad=10, method=method, verbose=True) log = log.getvalue() if method == "fft": - assert "locality" not in log + assert "neighborhood" not in log else: - assert "locality" in log + assert "neighborhood" in log assert x.shape == (10, 10, 10) assert x_rs.shape == (10, 10, 5) @@ -502,9 +502,9 @@ def test_resample_below_1_sample(method): epochs.resample(1, method=method, verbose=True) log = log.getvalue() if method == "fft": - assert "locality" not in log + assert "neighborhood" not in log else: - assert "locality" in log + assert "neighborhood" in log assert len(epochs.times) == 1 assert epochs.get_data(copy=False).shape[2] == 1 diff --git a/mne/viz/backends/_pyvista.py b/mne/viz/backends/_pyvista.py index c1fb06eb8ff..b5d921f3968 100644 --- 
a/mne/viz/backends/_pyvista.py +++ b/mne/viz/backends/_pyvista.py @@ -108,7 +108,6 @@ def _init( off_screen=False, notebook=False, splash=False, - multi_samples=None, ): self._plotter = plotter self.display = None @@ -123,7 +122,6 @@ def _init( self.store["shape"] = shape self.store["off_screen"] = off_screen self.store["border"] = False - self.store["multi_samples"] = multi_samples self.store["line_smoothing"] = True self.store["polygon_smoothing"] = True self.store["point_smoothing"] = True @@ -234,12 +232,12 @@ def __init__( notebook=notebook, smooth_shading=smooth_shading, splash=splash, - multi_samples=multi_samples, ) self.font_family = "arial" self.tube_n_sides = 20 self.antialias = _get_3d_option("antialias") self.depth_peeling = _get_3d_option("depth_peeling") + self.multi_samples = multi_samples self.smooth_shading = smooth_shading if isinstance(fig, int): saved_fig = _FIGURES.get(fig) @@ -880,7 +878,10 @@ def _toggle_antialias(self): plotter.disable_anti_aliasing() else: if not bad_system: - plotter.enable_anti_aliasing(aa_type="msaa") + plotter.enable_anti_aliasing( + aa_type="msaa", + multi_samples=self.multi_samples, + ) def remove_mesh(self, mesh_data): actor, _ = mesh_data From 06c90a7982eee3b4747bbf6e6afca71014c3e5bf Mon Sep 17 00:00:00 2001 From: Nikolai Kapralov <4dvlup@gmail.com> Date: Thu, 7 Dec 2023 23:27:16 +0100 Subject: [PATCH 03/37] [MRG] DOC: inform about channel discrepancy in make_lcmv (#12238) --- doc/changes/devel.rst | 1 + doc/changes/names.inc | 2 ++ mne/utils/check.py | 24 ++++++++++++++++++++++-- mne/utils/tests/test_check.py | 19 +++++++++++++------ 4 files changed, 38 insertions(+), 8 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index fadd872e621..ddd70ab22be 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -35,6 +35,7 @@ enhanced experience if it supports it! Enhancements ~~~~~~~~~~~~ - Speed up export to .edf in :func:`mne.export.export_raw` by using ``edfio`` instead of ``EDFlib-Python`` (:gh:`12218` by :newcontrib:`Florian Hofer`) +- Inform the user about channel discrepancy between provided info, forward operator, and/or covariance matrices in :func:`mne.beamformer.make_lcmv` (:gh:`12238` by :newcontrib:`Nikolai Kapralov`) - We added type hints for the return values of :func:`mne.read_evokeds` and :func:`mne.io.read_raw`. Development environments like VS Code or PyCharm will now provide more help when using these functions in your code. (:gh:`12250` by `Richard Höchenberger`_ and `Eric Larson`_) - Add ``method="polyphase"`` to :meth:`mne.io.Raw.resample` and related functions to allow resampling using :func:`scipy.signal.upfirdn` (:gh:`12268` by `Eric Larson`_) diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 1085716a697..0d62d247dd3 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -402,6 +402,8 @@ .. _Nikolai Chapochnikov: https://github.com/chapochn +.. _Nikolai Kapralov: https://github.com/ctrltz + .. _Nikolas Chalas: https://github.com/Nichalas .. 
_Okba Bekhelifi: https://github.com/okbalefthanded diff --git a/mne/utils/check.py b/mne/utils/check.py index 8c2bc5f919d..467bd14e952 100644 --- a/mne/utils/check.py +++ b/mne/utils/check.py @@ -652,7 +652,8 @@ def _check_if_nan(data, msg=" to be plotted"): raise ValueError("Some of the values {} are NaN.".format(msg)) -def _check_info_inv(info, forward, data_cov=None, noise_cov=None): +@verbose +def _check_info_inv(info, forward, data_cov=None, noise_cov=None, verbose=None): """Return good channels common to forward model and covariance matrices.""" from .._fiff.pick import pick_types @@ -696,6 +697,19 @@ def _check_info_inv(info, forward, data_cov=None, noise_cov=None): if noise_cov is not None: ch_names = _compare_ch_names(ch_names, noise_cov.ch_names, noise_cov["bads"]) + # inform about excluding any channels apart from bads and reference + all_bads = info["bads"] + ref_chs + if data_cov is not None: + all_bads += data_cov["bads"] + if noise_cov is not None: + all_bads += noise_cov["bads"] + dropped_nonbads = set(info["ch_names"]) - set(ch_names) - set(all_bads) + if dropped_nonbads: + logger.info( + f"Excluding {len(dropped_nonbads)} channel(s) missing from the " + "provided forward operator and/or covariance matrices" + ) + picks = [info["ch_names"].index(k) for k in ch_names if k in info["ch_names"]] return picks @@ -750,7 +764,13 @@ def _check_one_ch_type(method, info, forward, data_cov=None, noise_cov=None): info_pick = info else: _validate_type(noise_cov, [None, Covariance], "noise_cov") - picks = _check_info_inv(info, forward, data_cov=data_cov, noise_cov=noise_cov) + picks = _check_info_inv( + info, + forward, + data_cov=data_cov, + noise_cov=noise_cov, + verbose=_verbose_safe_false(), + ) info_pick = pick_info(info, picks) ch_types = [_contains_ch_type(info_pick, tt) for tt in ("mag", "grad", "eeg")] if sum(ch_types) > 1: diff --git a/mne/utils/tests/test_check.py b/mne/utils/tests/test_check.py index 4f5f6d5416b..48017b79ae2 100644 --- a/mne/utils/tests/test_check.py +++ b/mne/utils/tests/test_check.py @@ -30,6 +30,7 @@ _safe_input, _suggest, _validate_type, + catch_logging, check_fname, check_random_state, check_version, @@ -141,12 +142,12 @@ def test_check_info_inv(): assert [1, 2] not in picks # covariance matrix data_cov_bads = data_cov.copy() - data_cov_bads["bads"] = data_cov_bads.ch_names[0] + data_cov_bads["bads"] = [data_cov_bads.ch_names[0]] picks = _check_info_inv(epochs.info, forward, data_cov=data_cov_bads) assert 0 not in picks # noise covariance matrix noise_cov_bads = noise_cov.copy() - noise_cov_bads["bads"] = noise_cov_bads.ch_names[1] + noise_cov_bads["bads"] = [noise_cov_bads.ch_names[1]] picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov_bads) assert 1 not in picks @@ -164,10 +165,16 @@ def test_check_info_inv(): noise_cov = pick_channels_cov( noise_cov, include=[noise_cov.ch_names[ii] for ii in range(7, 12)] ) - picks = _check_info_inv( - epochs.info, forward, noise_cov=noise_cov, data_cov=data_cov - ) - assert list(range(7, 10)) == picks + with catch_logging() as log: + picks = _check_info_inv( + epochs.info, forward, noise_cov=noise_cov, data_cov=data_cov, verbose=True + ) + assert list(range(7, 10)) == picks + + # make sure to inform the user that 7 channels were dropped + # (there are 10 channels in epochs but only 3 were picked) + log = log.getvalue() + assert "Excluding 7 channel(s) missing" in log def test_check_option(): From d00cbb12b9b6070a713ac67fcba19e7443c71ef7 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Richard=20H=C3=B6chenberger?= Date: Fri, 8 Dec 2023 13:42:32 +0100 Subject: [PATCH 04/37] Use hatchling as build backend (#12269) --- MANIFEST.in | 86 ------------------------------------------- Makefile | 5 +-- azure-pipelines.yml | 4 -- doc/changes/devel.rst | 1 + pyproject.toml | 71 +++++++++++++---------------------- 5 files changed, 28 insertions(+), 139 deletions(-) delete mode 100644 MANIFEST.in diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 5a06c9c814b..00000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,86 +0,0 @@ -include *.rst -include LICENSE.txt -include SECURITY.md -include mne/__init__.py - -recursive-include examples *.py -recursive-include examples *.txt -recursive-include tutorials *.py -recursive-include tutorials *.txt - -recursive-include mne *.py -recursive-include mne *.pyi -recursive-include mne/data * -recursive-include mne/icons * -recursive-include mne/data/helmets * -recursive-include mne/data/image * -recursive-include mne/data/fsaverage * -include mne/datasets/_fsaverage/root.txt -include mne/datasets/_fsaverage/bem.txt -include mne/datasets/_infant/*.txt -include mne/datasets/_phantom/*.txt -include mne/data/dataset_checksums.txt -include mne/data/eegbci_checksums.txt - -recursive-include mne/html_templates *.html.jinja - -recursive-include mne/channels/data/layouts * -recursive-include mne/channels/data/montages * -recursive-include mne/channels/data/neighbors * - -recursive-include mne/gui/help *.json - -recursive-include mne/html *.js -recursive-include mne/html *.css - -recursive-include mne/report * - -recursive-include mne/io/artemis123/resources * - -recursive-include mne mne/datasets *.csv -include mne/io/edf/gdf_encodes.txt -include mne/datasets/sleep_physionet/SHA1SUMS - -### Exclude - -recursive-exclude examples/MNE-sample-data * -recursive-exclude examples/MNE-testing-data * -recursive-exclude examples/MNE-spm-face * -recursive-exclude examples/MNE-somato-data * -recursive-exclude tools * -exclude tools -exclude Makefile -exclude .coveragerc -exclude *.yml -exclude *.yaml -exclude .git-blame-ignore-revs -exclude ignore_words.txt -exclude .mailmap -exclude codemeta.json -exclude CITATION.cff -recursive-exclude mne *.pyc - -recursive-exclude doc * -recursive-exclude logo * - -exclude CONTRIBUTING.md -exclude CODE_OF_CONDUCT.md -exclude .github -exclude .github/CONTRIBUTING.md -exclude .github/ISSUE_TEMPLATE -exclude .github/ISSUE_TEMPLATE/blank.md -exclude .github/ISSUE_TEMPLATE/bug_report.md -exclude .github/ISSUE_TEMPLATE/feature_request.md -exclude .github/PULL_REQUEST_TEMPLATE.md - -# Test files - -recursive-exclude mne/io/tests/data * -recursive-exclude mne/io/besa/tests/data * -recursive-exclude mne/io/bti/tests/data * -recursive-exclude mne/io/edf/tests/data * -recursive-exclude mne/io/kit/tests/data * -recursive-exclude mne/io/brainvision/tests/data * -recursive-exclude mne/io/egi/tests/data * -recursive-exclude mne/io/nicolet/tests/data * -recursive-exclude mne/preprocessing/tests/data * diff --git a/Makefile b/Makefile index 7d5488258d8..8a79bf966c5 100644 --- a/Makefile +++ b/Makefile @@ -25,7 +25,7 @@ clean-cache: clean: clean-build clean-pyc clean-so clean-ctags clean-cache wheel: - $(PYTHON) -m build + $(PYTHON) -m build -w sample_data: @python -c "import mne; mne.datasets.sample.data_path(verbose=True);" @@ -54,9 +54,6 @@ pep: pre-commit codespell: # running manually @codespell --builtin clear,rare,informal,names,usage -w -i 3 -q 3 -S $(CODESPELL_SKIPS) --ignore-words=ignore_words.txt 
--uri-ignore-words-list=bu $(CODESPELL_DIRS) -check-manifest: - check-manifest -q --ignore .circleci/config.yml,doc,logo,mne/io/*/tests/data*,mne/io/tests/data,mne/preprocessing/tests/data,.DS_Store,.git_archival.txt - check-readme: clean wheel twine check dist/* diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 2bfce3b4378..6cac2d5990f 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -64,10 +64,6 @@ stages: make nesting displayName: make nesting condition: always() - - bash: | - make check-manifest - displayName: make check-manifest - condition: always() - bash: | make check-readme displayName: make check-readme diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index ddd70ab22be..da82c6cfc4d 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -38,6 +38,7 @@ Enhancements - Inform the user about channel discrepancy between provided info, forward operator, and/or covariance matrices in :func:`mne.beamformer.make_lcmv` (:gh:`12238` by :newcontrib:`Nikolai Kapralov`) - We added type hints for the return values of :func:`mne.read_evokeds` and :func:`mne.io.read_raw`. Development environments like VS Code or PyCharm will now provide more help when using these functions in your code. (:gh:`12250` by `Richard Höchenberger`_ and `Eric Larson`_) - Add ``method="polyphase"`` to :meth:`mne.io.Raw.resample` and related functions to allow resampling using :func:`scipy.signal.upfirdn` (:gh:`12268` by `Eric Larson`_) +- The package build backend was switched from ``setuptools`` to ``hatchling``. This will only affect users who build and install MNE-Python from source. (:gh:`12269` by `Richard Höchenberger`_) Bugs ~~~~ diff --git a/pyproject.toml b/pyproject.toml index 7bb17f07570..d401cdca370 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,7 @@ +[build-system] +requires = ["hatchling", "hatch-vcs"] +build-backend = "hatchling.build" + [project] name = "mne" description = "MNE-Python project for MEG and EEG data analysis." 
@@ -112,7 +116,6 @@ test = [ "ruff", "numpydoc", "codespell", - "check-manifest", "tomli; python_version<'3.11'", "twine", "wheel", @@ -168,52 +171,30 @@ Documentation = "https://mne.tools/" Forum = "https://mne.discourse.group/" "Source Code" = "https://github.com/mne-tools/mne-python/" -[build-system] -requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2", "wheel"] -build-backend = "setuptools.build_meta" - -[tool.setuptools.packages.find] -where = ["."] -include = ["mne*"] -namespaces = false - -[tool.setuptools_scm] -version_scheme = "release-branch-semver" - -[tool.setuptools] -include-package-data = true - -[tool.setuptools.package-data] -"mne" = [ - "data/eegbci_checksums.txt", - "data/*.sel", - "data/icos.fif.gz", - "data/coil_def*.dat", - "data/helmets/*.fif.gz", - "data/FreeSurferColorLUT.txt", - "data/image/*gif", - "data/image/*lout", - "data/fsaverage/*.fif", - "channels/data/layouts/*.lout", - "channels/data/layouts/*.lay", - "channels/data/montages/*.sfp", - "channels/data/montages/*.txt", - "channels/data/montages/*.elc", - "channels/data/neighbors/*.mat", - "datasets/sleep_physionet/SHA1SUMS", - "datasets/_fsaverage/*.txt", - "datasets/_infant/*.txt", - "datasets/_phantom/*.txt", - "html/*.js", - "html/*.css", - "html_templates/repr/*.jinja", - "html_templates/report/*.jinja", - "icons/*.svg", - "icons/*.png", - "io/artemis123/resources/*.csv", - "io/edf/gdf_encodes.txt", +[tool.hatch.build] +exclude = [ + "/.*", + "/*.yml", + "/*.yaml", + "/*.toml", + "/*.txt", + "/mne/**/tests", + "/logo", + "/doc", + "/tools", + "/tutorials", + "/examples", + "/CITATION.cff", + "/codemeta.json", + "/ignore_words.txt", + "/Makefile", + "/CONTRIBUTING.md", ] +[tool.hatch.version] +source = "vcs" +raw-options = { version_scheme = "release-branch-semver" } + [tool.codespell] ignore-words = "ignore_words.txt" builtin = "clear,rare,informal,names,usage" From 59e50247c8fc4d6d5c968b9d9b6207b7ff5d6b24 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Fri, 8 Dec 2023 19:09:20 +0100 Subject: [PATCH 05/37] Update year and use "official" text (#12278) --- LICENSE.txt | 31 +++++++++---------------------- 1 file changed, 9 insertions(+), 22 deletions(-) diff --git a/LICENSE.txt b/LICENSE.txt index 6d98ee83925..c9197c42f20 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,24 +1,11 @@ -Copyright © 2011-2022, authors of MNE-Python -All rights reserved. +Copyright 2011-2023 MNE-Python authors -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file From fd5008a48a1819034f8cf94dfc0e31f7f1a74ba5 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Fri, 8 Dec 2023 15:32:29 -0500 Subject: [PATCH 06/37] BUG: Fix bug with parent dir check (#12282) --- doc/changes/devel.rst | 1 + mne/io/base.py | 7 +++++++ mne/io/fiff/tests/test_raw_fiff.py | 4 ++++ 3 files changed, 12 insertions(+) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index da82c6cfc4d..422754ba4a5 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -45,6 +45,7 @@ Bugs - Allow :func:`mne.viz.plot_compare_evokeds` to plot eyetracking channels, and improve error handling (:gh:`12190` by `Scott Huberty`_) - Fix bug with accessing the last data sample using ``raw[:, -1]`` where an empty array was returned (:gh:`12248` by `Eric Larson`_) - Remove incorrect type hints in :func:`mne.io.read_raw_neuralynx` (:gh:`12236` by `Richard Höchenberger`_) +- Fix bug where parent directory existence was not checked properly in :meth:`mne.io.Raw.save` (:gh:`12282` by `Eric Larson`_) - ``defusedxml`` is now an optional (rather than required) dependency and needed when reading EGI-MFF data, NEDF data, and BrainVision montages (:gh:`12264` by `Eric Larson`_) API changes diff --git a/mne/io/base.py b/mne/io/base.py index 652b747a8ac..6bd92607eb2 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -2563,6 +2563,13 @@ def set_annotations(self, annotations): def _write_raw(raw_fid_writer, fpath, split_naming, overwrite): """Write raw file with splitting.""" dir_path = fpath.parent + _check_fname( + dir_path, + overwrite="read", + must_exist=True, + name="parent directory", + need_dir=True, + ) # We have to create one extra filename here to make the for loop below happy, # but it will raise an error if it actually gets used split_fnames = _make_split_fnames( diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py index bb249809f19..329d205e8d3 100644 --- a/mne/io/fiff/tests/test_raw_fiff.py +++ b/mne/io/fiff/tests/test_raw_fiff.py @@ -771,6 +771,10 @@ def test_io_raw(tmp_path): sl = slice(inds[0], inds[1]) assert_allclose(data[:, sl], raw[:, sl][0], rtol=1e-6, atol=1e-20) + # missing dir raises informative error + with pytest.raises(FileNotFoundError, match="parent directory does not exist"): + raw.save(tmp_path / "foo" / "test_raw.fif", split_size="1MB") + @pytest.mark.parametrize( "fname_in, fname_out", From 8af33df490f94c3dd628cfc23beafed1a6cc6361 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Richard=20H=C3=B6chenberger?= Date: Fri, 8 Dec 2023 22:57:13 +0100 Subject: [PATCH 07/37] Clean up .gitignore a bit and fix packaging (#12281) Co-authored-by: Eric Larson --- .github/workflows/tests.yml | 1 - .gitignore | 1 - doc/changes/devel.rst | 2 +- examples/visualization/3d_to_2d.py | 9 +- mne/conftest.py | 5 + mne/data/image/custom_layout.lout | 257 --------------------------- mne/data/image/mni_brain.gif | Bin 12051 -> 0 bytes mne/datasets/config.py | 4 +- pyproject.toml | 10 +- tools/github_actions_dependencies.sh | 6 +- tools/github_actions_install.sh | 5 - tools/github_actions_test.sh | 13 +- 12 files changed, 36 insertions(+), 277 deletions(-) delete mode 100644 mne/data/image/custom_layout.lout delete mode 100644 mne/data/image/mni_brain.gif delete mode 100755 tools/github_actions_install.sh diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 1f3f0eb7ea8..3a0517d59e1 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -102,7 +102,6 @@ jobs: # Minimal commands on Linux (macOS 
stalls) - run: ./tools/get_minimal_commands.sh if: ${{ startswith(matrix.os, 'ubuntu') }} - - run: ./tools/github_actions_install.sh - run: ./tools/github_actions_infos.sh # Check Qt - run: ./tools/check_qt_import.sh $MNE_QT_BACKEND diff --git a/.gitignore b/.gitignore index 564599c864a..51707aa39e0 100644 --- a/.gitignore +++ b/.gitignore @@ -41,7 +41,6 @@ MNE-brainstorm-data* physionet-sleep-data* MEGSIM* build -mne/_version.py coverage htmlcov .cache/ diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index 422754ba4a5..3fd579ad4be 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -38,7 +38,7 @@ Enhancements - Inform the user about channel discrepancy between provided info, forward operator, and/or covariance matrices in :func:`mne.beamformer.make_lcmv` (:gh:`12238` by :newcontrib:`Nikolai Kapralov`) - We added type hints for the return values of :func:`mne.read_evokeds` and :func:`mne.io.read_raw`. Development environments like VS Code or PyCharm will now provide more help when using these functions in your code. (:gh:`12250` by `Richard Höchenberger`_ and `Eric Larson`_) - Add ``method="polyphase"`` to :meth:`mne.io.Raw.resample` and related functions to allow resampling using :func:`scipy.signal.upfirdn` (:gh:`12268` by `Eric Larson`_) -- The package build backend was switched from ``setuptools`` to ``hatchling``. This will only affect users who build and install MNE-Python from source. (:gh:`12269` by `Richard Höchenberger`_) +- The package build backend was switched from ``setuptools`` to ``hatchling``. This will only affect users who build and install MNE-Python from source. (:gh:`12269`, :gh:`12281` by `Richard Höchenberger`_) Bugs ~~~~ diff --git a/examples/visualization/3d_to_2d.py b/examples/visualization/3d_to_2d.py index 6d8e8674fa3..47b223e8396 100644 --- a/examples/visualization/3d_to_2d.py +++ b/examples/visualization/3d_to_2d.py @@ -23,8 +23,6 @@ # Copyright the MNE-Python contributors. 
# %% -from os.path import dirname -from pathlib import Path import numpy as np from matplotlib import pyplot as plt @@ -43,8 +41,7 @@ ecog_data_fname = subjects_dir / "sample_ecog_ieeg.fif" # We've already clicked and exported -layout_path = Path(dirname(mne.__file__)) / "data" / "image" -layout_name = "custom_layout.lout" +layout_name = subjects_dir / "custom_layout.lout" # %% # Load data @@ -128,10 +125,10 @@ # # Generate a layout from our clicks and normalize by the image # print('Generating and saving layout...') # lt = click.to_layout() -# lt.save(layout_path / layout_name) # save if we want +# lt.save(layout_name) # save if we want # # We've already got the layout, load it -lt = mne.channels.read_layout(layout_path / layout_name, scale=False) +lt = mne.channels.read_layout(layout_name, scale=False) x = lt.pos[:, 0] * float(im.shape[1]) y = (1 - lt.pos[:, 1]) * float(im.shape[0]) # Flip the y-position fig, ax = plt.subplots(layout="constrained") diff --git a/mne/conftest.py b/mne/conftest.py index 40a317b7da9..ba2bfd51dfa 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -984,6 +984,11 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config): writer.line(f"{timing.ljust(15)}{name}") +def pytest_report_header(config, startdir): + """Add information to the pytest run header.""" + return f"MNE {mne.__version__} -- {str(Path(mne.__file__).parent)}" + + @pytest.fixture(scope="function", params=("Numba", "NumPy")) def numba_conditional(monkeypatch, request): """Test both code paths on machines that have Numba.""" diff --git a/mne/data/image/custom_layout.lout b/mne/data/image/custom_layout.lout deleted file mode 100644 index ab5b81408cb..00000000000 --- a/mne/data/image/custom_layout.lout +++ /dev/null @@ -1,257 +0,0 @@ - 0.00 0.00 0.01 0.02 -000 0.79 0.46 0.07 0.05 0 -001 0.78 0.48 0.07 0.05 1 -002 0.76 0.51 0.07 0.05 2 -003 0.74 0.53 0.07 0.05 3 -004 0.72 0.55 0.07 0.05 4 -005 0.71 0.57 0.07 0.05 5 -006 0.69 0.59 0.07 0.05 6 -007 0.67 0.62 0.07 0.05 7 -008 0.66 0.64 0.07 0.05 8 -009 0.64 0.66 0.07 0.05 9 -010 0.62 0.68 0.07 0.05 10 -011 0.61 0.69 0.07 0.05 11 -012 0.59 0.71 0.07 0.05 12 -013 0.58 0.73 0.07 0.05 13 -014 0.56 0.75 0.07 0.05 14 -015 0.54 0.77 0.07 0.05 15 -016 0.77 0.44 0.07 0.05 16 -017 0.75 0.46 0.07 0.05 17 -018 0.73 0.49 0.07 0.05 18 -019 0.72 0.51 0.07 0.05 19 -020 0.70 0.54 0.07 0.05 20 -021 0.68 0.56 0.07 0.05 21 -022 0.66 0.58 0.07 0.05 22 -023 0.65 0.60 0.07 0.05 23 -024 0.63 0.62 0.07 0.05 24 -025 0.62 0.64 0.07 0.05 25 -026 0.60 0.66 0.07 0.05 26 -027 0.58 0.68 0.07 0.05 27 -028 0.57 0.70 0.07 0.05 28 -029 0.55 0.71 0.07 0.05 29 -030 0.53 0.73 0.07 0.05 30 -031 0.52 0.75 0.07 0.05 31 -032 0.75 0.42 0.07 0.05 32 -033 0.73 0.45 0.07 0.05 33 -034 0.71 0.47 0.07 0.05 34 -035 0.69 0.50 0.07 0.05 35 -036 0.68 0.52 0.07 0.05 36 -037 0.66 0.54 0.07 0.05 37 -038 0.64 0.57 0.07 0.05 38 -039 0.62 0.58 0.07 0.05 39 -040 0.61 0.61 0.07 0.05 40 -041 0.59 0.62 0.07 0.05 41 -042 0.58 0.64 0.07 0.05 42 -043 0.56 0.66 0.07 0.05 43 -044 0.54 0.68 0.07 0.05 44 -045 0.53 0.70 0.07 0.05 45 -046 0.51 0.72 0.07 0.05 46 -047 0.50 0.74 0.07 0.05 47 -048 0.72 0.41 0.07 0.05 48 -049 0.71 0.43 0.07 0.05 49 -050 0.69 0.46 0.07 0.05 50 -051 0.67 0.48 0.07 0.05 51 -052 0.65 0.50 0.07 0.05 52 -053 0.63 0.52 0.07 0.05 53 -054 0.62 0.55 0.07 0.05 54 -055 0.60 0.57 0.07 0.05 55 -056 0.58 0.59 0.07 0.05 56 -057 0.57 0.61 0.07 0.05 57 -058 0.55 0.63 0.07 0.05 58 -059 0.54 0.65 0.07 0.05 59 -060 0.52 0.67 0.07 0.05 60 -061 0.51 0.69 0.07 0.05 61 -062 0.49 0.71 0.07 0.05 62 -063 0.47 
0.73 0.07 0.05 63 -064 0.70 0.39 0.07 0.05 64 -065 0.68 0.41 0.07 0.05 65 -066 0.66 0.44 0.07 0.05 66 -067 0.65 0.46 0.07 0.05 67 -068 0.63 0.49 0.07 0.05 68 -069 0.61 0.51 0.07 0.05 69 -070 0.59 0.53 0.07 0.05 70 -071 0.58 0.55 0.07 0.05 71 -072 0.56 0.57 0.07 0.05 72 -073 0.55 0.59 0.07 0.05 73 -074 0.53 0.61 0.07 0.05 74 -075 0.51 0.64 0.07 0.05 75 -076 0.50 0.66 0.07 0.05 76 -077 0.48 0.68 0.07 0.05 77 -078 0.47 0.69 0.07 0.05 78 -079 0.45 0.72 0.07 0.05 79 -080 0.68 0.38 0.07 0.05 80 -081 0.66 0.40 0.07 0.05 81 -082 0.64 0.42 0.07 0.05 82 -083 0.62 0.44 0.07 0.05 83 -084 0.60 0.47 0.07 0.05 84 -085 0.59 0.49 0.07 0.05 85 -086 0.57 0.51 0.07 0.05 86 -087 0.55 0.54 0.07 0.05 87 -088 0.54 0.56 0.07 0.05 88 -089 0.52 0.58 0.07 0.05 89 -090 0.50 0.60 0.07 0.05 90 -091 0.49 0.62 0.07 0.05 91 -092 0.47 0.64 0.07 0.05 92 -093 0.46 0.66 0.07 0.05 93 -094 0.44 0.68 0.07 0.05 94 -095 0.42 0.70 0.07 0.05 95 -096 0.65 0.36 0.07 0.05 96 -097 0.63 0.38 0.07 0.05 97 -098 0.61 0.41 0.07 0.05 98 -099 0.60 0.43 0.07 0.05 99 -100 0.58 0.45 0.07 0.05 100 -101 0.56 0.47 0.07 0.05 101 -102 0.55 0.50 0.07 0.05 102 -103 0.53 0.52 0.07 0.05 103 -104 0.51 0.54 0.07 0.05 104 -105 0.50 0.56 0.07 0.05 105 -106 0.48 0.58 0.07 0.05 106 -107 0.47 0.61 0.07 0.05 107 -108 0.45 0.63 0.07 0.05 108 -109 0.44 0.65 0.07 0.05 109 -110 0.42 0.67 0.07 0.05 110 -111 0.41 0.69 0.07 0.05 111 -112 0.63 0.34 0.07 0.05 112 -113 0.61 0.36 0.07 0.05 113 -114 0.59 0.39 0.07 0.05 114 -115 0.58 0.41 0.07 0.05 115 -116 0.56 0.43 0.07 0.05 116 -117 0.54 0.46 0.07 0.05 117 -118 0.52 0.48 0.07 0.05 118 -119 0.51 0.51 0.07 0.05 119 -120 0.49 0.52 0.07 0.05 120 -121 0.47 0.55 0.07 0.05 121 -122 0.46 0.57 0.07 0.05 122 -123 0.44 0.59 0.07 0.05 123 -124 0.43 0.61 0.07 0.05 124 -125 0.41 0.63 0.07 0.05 125 -126 0.40 0.65 0.07 0.05 126 -127 0.38 0.67 0.07 0.05 127 -128 0.60 0.32 0.07 0.05 128 -129 0.59 0.35 0.07 0.05 129 -130 0.56 0.37 0.07 0.05 130 -131 0.55 0.39 0.07 0.05 131 -132 0.53 0.42 0.07 0.05 132 -133 0.52 0.44 0.07 0.05 133 -134 0.50 0.46 0.07 0.05 134 -135 0.48 0.49 0.07 0.05 135 -136 0.47 0.51 0.07 0.05 136 -137 0.45 0.53 0.07 0.05 137 -138 0.43 0.56 0.07 0.05 138 -139 0.42 0.57 0.07 0.05 139 -140 0.40 0.60 0.07 0.05 140 -141 0.39 0.61 0.07 0.05 141 -142 0.37 0.63 0.07 0.05 142 -143 0.36 0.66 0.07 0.05 143 -144 0.58 0.31 0.07 0.05 144 -145 0.56 0.33 0.07 0.05 145 -146 0.54 0.35 0.07 0.05 146 -147 0.53 0.38 0.07 0.05 147 -148 0.51 0.40 0.07 0.05 148 -149 0.49 0.42 0.07 0.05 149 -150 0.48 0.45 0.07 0.05 150 -151 0.46 0.47 0.07 0.05 151 -152 0.44 0.49 0.07 0.05 152 -153 0.42 0.51 0.07 0.05 153 -154 0.41 0.53 0.07 0.05 154 -155 0.39 0.56 0.07 0.05 155 -156 0.38 0.58 0.07 0.05 156 -157 0.36 0.60 0.07 0.05 157 -158 0.35 0.62 0.07 0.05 158 -159 0.33 0.64 0.07 0.05 159 -160 0.55 0.29 0.07 0.05 160 -161 0.54 0.32 0.07 0.05 161 -162 0.52 0.34 0.07 0.05 162 -163 0.50 0.36 0.07 0.05 163 -164 0.49 0.38 0.07 0.05 164 -165 0.47 0.41 0.07 0.05 165 -166 0.45 0.43 0.07 0.05 166 -167 0.43 0.45 0.07 0.05 167 -168 0.42 0.48 0.07 0.05 168 -169 0.40 0.50 0.07 0.05 169 -170 0.39 0.52 0.07 0.05 170 -171 0.37 0.54 0.07 0.05 171 -172 0.36 0.56 0.07 0.05 172 -173 0.34 0.58 0.07 0.05 173 -174 0.33 0.60 0.07 0.05 174 -175 0.31 0.62 0.07 0.05 175 -176 0.53 0.27 0.07 0.05 176 -177 0.52 0.30 0.07 0.05 177 -178 0.50 0.32 0.07 0.05 178 -179 0.48 0.34 0.07 0.05 179 -180 0.46 0.37 0.07 0.05 180 -181 0.45 0.39 0.07 0.05 181 -182 0.43 0.41 0.07 0.05 182 -183 0.41 0.43 0.07 0.05 183 -184 0.40 0.46 0.07 0.05 184 -185 0.38 0.48 0.07 0.05 185 -186 0.36 0.50 0.07 0.05 186 -187 
0.35 0.53 0.07 0.05 187 -188 0.33 0.55 0.07 0.05 188 -189 0.32 0.57 0.07 0.05 189 -190 0.30 0.59 0.07 0.05 190 -191 0.29 0.61 0.07 0.05 191 -192 0.51 0.26 0.07 0.05 192 -193 0.49 0.28 0.07 0.05 193 -194 0.47 0.31 0.07 0.05 194 -195 0.46 0.33 0.07 0.05 195 -196 0.44 0.35 0.07 0.05 196 -197 0.42 0.37 0.07 0.05 197 -198 0.41 0.40 0.07 0.05 198 -199 0.39 0.42 0.07 0.05 199 -200 0.37 0.44 0.07 0.05 200 -201 0.36 0.46 0.07 0.05 201 -202 0.34 0.49 0.07 0.05 202 -203 0.32 0.51 0.07 0.05 203 -204 0.31 0.53 0.07 0.05 204 -205 0.29 0.55 0.07 0.05 205 -206 0.28 0.57 0.07 0.05 206 -207 0.27 0.59 0.07 0.05 207 -208 0.48 0.24 0.07 0.05 208 -209 0.47 0.26 0.07 0.05 209 -210 0.45 0.28 0.07 0.05 210 -211 0.43 0.31 0.07 0.05 211 -212 0.41 0.33 0.07 0.05 212 -213 0.40 0.35 0.07 0.05 213 -214 0.38 0.38 0.07 0.05 214 -215 0.37 0.40 0.07 0.05 215 -216 0.35 0.42 0.07 0.05 216 -217 0.33 0.45 0.07 0.05 217 -218 0.32 0.47 0.07 0.05 218 -219 0.30 0.49 0.07 0.05 219 -220 0.28 0.51 0.07 0.05 220 -221 0.27 0.53 0.07 0.05 221 -222 0.25 0.55 0.07 0.05 222 -223 0.24 0.58 0.07 0.05 223 -224 0.46 0.23 0.07 0.05 224 -225 0.45 0.25 0.07 0.05 225 -226 0.43 0.27 0.07 0.05 226 -227 0.41 0.29 0.07 0.05 227 -228 0.39 0.31 0.07 0.05 228 -229 0.38 0.34 0.07 0.05 229 -230 0.36 0.36 0.07 0.05 230 -231 0.34 0.38 0.07 0.05 231 -232 0.33 0.41 0.07 0.05 232 -233 0.31 0.43 0.07 0.05 233 -234 0.29 0.45 0.07 0.05 234 -235 0.28 0.47 0.07 0.05 235 -236 0.26 0.50 0.07 0.05 236 -237 0.25 0.52 0.07 0.05 237 -238 0.24 0.54 0.07 0.05 238 -239 0.22 0.56 0.07 0.05 239 -240 0.44 0.21 0.07 0.05 240 -241 0.42 0.23 0.07 0.05 241 -242 0.41 0.25 0.07 0.05 242 -243 0.39 0.27 0.07 0.05 243 -244 0.37 0.30 0.07 0.05 244 -245 0.35 0.32 0.07 0.05 245 -246 0.33 0.34 0.07 0.05 246 -247 0.32 0.37 0.07 0.05 247 -248 0.30 0.39 0.07 0.05 248 -249 0.28 0.41 0.07 0.05 249 -250 0.27 0.43 0.07 0.05 250 -251 0.25 0.46 0.07 0.05 251 -252 0.24 0.48 0.07 0.05 252 -253 0.23 0.50 0.07 0.05 253 -254 0.21 0.52 0.07 0.05 254 -255 0.20 0.54 0.07 0.05 255 diff --git a/mne/data/image/mni_brain.gif b/mne/data/image/mni_brain.gif deleted file mode 100644 index 3d6cc08edbde8d9b2b83cfa9bc687e640efaed51..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 12051 zcmaKSWmH_x_GKU;IKe%*yE}y7?rsUvNCOQt4K$VncWB(*-5nAL?(UXAaCfICzc>H) zWgenUfS6N39UVZfB2aM}b1NqcfXLteKhvBvRR1!8*o)JA{AYpcAD_}rU;q_A2QRxN z7dJPRfB*+KAHRSAHyafX7dH-vr zV+(;ei*Ry!czAGl@NzhTZ8*7wg@ym&;NfBaYr*d734)kI*+H(f|7MT@xLShkoFR5j zAgX^D%`KeVAmTKCBmLhgI6D7_7UatDuXH#pog6u#=FXhl99;jT^lw8|)&IY#qvL<9 zT_IY4{}b>3lh{?q(;2|21#oq816u) z+_HSaJi@XvyfT73LfkyUT>RYs#>zNZx;X+skbh&X{tx!!f5rYI21n<=o@D@FJ9mJU z9N5W`>R(BV*!}ld1ph1F|6r~Ddn`i#70dZI7|wsT_W#=Ie^33@&p+M&2>0*cKhg() z{^}k4SJ(*fm*=O)hx@zRo9nB~i}SP7ljEbqgZ;hTo$Wtco4la@iZ!b@%hr1iZ73|{d1afq+ z2in=%SOcsqEzCcEGBY(XHZnBO*VEO}*3#5aS5s9{R#H@umy?x|mij39K|)+iR76-v zP=KG0mxr5+lY^a&m4%s!k%69$mWG;&lHxr%87T=d5g`FS9xe{{J1k5LbTm{Hhvy84b%3_3Isv?}9fKl6Jz14u+df&<8G5 z7H*<;AI5KA-eI7H$QZtI#-NKIWVqZn5g{0`E(k9HMyysXG$*hmYBSxlaZommx-CVj zeBHeg0Y*-Q+-|i{I03#9-o3q>^{_5(8BFQE4M`#1K}Pkky-!J999&u|H53jbEMqXb zv(Ysa*k7%0O@>Fk z6qr{WOvKT}Rn5LMX1>L;VJ8{1_wC3c;S61~j?Vr>xe?u-jtL`4637o+@|umn6q0%% zZMqmxS8e{X(MD}7iH+Y0f#V6<{oboh?um7m@zf68Tat4yTqNn`V8O!;rOJah_tUa8oHU}g~{OXyDM;mkO4`l z)NToPs%yap1PNqi)v{3v^!)&2A~k_zU?+KPv(4R$|TE7qqmvhXcxVJ z{OVE8YBf&o+n&u`frrS)e^z+ki>G|yDf`J+q%~K?4TNNqA=la9V!0; z({G`k`$|FvmHkNo0yFm|9m|_mg5aeZo4IVH*1>fp$QLjM&z&JLzI5VTvV|>K=^Z(s zjlRuN((-!?6Rq4bAuLUBv*a^0?ZS>GFfFx#88`)sx;2mXb0WE}6>mM&w()PSG5xsMx0;th}buYdmmD!Cu1HbFV8Vf=&F0e*I8A-KPS)lP0g7 
z^$28Lsp~*=&tH)xLa8aFZch2#1rbC)?8I*>5a2gXcLT1DV2Tn>$|B!3rE^(C;?9P# z;%82a!x`29zi5PHcYeeHRS4^ll2f=h`{tETWZTu2c6e}#D_fCUmy`;{^e;R<9u`-}kPF4wOR6D7byfvyM@;WKX%>2AfHtnndTsI~6mOm+$e z=rqF>6>pd>6HmTb0T;0B@QZ%`KJ$j=>Z4QE(6MzYLkSP&oU-m1{4`k2nk;msust$~ z;C*N|(&c6=a`+Nx;1iebpT;^JCE_Xg=K;EppFh`Ov$d@a`4~y*szPLPjQgSkc3n(i zXgx&L+}wd|DjN%2D1A~zFtID`3`*5$y4=E%FJ7~&iWtF-w<#>&M*Tm{tTxt2Vqt;K zmIxUMkmM4d$22V(7IQuiGH-~1cxrk-;k@(wArIc6a}y({onfd*=ACjHH3`g#aKU^-RCf@t6N zwtToWO7=jsfLDpXPVGi5gd$QI!?mY}^r24cL;Xx@ohNDN=uB|5H1Y`6OgGap2VA~` zZI^2Yg|WYz1^6d27mL(8<(Sk+MfFoiohrHo6j@6oBR7{plTc|mE!(Krl2{e z#2)IiJ4H#px9)(xRyE??Ji3(Vru-2xVhq|tO_RE$&-ZZv9Yr_8agJ%hYIrBRsTiR` zwfj}kTRI>CE4qZ^ZLfx8BlH2S88 zx&7S9a@}{t%jmYnW})9vwMl{)M)s%`8&@~Di2xByVoLh;9K9*Sh9eBGA(7_&8~2C> zHXE}b+eB1R>x8*gBfnO@^*31W5_hjxMPFlB^sDjNchOS=zmhgS<}H$K=5d?95}bFI z>ap3S2YLTk`bCY!2>G5#b8D2g+7q{EKhnA9p1&Y}CGMO~(PlL8IYXHu2Hzwc;t)21 zY=XJ2WPfh2qB9jR~_$X;(m;;?Mn~ue8<)v z9=+Pk!Q*bU5m&ybh;mam>T`{{Ak|N0Oj!RqIk(-uc0B^EWCQ83>zFa>1^W66Xb|bu zeQd`ohRzllnO2m)k9+f>uA!ldU3b|k)utZ+32*43vnlOZ={{QHhNO0ob}SaSOjR=R znlF{$KUQWUjO(Pt5EA<)YGoFgox%8twO(PKMBU}En*6?*H1r^7*LiA|4#~Xzr0jK&P{f)22iw}TF;S-w#MQba4M;*>)~s{ARE?IwfHMm8GTi& zVUKx-s;aRG$Pk-ab(v46xsc*8SKa$ZnpTf*V2i)y#mvVhlV&u`YH6dvPsD>Y%s*VQT&$)tnoK#q^`BLke3=UbM7 z&C63Ks;R=$**6b2ElCIMrfv6AO?r=IK`Qk-N~i*AmUv(VSEn&r0!2A<_gcxJg3lGX+O+kCkvtynN?dP< zfX(sdVRZ81NG=lfoa9U&CU(HCH_&r_uQVB_*d3&h0Dr3iZlVC63%7vIdXG5XPfJN2 z`mv%49@dQ0^x5<5z3ASTUbx_&D%!751}F7 zJQSaHT;ibiOSjaFOpJUsjC1ww4LK&WZs79l8&^oYp86kzPjn6zrmm^aXCE>w0Nx zKM890mjsnH(Xjv&#%*C}%&=L5AdW&2jvXd9MjyQVI19xDsTM1bTxiPm`;xalvKN-G z`@a3q71BcvZ{T%hxxl%<2p0NfHSe12E*p!Rr*En#KmMC|EhW}=DC`}B{4AZnxFNN( zaNHGrl$ZuXpFhYsFnQyRLv^kQ=|GyFVvzhIPrko?^}e>hFGU_%5pY~OT6wMl}%@!D8J%+p!sNm#lz)6iRez12!PX<81U*9uL^s|anvLZ@KeBQNj z6F}xci1FDfE2{1rfhG3oy*0Ue`Vu@<>%dM`oQ@P(AWfGTV%irk^2?U07sOnMCa^$R z#i&o9uS=ub7i{7SrAh?TaQc;^?h&_Aaa59Xe%dC}HtO!v zo8zG67Z}>09$5b{)SjBj;|q_+O3d&2<3mlFL#vdtq8LVymdB>(&TN!A=Sg4PtC+`nX2oq7jR~|124~?aL1*I&l`galY~2?Wfk9q{jq|w#zyMER zlouV5YpcK!Orc~kKkI-l3=Y(bA_eD_eE}*K@hN?)M6qjpTkIlO>fct_5}bM;lvcSF z?%`E{L*g1upYIYc*E|!`Ql^H(Vq(KkemA6eMwG93n=E#Zb!77fvEPFUn7ewX-`=Lw zW`iSWSMuvx2f?PIK3R34EZ?GpNV82m6fw(liU(qsXGY*#vdpgqsT?j*`b}CGzN~@f zR>cxr(PEjauK>7YVGuP;DPW|bA9iPN?X%@SQgajiM~$H zjwcntE*Cu95_o4+_k*E;z00^G&0x*2Qnje=eBVFb)kq+quE#CGE;i$vkuP<63MpD4 zi@p2NqCVL|!gC=a7MwH{%QGv0(Ndo~9b{7;9758j=$o?Wx$-vY4qTHf0CecF^VkWt zSFE`p!O0`e^4}`+U$9yaadt59q2p784>Yjb#x|EI*M`PqCKhY!nc(63-0WoIA$yt# zAbK0bz6wM5iL#g%rGin;S2)lhh=rOmhZ z0!yQaE%z*wRtBhtku!Bm`=;(B;ia-ccL+qyA%3J{C5IU=J579n!RBE-n@k-|Hccl@ zIokGSSafC#u=p+c+JMD1JFCsZoXD9LC*77oOjOi-G`YD+HOa^vu}dF40=W zfY9nNcmNEgq?G8h7QU{X%TbiYaE?w#R9bnIosEGxAfA6w7Mhoz5Ei33Wv-jHC!b22OE_p>uKBUu*R@j3M0IA1K zkWp`5jV%D&J{?8LqU0?pB6|Msw3<`mA00lnZZl0q`uehb5px^$Q-0NsZREXdUHsSB`_wX% zgnqlmX)Dp7bTR%mp2SD_`F+r;MU7BMx;Z8l@{f(LHg)E;cy z%3{6urwROQkJ{QkM6nz_6QXE4djn31k3vCYU_=LgtNp=P=^$*;gO)(Qd$A47I;gj{__3Yr-10+*#us^LlOB86>Z6*o)$j?K#-f@uvE9Wb79 z>j$UL@6%;5*2VTv9vq4qTlF_X*N42+J&0|*gBQ;#z4o@dtgl=E>3urli56rKPJH7k znfbM6lGkcJOyU`Bs|BYS3#T@K#)x>FH+4$xB96#9tx=OA&~Y}^EeD}Li@Z6Fwmo*fbmcB4#nI~@@pOe%-E8j-dp zM0)EDj0Jw#=U2WBni7sO9Vug7o6efnb3o!l((T>HT(&xlC-0g+lpVi@i$yZflnFLz z7qWLP+u5uZbrPYuNlO?zzwv?X?Ca)zXL_^rc724)aS@+&_B+8|RdMzU=9fde0(nF4 zZ0XuKUw~PW=vCN;7J7r2M3BF%i{@R(wed$j_At`4n88$^<@F%O@w}l9&P$O_-3l!O z?<2pUP0r|rTqfDWrtPK!ltqbOW~#q_tv9{YSF30EFQYA_DlA;YQTO)NVpRE_c^24{ zw(;*rhcBLK3rk>f)W1aM)Q8O7Rh6rP^n+|nQGC7^EL2Q8{)(^3y>E@^G%wSS>Rb!n z{<$0R^Efe=Vt|Q1{Raij3r1fb@Q)y74oQrwA=Y_~_}U@X7}La*4ZLCxTII$>a+of$ 
z6+&}~m`xvgQ)5-NOoUuYI=1u3TzFYoBb!_j=^kYz5#Qk`TIP0m($d>8u*r?7s_JRW3{|kpA^T>cbTw)l}X=&v5hCf)t9>0Z>=8>W;6rOcG_wc7Qd&y z^Q?GjJvA{w+sH3ibfjg=Ski`ZW-qI8LQhQ28$xF{*&g3gMj`;FMNL=Bw~jl6f`2&G zoJE{h8jUDa0}U8#Q~?jdr;r?qd%D!rg6JOFpEK3)$fHl6!SI(~p0>}RAO{Z6)dd?@ zc3S-#Y-A1InAlDfKgdLbuIH_(q0BMU7@jykj7C$Cm7OhdOJwU}m~Q`!0ayuTPNSbS zz~t;(LLvRgBG|TOm4;u()VUZxjP67Q2x~~m5Gtd*eMgjB|GVevDScow*|k~l9gy`v z9f@gJ?Qpc`El$N@^aVXTbVbNh8W-1yG6-4xVa>#k`&5se*{nwZSc)W`j~}nY4aoCvVJ|2L#wMl3>oufw zC139;5)krRtroM929vVkRT#F&xCnePPOi{uv{_{_bB_SUv@6MLDHj}uD~x3wFFlB! zu>iD#S4h|Pu34i8Bi6PdK5-_1tbIv!aTJGA<)zH|HEkmNvX&H+ye09t2PokjTXMeU zeHgAox-H<7x{!d$>5_d|^pB2}h`J>LbhtIVJ(&CxgTyw@n{C^e`nUy!`A<<|h1E~> zMcF6#Y1{3PQJ#sW-Zu!*+?fTu8VN5v@!eqy99yhS-W4+yd`DHR`x5Br{)%+>pS^p; zue7Mgb5x+fzP>bYK4)R`Vmlfq>B!2#PjtR}>Xhd!yPqFubJEJo!YsfbV#0D%jp42W zA8e^$?2K437?lMqDfgkIhVj>f5fWiLSO}QMBP?@_FO+yxP1%{z>(s_nI&zw~e`8YH z$$_&BE*9}>kJ8}T@)}i+SP$csSV+^*!u@8(!!uCfkAES2c;e&wy~ixTCEh~nc~`07 zVe%qb*9`+UWiWS9&f8&xn`wLDTv{&*|%tWV~%vP?aS=5=4Kf>rbs5 zN=RCgfcP}uq<9Ya8G~ZoN4J=HZ#5nAbr=jl)m)RKf;Mi+)Q=mn8M_+jM#Yj@B3TYeGQFg6s8xHj%+wfJ75SuBN8Hoo$Y zqD5wmnq2l&BfeM-?Uy=BpD-EEjkO+#Z?CiXN_$_rvre_9aZ+x+GzGf*l@mL* zO(%$#p17~N=nmGsDY`MKVFBd*`T=vNNW$sy5dS&0b7sT+EAzzfAdO_^)m9R-F0&l< zNS21f0Nb&Nv-fizW%o4PwVb33+x}DpU7kUni6fTXlWKMHqGhpEfqF_1Yb4d;NI>Wa zWAJOJr^S83;?Hc@IO8-p7JDGLZmp(5v!6P5L!cRHD(sZg3VmMgXt3E`Jk*o22Zu6w ziF$@hQ$&I zIb9D8Uo5|-^6L>jXIxv^FWlU6!VmZzO1)w?-1Hy(NUbS_8`?^L53qc_NW%djX2y2{ zqt{2{~xWZv9vl7DHm26<%oE=Mm zG+A-R=Hw|XDZX%DWoh)T6r9qoF!h^-m@~-wD1ft&ey%KtK|&7=@6gf?qme|Kcn6C& zXcFM5H78;}U#EoWTHKvOt<+bj6sw|ahU3y!k@*Ez@VA(`*cK}N>$juw?rh<$To8V6 z9KQk=Px0xzpC}3tTh^JLhR;um12VtqB`m9=Gt(l#IE?}mM;PIg3({ed;*L%{nhGN9 z@BCT%t(ZlnG?!3$5QBlxF99=sIAf^vZoj9HGom4)T3E}6GWKphL=|I9 zk0z}{Q@NjB^E`EG>Dw7uTfx$r+NusE&ve+Nx}vHJ)3Q~2B1YSxU9>y^)vB7d{HH&8 zmCtbtpKz~a{eoIM`YPprF!r%YI2tu@kArP*JNLH}j2ZQyJdu!2GI>weU#QB*b@fQJ zn2HGIsAaa@$CwoDCXpI5SW~Q1G7S8f#zT~ZynP8?v>5VClZyVj(F<+Xt}Dhr7qKF` zImewpOFVr)r!fdVGO6m5k>$gjpp9=3H?a$0`%Lr5=Z{H|0;SQO=Q?ELJq|RDin3Rt zB5KKia|^6`X5*GcfbL^H%`=&*>QIqL%k@4UZjiee{BY|Y`|{Cj8KYc|jR_*4bmL#%pO5$cnYyY;#D9d<_^IN) z*c*r*@v0V ztTWdWr(K4RpEjf@sT6xhePOg5K&4c)G3X?TaoZ7Tuh@oxtlj92I!te&#l_ z`g~a9hGT89(uAQa8ev)H90MVqgjJt)P=MTq>ibVp_4Cyl=RMuD_-?mk!PDQ23j!db zG34xm%}Z5YgP%rEIw&00imU88R%~wDKIC1wjYB8Yi5k>7g+x_2T3J}+X(As-P}@6%%A@VF*Xk^z$3<0*Mar*FPhY5t ze{oGB7Gm89)ofpZViGWtIf5Xc5A`9=+X|d{h;~~vPUqI3i#DYstMLHm7)1S%x7@>v zxHaE2oZF#HDDO@c3M?<%`tK8JX6iP}1XKWJ<3NtcG@$z5x+H2qHQH`O!Gn{1@8Cm6 zDVx$n5Ealp4jWV18I_hDbL1?hfoqNJ6oD4|@=3)^$q@_h9F;(0uPgMR4n7nY340w- zTnmFWY1I_6pf^BI6LJo5_+zlPxn@{k1hm{>3bsj#vIyd02D-yP^H@X!XZL22{8wB0 zTI|nsIB1IJmlPV_52FO)432pqTzh1*6cMvt*8sh1{cYvsTaLk@=LB$=hNEI;oLIrWn7!<3*@!W*$DG%ZV0QJdS#a<)&FSa<6(kC$LW!qS!nOE#ANm_U(`o_#*TDooa~$5MbE2WmtVw5g$bt9 z#AO0#2&y_}7+|E7Su=@FyvM0m?4SCbc20dC%|W7`k0q^T0>|w+3VPw-68yvsuW}Xa zWUX5s6f}20l0RlLb|l|+-Cw@M{*5(jfvVnCF9M9Gs+>V9kC%XN(UfZP(CGM_4If8O zUzSZFF zday|r$BXLXKJj75>f&$V%DkF8s1?yVv>il3jYi5m6Mx3Oo1c;DK9b%o-wM@H9^rX& zy(>%fXT~fB8+qz6ub#Si6zexCzW#JXFQ1dtBtfTxGB>u0w+5dwgUVwp1MjQ~$<<@v z#Sut>(oV@j0HZrbmK8_!iU5&k7KZuX}Tf|+-pkv1tmZ{^-!HFbMeJn=J?wj)C zw0F|oWkw5)19_Wjl>8jK+>|qT0UXy@-)89gG>9VN#3Hb=d~~uFbw9&*2fJq5NekWy z$AFv*x%(z{iFoLePoy{#NtLCgf38#nE!kVs2%i^r*!jku@+Cit0RpUVGbIV^B z6!vp8csiDrdT%i2qNv=UEPRp6e3M3Yoa`#=K==h$>6ztHHFr9JLa>fCiCY{EAIEP0 zYt&2mYj9?Z`auro+`1Z_JOi({J0iD$-O^mz5zglq^u~-bRx$(2%14;w;=D6h`1Qz$Wf;s&U*KATDw&O)Vo5>0=INy zO&x45%`rxb9-m2=-AW3RIN~NcCR?m7(c0_Yeu4o%Lbx@{Rj9!-#<3pCg85k(#t99; zA{^mdERwC09fk?abT)JWBeY+1g9BcZ%xr@Jb#fiB_vwKlYmRl3Lep@w%V73e`zm!6&u46-eLmH6^= zuB-@^{DPBQyw?|12EMVUx^1!QIIbEo=x>qh9Az79(y?u!XL7mMJ_zwqfF>U75#`of 
zPDU3N5iUP*CShOQ7s6T22zKH%IG1NJ7wkQeO{R6h8$?UKcN`=OLcA?yy~#Ie33Nk? z3Vg|n{&#V@4V!hB-Vg3;cg!aF&9y)h+|xSmL)?Lki`|hP(&;DArjRum4z zV>qihBN5cFun_JrGQx0W!xT@Vg>U>|ea>XLTOQBhkMVc0ektFdG^IPI4Xf|J;+NL1b-|JU%BoX?|_Y$z2tAzkTosd4Lf_rj?OT~K}e5$@8 z;((>><0LL8k0^$Pvx_kkJU%^=v=0P2%%9@2Uf)cl&BZYq!D=;{Uv5e|08#rMaGm>t z4PG5h5&~9%L{+0a&9uA)IAn$Hig4=i!%%A6AuE+|Gx3ZxX2y6y_nGd>H zQqTO@`jU6+mZ@7CUGEm84XPwAW4#L+Lrs{k4tIhiTZ(*QE&FOoARTi<;bvQLXjJG? z6#<;}i4cJthbzgE_(pjVVP1nVm#f|xpX2Y9k!najdc3HFe4V?2h5$5>KpB+}HO9iK zhH2l7^=K;z;VA}ZN+IR7gr1DV1@xlz$0F5?-~dcu_`O$UVp=qL4X+En`TCWB?#f4f z@66a@^wjAm0i2y0_0l0_uP{ts4=u#wclEw2*?_73q@zH1W1I~hq%EzvoW%BeE{QcJ zF_$bt`;v5wH3$_>K>l91omsS%XyLu(15b}}$BF^+y|s3$V@*7%rz;t;&}`>!9FH>Z z+g=g#Bf=0Ox^fGs>zfmmcP1~4DMfgtmwd+ z44grtrqu{F4nBj~t2zW|&-mKW??^YlH~*0!A8zjGBN2Wo6S2rPaNPCqZCxETHB3Mv z_AyWHArc8Dco^MUeLJ#jZhmgX`BWrL5}mX-w7oh(ABWY*Y} zy7K9#4Uw$tt?h_)TIc+n>w8J#+V=8PKMyD2L@hP9%th_l;KX#|c0ncDiqi14rAq#8 z^ywc6Gv)mFlDbbzvhQ89y)*1Jve)oe>P=Upyo>!>JN-20km@G}GS@m6)U`z?7qq)q zGS{i?hJLh;_3`8Hb}k;QZ3vnIqn;hQo>$>(zq<9h?w@}VeTV$c_)RHMNNu*sn6=5e Nz8TW-_fD(m{{oK&3vK`a diff --git a/mne/datasets/config.py b/mne/datasets/config.py index 6778a1e7cc9..b548f5273f2 100644 --- a/mne/datasets/config.py +++ b/mne/datasets/config.py @@ -88,7 +88,7 @@ # respective repos, and make a new release of the dataset on GitHub. Then # update the checksum in the MNE_DATASETS dict below, and change version # here: ↓↓↓↓↓ ↓↓↓ -RELEASES = dict(testing="0.150", misc="0.26") +RELEASES = dict(testing="0.150", misc="0.27") TESTING_VERSIONED = f'mne-testing-data-{RELEASES["testing"]}' MISC_VERSIONED = f'mne-misc-data-{RELEASES["misc"]}' @@ -126,7 +126,7 @@ ) MNE_DATASETS["misc"] = dict( archive_name=f"{MISC_VERSIONED}.tar.gz", # 'mne-misc-data', - hash="md5:868b484fadd73b1d1a3535b7194a0d03", + hash="md5:e343d3a00cb49f8a2f719d14f4758afe", url=( "https://codeload.github.com/mne-tools/mne-misc-data/tar.gz/" f'{RELEASES["misc"]}' diff --git a/pyproject.toml b/pyproject.toml index d401cdca370..fb8757150ac 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -172,6 +172,12 @@ Forum = "https://mne.discourse.group/" "Source Code" = "https://github.com/mne-tools/mne-python/" [tool.hatch.build] +artifacts = [ + "/mne/data/**/*.dat", + "/mne/data/**/*.fif", + "/mne/data/**/*.fif.gz", + "/mne/icons/**/*.png", +] # excluded via .gitignore, but we do want to ship those files exclude = [ "/.*", "/*.yml", @@ -184,12 +190,12 @@ exclude = [ "/tools", "/tutorials", "/examples", - "/CITATION.cff", "/codemeta.json", "/ignore_words.txt", "/Makefile", + "/CITATION.cff", "/CONTRIBUTING.md", -] +] # tracked by git, but we don't want to ship those files [tool.hatch.version] source = "vcs" diff --git a/tools/github_actions_dependencies.sh b/tools/github_actions_dependencies.sh index 69cf6413fb2..9489a95f397 100755 --- a/tools/github_actions_dependencies.sh +++ b/tools/github_actions_dependencies.sh @@ -3,11 +3,15 @@ set -o pipefail STD_ARGS="--progress-bar off --upgrade" +INSTALL_ARGS="-e" INSTALL_KIND="test_extra,hdf5" if [ ! -z "$CONDA_ENV" ]; then echo "Uninstalling MNE for CONDA_ENV=${CONDA_ENV}" conda remove -c conda-forge --force -yq mne python -m pip uninstall -y mne + if [[ "${RUNNER_OS}" != "Windows" ]]; then + INSTALL_ARGS="" + fi elif [ ! 
-z "$CONDA_DEPENDENCIES" ]; then echo "Using Mamba to install CONDA_DEPENDENCIES=${CONDA_DEPENDENCIES}" mamba install -y $CONDA_DEPENDENCIES @@ -59,4 +63,4 @@ fi echo "" echo "Installing test dependencies using pip" -python -m pip install $STD_ARGS -e .[$INSTALL_KIND] +python -m pip install $STD_ARGS $INSTALL_ARGS .[$INSTALL_KIND] diff --git a/tools/github_actions_install.sh b/tools/github_actions_install.sh deleted file mode 100755 index f52c193d773..00000000000 --- a/tools/github_actions_install.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -eo pipefail - -pip install -ve . diff --git a/tools/github_actions_test.sh b/tools/github_actions_test.sh index f218197bda6..78cc063d016 100755 --- a/tools/github_actions_test.sh +++ b/tools/github_actions_test.sh @@ -12,6 +12,17 @@ if [ "${MNE_CI_KIND}" == "notebook" ]; then else USE_DIRS="mne/" fi +JUNIT_PATH="junit-results.xml" +if [[ ! -z "$CONDA_ENV" ]] && [[ "${RUNNER_OS}" != "Windows" ]]; then + JUNIT_PATH="$(pwd)/${JUNIT_PATH}" + # Use the installed version after adding all (excluded) test files + cd .. + INSTALL_PATH=$(python -c "import mne, pathlib; print(str(pathlib.Path(mne.__file__).parents[1]))") + echo "Copying tests from $(pwd)/mne-python/mne/ to ${INSTALL_PATH}/mne/" + rsync -a --partial --progress --prune-empty-dirs --exclude="*.pyc" --include="**/" --include="**/tests/*" --include="**/tests/data/**" --exclude="**" ./mne-python/mne/ ${INSTALL_PATH}/mne/ + cd $INSTALL_PATH + echo "Executing from $(pwd)" +fi set -x -pytest -m "${CONDITION}" --tb=short --cov=mne --cov-report xml -vv ${USE_DIRS} +pytest -m "${CONDITION}" --tb=short --cov=mne --cov-report xml --color=yes --junit-xml=$JUNIT_PATH -vv ${USE_DIRS} set +x From 8e500a3f4c8e37136c72e13e060f819b711198f1 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Sun, 10 Dec 2023 12:46:55 +0100 Subject: [PATCH 08/37] Remove license text in README (#12284) --- README.rst | 35 +---------------------------------- 1 file changed, 1 insertion(+), 34 deletions(-) diff --git a/README.rst b/README.rst index ca4e08becba..433c6a1d82f 100644 --- a/README.rst +++ b/README.rst @@ -134,40 +134,7 @@ About License ^^^^^^^ -MNE-Python is **BSD-licensed** (BSD-3-Clause): - - This software is OSI Certified Open Source Software. - OSI Certified is a certification mark of the Open Source Initiative. - - Copyright (c) 2011-2022, authors of MNE-Python. - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of MNE-Python authors nor the names of any - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - - **This software is provided by the copyright holders and contributors - "as is" and any express or implied warranties, including, but not - limited to, the implied warranties of merchantability and fitness for - a particular purpose are disclaimed. 
In no event shall the copyright - owner or contributors be liable for any direct, indirect, incidental, - special, exemplary, or consequential damages (including, but not - limited to, procurement of substitute goods or services; loss of use, - data, or profits; or business interruption) however caused and on any - theory of liability, whether in contract, strict liability, or tort - (including negligence or otherwise) arising in any way out of the use - of this software, even if advised of the possibility of such - damage.** +MNE-Python is licensed under the BSD-3-Clause license. .. _Documentation: https://mne.tools/dev/ From bf03a03c91bc64b8a9cf72d75a399cfcb89c0662 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Sun, 10 Dec 2023 22:40:51 +0100 Subject: [PATCH 09/37] Simplify spectrum.plot() parameters (#12270) Co-authored-by: Daniel McCloy --- examples/datasets/brainstorm_data.py | 4 +- examples/preprocessing/eeg_csd.py | 4 +- examples/preprocessing/find_ref_artifacts.py | 6 +- mne/io/array/tests/test_array.py | 6 +- mne/report/report.py | 6 +- mne/time_frequency/spectrum.py | 63 +++++++++++-------- mne/time_frequency/tests/test_spectrum.py | 6 +- mne/viz/tests/test_epochs.py | 8 +-- mne/viz/tests/test_raw.py | 31 ++++----- mne/viz/utils.py | 33 +++------- tutorials/clinical/60_sleep.py | 1 + tutorials/epochs/20_visualize_epochs.py | 2 +- tutorials/epochs/30_epochs_metadata.py | 2 +- tutorials/intro/10_overview.py | 14 ++--- .../inverse/80_brainstorm_phantom_elekta.py | 4 +- tutorials/inverse/95_phantom_KIT.py | 2 +- tutorials/io/60_ctf_bst_auditory.py | 8 ++- .../10_preprocessing_overview.py | 2 +- .../preprocessing/30_filtering_resampling.py | 18 ++++-- .../50_artifact_correction_ssp.py | 9 ++- tutorials/preprocessing/59_head_positions.py | 2 +- .../preprocessing/70_fnirs_processing.py | 4 +- tutorials/raw/40_visualize_raw.py | 14 ++--- tutorials/simulation/10_array_objs.py | 2 +- tutorials/time-freq/10_spectrum_class.py | 8 +-- .../time-freq/20_sensors_time_frequency.py | 4 +- 26 files changed, 146 insertions(+), 117 deletions(-) diff --git a/examples/datasets/brainstorm_data.py b/examples/datasets/brainstorm_data.py index 0f32c704284..6331c9f1b29 100644 --- a/examples/datasets/brainstorm_data.py +++ b/examples/datasets/brainstorm_data.py @@ -41,7 +41,9 @@ raw.set_eeg_reference("average", projection=True) # show power line interference and remove it -raw.compute_psd(tmax=60).plot(average=False, picks="data", exclude="bads") +raw.compute_psd(tmax=60).plot( + average=False, amplitude=False, picks="data", exclude="bads" +) raw.notch_filter(np.arange(60, 181, 60), fir_design="firwin") events = mne.find_events(raw, stim_channel="UPPT001") diff --git a/examples/preprocessing/eeg_csd.py b/examples/preprocessing/eeg_csd.py index 73515e1f043..35ba959c34d 100644 --- a/examples/preprocessing/eeg_csd.py +++ b/examples/preprocessing/eeg_csd.py @@ -49,8 +49,8 @@ # %% # Also look at the power spectral densities: -raw.compute_psd().plot(picks="data", exclude="bads") -raw_csd.compute_psd().plot(picks="data", exclude="bads") +raw.compute_psd().plot(picks="data", exclude="bads", amplitude=False) +raw_csd.compute_psd().plot(picks="data", exclude="bads", amplitude=False) # %% # CSD can also be computed on Evoked (averaged) data. 
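To make the intent of these example updates concrete, here is a minimal sketch (not taken from the diff) of the two explicit call styles that replace the now-deprecated ``amplitude='auto'`` default; the sample-dataset path is purely illustrative and the data are downloaded on first use.

import mne

# Any Raw object works; the sample dataset is used only for illustration.
sample_dir = mne.datasets.sample.data_path()
raw = mne.io.read_raw_fif(sample_dir / "MEG" / "sample" / "sample_audvis_raw.fif")
spectrum = raw.compute_psd(fmax=50)

# Power spectrum -- the explicit form the examples above now use:
spectrum.plot(average=True, amplitude=False, picks="data", exclude="bads")

# Amplitude spectrum on a linear scale, requested explicitly:
spectrum.plot(average=True, amplitude=True, dB=False, picks="data", exclude="bads")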
diff --git a/examples/preprocessing/find_ref_artifacts.py b/examples/preprocessing/find_ref_artifacts.py index 93b96e89e9c..90e3d1fb0da 100644 --- a/examples/preprocessing/find_ref_artifacts.py +++ b/examples/preprocessing/find_ref_artifacts.py @@ -70,7 +70,7 @@ # %% # The PSD of these data show the noise as clear peaks. -raw.compute_psd(fmax=30).plot(picks="data", exclude="bads") +raw.compute_psd(fmax=30).plot(picks="data", exclude="bads", amplitude=False) # %% # Run the "together" algorithm. @@ -99,7 +99,7 @@ # %% # Cleaned data: -raw_tog.compute_psd(fmax=30).plot(picks="data", exclude="bads") +raw_tog.compute_psd(fmax=30).plot(picks="data", exclude="bads", amplitude=False) # %% # Now try the "separate" algorithm. @@ -143,7 +143,7 @@ # %% # Cleaned raw data PSD: -raw_sep.compute_psd(fmax=30).plot(picks="data", exclude="bads") +raw_sep.compute_psd(fmax=30).plot(picks="data", exclude="bads", amplitude=False) ############################################################################## # References diff --git a/mne/io/array/tests/test_array.py b/mne/io/array/tests/test_array.py index e8013d631aa..10b7c834d98 100644 --- a/mne/io/array/tests/test_array.py +++ b/mne/io/array/tests/test_array.py @@ -151,7 +151,9 @@ def test_array_raw(): # plotting raw2.plot() - raw2.compute_psd(tmax=2.0, n_fft=1024).plot(average=True, spatial_colors=False) + raw2.compute_psd(tmax=2.0, n_fft=1024).plot( + average=True, amplitude=False, spatial_colors=False + ) plt.close("all") # epoching @@ -184,5 +186,5 @@ def test_array_raw(): raw = RawArray(data, info) raw.set_montage(montage) spectrum = raw.compute_psd() - spectrum.plot(average=False) # looking for nonexistent layout + spectrum.plot(average=False, amplitude=False) # looking for nonexistent layout spectrum.plot_topo() diff --git a/mne/report/report.py b/mne/report/report.py index 6a37f095c2f..9a547d4f7b6 100644 --- a/mne/report/report.py +++ b/mne/report/report.py @@ -3218,7 +3218,9 @@ def _add_raw( init_kwargs, plot_kwargs = _split_psd_kwargs(kwargs=add_psd) init_kwargs.setdefault("fmax", fmax) plot_kwargs.setdefault("show", False) - fig = raw.compute_psd(**init_kwargs).plot(**plot_kwargs) + with warnings.catch_warnings(): + warnings.simplefilter(action="ignore", category=FutureWarning) + fig = raw.compute_psd(**init_kwargs).plot(**plot_kwargs) _constrain_fig_resolution(fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES) self._add_figure( fig=fig, @@ -3785,7 +3787,7 @@ def _add_epochs_psd(self, *, epochs, psd, image_format, tags, section, replace): if fmax > 0.5 * epochs.info["sfreq"]: fmax = np.inf - fig = epochs_for_psd.compute_psd(fmax=fmax).plot(show=False) + fig = epochs_for_psd.compute_psd(fmax=fmax).plot(amplitude=False, show=False) _constrain_fig_resolution(fig, max_width=MAX_IMG_WIDTH, max_res=MAX_IMG_RES) duration = round(epoch_duration * len(epochs_for_psd), 1) caption = ( diff --git a/mne/time_frequency/spectrum.py b/mne/time_frequency/spectrum.py index 2b31ca46340..a7a2a753932 100644 --- a/mne/time_frequency/spectrum.py +++ b/mne/time_frequency/spectrum.py @@ -573,7 +573,7 @@ def plot( picks=None, average=False, dB=True, - amplitude="auto", + amplitude=None, xscale="linear", ci="sd", ci_alpha=0.3, @@ -593,52 +593,52 @@ def plot( .. versionchanged:: 1.5 In version 1.5, the default behavior changed so that all - :term:`data channels` (not just "good" data channels) are shown - by default. + :term:`data channels` (not just "good" data channels) are shown by + default. average : bool - Whether to average across channels before plotting. 
If ``True``, - interactive plotting of scalp topography is disabled, and - parameters ``ci`` and ``ci_alpha`` control the style of the - confidence band around the mean. Default is ``False``. + Whether to average across channels before plotting. If ``True``, interactive + plotting of scalp topography is disabled, and parameters ``ci`` and + ``ci_alpha`` control the style of the confidence band around the mean. + Default is ``False``. %(dB_spectrum_plot)s amplitude : bool | 'auto' Whether to plot an amplitude spectrum (``True``) or power spectrum - (``False``). If ``'auto'``, will plot a power spectrum when - ``dB=True`` and an amplitude spectrum otherwise. Default is - ``'auto'``. + (``False``). If ``'auto'``, will plot a power spectrum when ``dB=True`` and + an amplitude spectrum otherwise. Default is ``'auto'``. + + .. versionchanged:: 1.8 + In version 1.8, the value ``amplitude="auto"`` will be removed. The + default value will change to ``amplitude=False``. %(xscale_plot_psd)s ci : float | 'sd' | 'range' | None - Type of confidence band drawn around the mean when - ``average=True``. If ``'sd'`` the band spans ±1 standard deviation - across channels. If ``'range'`` the band spans the range across - channels at each frequency. If a :class:`float`, it indicates the - (bootstrapped) confidence interval to display, and must satisfy - ``0 < ci <= 100``. If ``None``, no band is drawn. Default is - ``sd``. + Type of confidence band drawn around the mean when ``average=True``. If + ``'sd'`` the band spans ±1 standard deviation across channels. If + ``'range'`` the band spans the range across channels at each frequency. If a + :class:`float`, it indicates the (bootstrapped) confidence interval to + display, and must satisfy ``0 < ci <= 100``. If ``None``, no band is drawn. + Default is ``sd``. ci_alpha : float - Opacity of the confidence band. Must satisfy - ``0 <= ci_alpha <= 1``. Default is 0.3. + Opacity of the confidence band. Must satisfy ``0 <= ci_alpha <= 1``. Default + is 0.3. %(color_plot_psd)s alpha : float | None Opacity of the spectrum line(s). If :class:`float`, must satisfy ``0 <= alpha <= 1``. If ``None``, opacity will be ``1`` when - ``average=True`` and ``0.1`` when ``average=False``. Default is - ``None``. + ``average=True`` and ``0.1`` when ``average=False``. Default is ``None``. %(spatial_colors_psd)s %(sphere_topomap_auto)s %(exclude_spectrum_plot)s .. versionchanged:: 1.5 - In version 1.5, the default behavior changed from - ``exclude='bads'`` to ``exclude=()``. + In version 1.5, the default behavior changed from ``exclude='bads'`` to + ``exclude=()``. %(axes_spectrum_plot_topomap)s %(show)s Returns ------- fig : instance of matplotlib.figure.Figure - Figure with spectra plotted in separate subplots for each channel - type. + Figure with spectra plotted in separate subplots for each channel type. """ # Must nest this _mpl_figure import because of the BACKEND global # stuff @@ -652,10 +652,19 @@ def plot( scalings = _handle_default("scalings", None) titles = _handle_default("titles", None) units = _handle_default("units", None) - if amplitude == "auto": + + depr_message = ( + "The value of `amplitude='auto'` will be removed in MNE 1.8.0, and the new " + "default will be `amplitude=False`." 
+ ) + if amplitude is None or amplitude == "auto": + warn(depr_message, FutureWarning) estimate = "power" if dB else "amplitude" - else: # amplitude is boolean + else: estimate = "amplitude" if amplitude else "power" + + logger.info(f"Plotting {estimate} spectral density ({dB=}).") + # split picks by channel type picks = _picks_to_idx( self.info, picks, "data", exclude=exclude, with_ref_meg=False diff --git a/mne/time_frequency/tests/test_spectrum.py b/mne/time_frequency/tests/test_spectrum.py index 653f0ab1411..26c18529143 100644 --- a/mne/time_frequency/tests/test_spectrum.py +++ b/mne/time_frequency/tests/test_spectrum.py @@ -1,5 +1,6 @@ # License: BSD-3-Clause # Copyright the MNE-Python contributors. + from functools import partial import numpy as np @@ -270,12 +271,13 @@ def test_spectrum_kwarg_triaging(raw): import matplotlib.pyplot as plt regex = r"legacy plot_psd\(\) method.*unexpected keyword.*'axes'.*Try rewriting" - fig, axes = plt.subplots(1, 2) + _, axes = plt.subplots(1, 2) # `axes` is the new param name: technically only valid for Spectrum.plot() with pytest.warns(RuntimeWarning, match=regex): raw.plot_psd(axes=axes) # `ax` is the correct legacy param name - raw.plot_psd(ax=axes) + with pytest.warns(FutureWarning, match="amplitude='auto'"): + raw.plot_psd(ax=axes) def _check_spectrum_equivalent(spect1, spect2, tmp_path): diff --git a/mne/viz/tests/test_epochs.py b/mne/viz/tests/test_epochs.py index cf1c07b7d85..6dcfdb57bdf 100644 --- a/mne/viz/tests/test_epochs.py +++ b/mne/viz/tests/test_epochs.py @@ -396,9 +396,9 @@ def test_plot_psd_epochs(epochs): """Test plotting epochs psd (+topomap).""" spectrum = epochs.compute_psd() old_defaults = dict(picks="data", exclude="bads") - spectrum.plot(average=True, spatial_colors=False, **old_defaults) - spectrum.plot(average=False, spatial_colors=True, **old_defaults) - spectrum.plot(average=False, spatial_colors=False, **old_defaults) + spectrum.plot(average=True, amplitude=False, spatial_colors=False, **old_defaults) + spectrum.plot(average=False, amplitude=False, spatial_colors=True, **old_defaults) + spectrum.plot(average=False, amplitude=False, spatial_colors=False, **old_defaults) # test plot_psd_topomap errors with pytest.raises(RuntimeError, match="No frequencies in band"): spectrum.plot_topomap(bands=dict(foo=(0, 0.01))) @@ -497,7 +497,7 @@ def test_plot_psd_epochs_ctf(raw_ctf): for dB in [True, False]: spectrum.plot(dB=dB) spectrum.drop_channels(["EEG060"]) - spectrum.plot(spatial_colors=False, average=False, **old_defaults) + spectrum.plot(spatial_colors=False, average=False, amplitude=False, **old_defaults) with pytest.raises(RuntimeError, match="No frequencies in band"): spectrum.plot_topomap(bands=[(0, 0.01, "foo")]) spectrum.plot_topomap() diff --git a/mne/viz/tests/test_raw.py b/mne/viz/tests/test_raw.py index 89619d36e2f..619c79a7111 100644 --- a/mne/viz/tests/test_raw.py +++ b/mne/viz/tests/test_raw.py @@ -938,29 +938,33 @@ def test_plot_raw_psd(raw, raw_orig): spectrum = raw.compute_psd() # deprecation change handler old_defaults = dict(picks="data", exclude="bads") - fig = spectrum.plot(average=False) + fig = spectrum.plot(average=False, amplitude=False) # normal mode - fig = spectrum.plot(average=False, **old_defaults) + fig = spectrum.plot(average=False, amplitude=False, **old_defaults) fig.canvas.callbacks.process( "resize_event", backend_bases.ResizeEvent("resize_event", fig.canvas) ) # specific mode picks = pick_types(spectrum.info, meg="mag", eeg=False)[:4] - spectrum.plot(picks=picks, ci="range", 
spatial_colors=True, exclude="bads") - raw.compute_psd(tmax=20.0).plot(color="yellow", dB=False, alpha=0.4, **old_defaults) + spectrum.plot( + picks=picks, ci="range", spatial_colors=True, exclude="bads", amplitude=False + ) + raw.compute_psd(tmax=20.0).plot( + color="yellow", dB=False, alpha=0.4, amplitude=True, **old_defaults + ) plt.close("all") # one axes supplied ax = plt.axes() - spectrum.plot(picks=picks, axes=ax, average=True, exclude="bads") + spectrum.plot(picks=picks, axes=ax, average=True, exclude="bads", amplitude=False) plt.close("all") # two axes supplied _, axs = plt.subplots(2) - spectrum.plot(axes=axs, average=True, **old_defaults) + spectrum.plot(axes=axs, average=True, amplitude=False, **old_defaults) plt.close("all") # need 2, got 1 ax = plt.axes() with pytest.raises(ValueError, match="of length 2.*the length is 1"): - spectrum.plot(axes=ax, average=True, **old_defaults) + spectrum.plot(axes=ax, average=True, amplitude=False, **old_defaults) plt.close("all") # topo psd ax = plt.subplot() @@ -981,14 +985,13 @@ def test_plot_raw_psd(raw, raw_orig): # check grad axes title = fig.axes[0].get_title() ylabel = fig.axes[0].get_ylabel() - ends_dB = ylabel.endswith("mathrm{(dB)}$") unit = r"fT/cm/\sqrt{Hz}" if amplitude else "(fT/cm)²/Hz" assert title == "Gradiometers", title assert unit in ylabel, ylabel if dB: - assert ends_dB, ylabel + assert "dB" in ylabel else: - assert not ends_dB, ylabel + assert "dB" not in ylabel # check mag axes title = fig.axes[1].get_title() ylabel = fig.axes[1].get_ylabel() @@ -1006,8 +1009,8 @@ def test_plot_raw_psd(raw, raw_orig): raw = raw_orig.crop(0, 1) picks = pick_types(raw.info, meg=True) spectrum = raw.compute_psd(picks=picks) - spectrum.plot(average=False, **old_defaults) - spectrum.plot(average=True, **old_defaults) + spectrum.plot(average=False, amplitude=False, **old_defaults) + spectrum.plot(average=True, amplitude=False, **old_defaults) plt.close("all") raw.set_channel_types( { @@ -1018,7 +1021,7 @@ def test_plot_raw_psd(raw, raw_orig): }, verbose="error", ) - fig = raw.compute_psd().plot(**old_defaults) + fig = raw.compute_psd().plot(amplitude=False, **old_defaults) assert len(fig.axes) == 10 plt.close("all") @@ -1029,7 +1032,7 @@ def test_plot_raw_psd(raw, raw_orig): raw = RawArray(data, info) picks = pick_types(raw.info, misc=True) spectrum = raw.compute_psd(picks=picks, n_fft=n_fft) - spectrum.plot(spatial_colors=False, picks=picks, exclude="bads") + spectrum.plot(spatial_colors=False, picks=picks, exclude="bads", amplitude=False) plt.close("all") diff --git a/mne/viz/utils.py b/mne/viz/utils.py index dd8323a2f85..180d2b37595 100644 --- a/mne/viz/utils.py +++ b/mne/viz/utils.py @@ -2360,29 +2360,12 @@ def _make_combine_callable(combine): def _convert_psds( psds, dB, estimate, scaling, unit, ch_names=None, first_dim="channel" ): - """Convert PSDs to dB (if necessary) and appropriate units. - - The following table summarizes the relationship between the value of - parameters ``dB`` and ``estimate``, and the type of plot and corresponding - units. - - | dB | estimate | plot | units | - |-------+-------------+------+-------------------| - | True | 'power' | PSD | amp**2/Hz (dB) | - | True | 'amplitude' | ASD | amp/sqrt(Hz) (dB) | - | True | 'auto' | PSD | amp**2/Hz (dB) | - | False | 'power' | PSD | amp**2/Hz | - | False | 'amplitude' | ASD | amp/sqrt(Hz) | - | False | 'auto' | ASD | amp/sqrt(Hz) | - - where amp are the units corresponding to the variable, as specified by - ``unit``. 
- """ + """Convert PSDs to dB (if necessary) and appropriate units.""" _check_option("first_dim", first_dim, ["channel", "epoch"]) where = np.where(psds.min(1) <= 0)[0] if len(where) > 0: - # Construct a helpful error message, depending on whether the first - # dimension of `psds` are channels or epochs. + # Construct a helpful error message, depending on whether the first dimension of + # `psds` corresponds to channels or epochs. if dB: bad_value = "Infinite" else: @@ -2404,16 +2387,18 @@ def _convert_psds( if estimate == "amplitude": np.sqrt(psds, out=psds) psds *= scaling - ylabel = r"$\mathrm{%s/\sqrt{Hz}}$" % unit + ylabel = rf"$\mathrm{{{unit}/\sqrt{{Hz}}}}$" else: psds *= scaling * scaling if "/" in unit: - unit = "(%s)" % unit - ylabel = r"$\mathrm{%s²/Hz}$" % unit + unit = f"({unit})" + ylabel = rf"$\mathrm{{{unit}²/Hz}}$" if dB: np.log10(np.maximum(psds, np.finfo(float).tiny), out=psds) psds *= 10 - ylabel += r"$\ \mathrm{(dB)}$" + ylabel = r"$\mathrm{dB}\ $" + ylabel + ylabel = "Power (" + ylabel if estimate == "power" else "Amplitude (" + ylabel + ylabel += ")" return ylabel diff --git a/tutorials/clinical/60_sleep.py b/tutorials/clinical/60_sleep.py index 020d00bab7e..25273a0ff2f 100644 --- a/tutorials/clinical/60_sleep.py +++ b/tutorials/clinical/60_sleep.py @@ -219,6 +219,7 @@ axes=ax, show=False, average=True, + amplitude=False, spatial_colors=False, picks="data", exclude="bads", diff --git a/tutorials/epochs/20_visualize_epochs.py b/tutorials/epochs/20_visualize_epochs.py index 69864d19e26..e311b324ee8 100644 --- a/tutorials/epochs/20_visualize_epochs.py +++ b/tutorials/epochs/20_visualize_epochs.py @@ -144,7 +144,7 @@ # :class:`~mne.time_frequency.EpochsSpectrum`'s # :meth:`~mne.time_frequency.EpochsSpectrum.plot` method. -epochs["auditory"].compute_psd().plot(picks="eeg", exclude="bads") +epochs["auditory"].compute_psd().plot(picks="eeg", exclude="bads", amplitude=False) # %% # It is also possible to plot spectral power estimates across sensors as a diff --git a/tutorials/epochs/30_epochs_metadata.py b/tutorials/epochs/30_epochs_metadata.py index 7d5c06871ad..51d551090d4 100644 --- a/tutorials/epochs/30_epochs_metadata.py +++ b/tutorials/epochs/30_epochs_metadata.py @@ -116,7 +116,7 @@ # MNE-Python will try the traditional method first before falling back on rich # metadata querying. -epochs["solenoid"].compute_psd().plot(picks="data", exclude="bads") +epochs["solenoid"].compute_psd().plot(picks="data", exclude="bads", amplitude=False) # %% # One use of the Pandas query string approach is to select specific words for diff --git a/tutorials/intro/10_overview.py b/tutorials/intro/10_overview.py index 20dc532f65a..94b659444b3 100644 --- a/tutorials/intro/10_overview.py +++ b/tutorials/intro/10_overview.py @@ -5,12 +5,12 @@ Overview of MEG/EEG analysis with MNE-Python ============================================ -This tutorial covers the basic EEG/MEG pipeline for event-related analysis: -loading data, epoching, averaging, plotting, and estimating cortical activity -from sensor data. It introduces the core MNE-Python data structures -`~mne.io.Raw`, `~mne.Epochs`, `~mne.Evoked`, and `~mne.SourceEstimate`, and -covers a lot of ground fairly quickly (at the expense of depth). Subsequent -tutorials address each of these topics in greater detail. +This tutorial covers the basic EEG/MEG pipeline for event-related analysis: loading +data, epoching, averaging, plotting, and estimating cortical activity from sensor data. 
+It introduces the core MNE-Python data structures `~mne.io.Raw`, `~mne.Epochs`, +`~mne.Evoked`, and `~mne.SourceEstimate`, and covers a lot of ground fairly quickly (at +the expense of depth). Subsequent tutorials address each of these topics in greater +detail. We begin by importing the necessary Python modules: """ @@ -79,7 +79,7 @@ # sessions, `~mne.io.Raw.plot` is interactive and allows scrolling, scaling, # bad channel marking, annotations, projector toggling, etc. -raw.compute_psd(fmax=50).plot(picks="data", exclude="bads") +raw.compute_psd(fmax=50).plot(picks="data", exclude="bads", amplitude=False) raw.plot(duration=5, n_channels=30) # %% diff --git a/tutorials/inverse/80_brainstorm_phantom_elekta.py b/tutorials/inverse/80_brainstorm_phantom_elekta.py index 8184badeda3..303be4260d1 100644 --- a/tutorials/inverse/80_brainstorm_phantom_elekta.py +++ b/tutorials/inverse/80_brainstorm_phantom_elekta.py @@ -53,7 +53,9 @@ # noise (five peaks around 300 Hz). Here, we use only the first 30 seconds # to save memory: -raw.compute_psd(tmax=30).plot(average=False, picks="data", exclude="bads") +raw.compute_psd(tmax=30).plot( + average=False, amplitude=False, picks="data", exclude="bads" +) # %% # Our phantom produces sinusoidal bursts at 20 Hz: diff --git a/tutorials/inverse/95_phantom_KIT.py b/tutorials/inverse/95_phantom_KIT.py index 444ae4635fd..6a07658e13a 100644 --- a/tutorials/inverse/95_phantom_KIT.py +++ b/tutorials/inverse/95_phantom_KIT.py @@ -40,7 +40,7 @@ # boxcar windowing of the 11 Hz sinusoid. spectrum = raw.copy().crop(0, 60).compute_psd(n_fft=10000) -fig = spectrum.plot() +fig = spectrum.plot(amplitude=False) fig.axes[0].set_xlim(0, 50) dip_freq = 11.0 fig.axes[0].axvline(dip_freq, color="r", ls="--", lw=2, zorder=4) diff --git a/tutorials/io/60_ctf_bst_auditory.py b/tutorials/io/60_ctf_bst_auditory.py index dd8d9abadf5..450b8237db4 100644 --- a/tutorials/io/60_ctf_bst_auditory.py +++ b/tutorials/io/60_ctf_bst_auditory.py @@ -165,10 +165,14 @@ # saving mode we do the filtering at evoked stage, which is not something you # usually would do. if not use_precomputed: - raw.compute_psd(tmax=np.inf, picks="meg").plot(picks="data", exclude="bads") + raw.compute_psd(tmax=np.inf, picks="meg").plot( + picks="data", exclude="bads", amplitude=False + ) notches = np.arange(60, 181, 60) raw.notch_filter(notches, phase="zero-double", fir_design="firwin2") - raw.compute_psd(tmax=np.inf, picks="meg").plot(picks="data", exclude="bads") + raw.compute_psd(tmax=np.inf, picks="meg").plot( + picks="data", exclude="bads", amplitude=False + ) # %% # We also lowpass filter the data at 100 Hz to remove the hf components. diff --git a/tutorials/preprocessing/10_preprocessing_overview.py b/tutorials/preprocessing/10_preprocessing_overview.py index 483ac653767..d70fa4b4811 100644 --- a/tutorials/preprocessing/10_preprocessing_overview.py +++ b/tutorials/preprocessing/10_preprocessing_overview.py @@ -141,7 +141,7 @@ # use :meth:`~mne.io.Raw.compute_psd` to illustrate. 
fig = raw.compute_psd(tmax=np.inf, fmax=250).plot( - average=True, picks="data", exclude="bads" + average=True, amplitude=False, picks="data", exclude="bads" ) # add some arrows at 60 Hz and its harmonics: for ax in fig.axes[1:]: diff --git a/tutorials/preprocessing/30_filtering_resampling.py b/tutorials/preprocessing/30_filtering_resampling.py index 6c118c99180..a3be45e1ec2 100644 --- a/tutorials/preprocessing/30_filtering_resampling.py +++ b/tutorials/preprocessing/30_filtering_resampling.py @@ -123,7 +123,7 @@ def add_arrows(axes): - # add some arrows at 60 Hz and its harmonics + """Add some arrows at 60 Hz and its harmonics.""" for ax in axes: freqs = ax.lines[-1].get_xdata() psds = ax.lines[-1].get_ydata() @@ -143,7 +143,9 @@ def add_arrows(axes): ) -fig = raw.compute_psd(fmax=250).plot(average=True, picks="data", exclude="bads") +fig = raw.compute_psd(fmax=250).plot( + average=True, amplitude=False, picks="data", exclude="bads" +) add_arrows(fig.axes[:2]) # %% @@ -159,7 +161,9 @@ def add_arrows(axes): freqs = (60, 120, 180, 240) raw_notch = raw.copy().notch_filter(freqs=freqs, picks=meg_picks) for title, data in zip(["Un", "Notch "], [raw, raw_notch]): - fig = data.compute_psd(fmax=250).plot(average=True, picks="data", exclude="bads") + fig = data.compute_psd(fmax=250).plot( + average=True, amplitude=False, picks="data", exclude="bads" + ) fig.suptitle("{}filtered".format(title), size="xx-large", weight="bold") add_arrows(fig.axes[:2]) @@ -178,7 +182,9 @@ def add_arrows(axes): freqs=freqs, picks=meg_picks, method="spectrum_fit", filter_length="10s" ) for title, data in zip(["Un", "spectrum_fit "], [raw, raw_notch_fit]): - fig = data.compute_psd(fmax=250).plot(average=True, picks="data", exclude="bads") + fig = data.compute_psd(fmax=250).plot( + average=True, amplitude=False, picks="data", exclude="bads" + ) fig.suptitle("{}filtered".format(title), size="xx-large", weight="bold") add_arrows(fig.axes[:2]) @@ -218,7 +224,7 @@ def add_arrows(axes): axes, [raw, raw_downsampled], ["Original", "Downsampled"], n_ffts ): fig = data.compute_psd(n_fft=n_fft).plot( - average=True, picks="data", exclude="bads", axes=ax + average=True, amplitude=False, picks="data", exclude="bads", axes=ax ) ax.set(title=title, xlim=(0, 300)) @@ -256,7 +262,7 @@ def add_arrows(axes): axes, [raw, raw_downsampled_poly], ["Original", "Downsampled (polyphase)"], n_ffts ): data.compute_psd(n_fft=n_fft).plot( - average=True, picks="data", exclude="bads", axes=ax + average=True, amplitude=False, picks="data", exclude="bads", axes=ax ) ax.set(title=title, xlim=(0, 300)) diff --git a/tutorials/preprocessing/50_artifact_correction_ssp.py b/tutorials/preprocessing/50_artifact_correction_ssp.py index 2f5af536a3d..b99d068430b 100644 --- a/tutorials/preprocessing/50_artifact_correction_ssp.py +++ b/tutorials/preprocessing/50_artifact_correction_ssp.py @@ -116,7 +116,14 @@ raw.info["bads"] = ["MEG 2443"] spectrum = empty_room_raw.compute_psd() for average in (False, True): - spectrum.plot(average=average, dB=False, xscale="log", picks="data", exclude="bads") + spectrum.plot( + average=average, + dB=False, + amplitude=True, + xscale="log", + picks="data", + exclude="bads", + ) # %% # Creating the empty-room projectors diff --git a/tutorials/preprocessing/59_head_positions.py b/tutorials/preprocessing/59_head_positions.py index cd1a454fd7b..37ed574132b 100644 --- a/tutorials/preprocessing/59_head_positions.py +++ b/tutorials/preprocessing/59_head_positions.py @@ -37,7 +37,7 @@ data_path = 
op.join(mne.datasets.testing.data_path(verbose=True), "SSS") fname_raw = op.join(data_path, "test_move_anon_raw.fif") raw = mne.io.read_raw_fif(fname_raw, allow_maxshield="yes").load_data() -raw.compute_psd().plot(picks="data", exclude="bads") +raw.compute_psd().plot(picks="data", exclude="bads", amplitude=False) # %% # We can use `mne.chpi.get_chpi_info` to retrieve the coil frequencies, diff --git a/tutorials/preprocessing/70_fnirs_processing.py b/tutorials/preprocessing/70_fnirs_processing.py index 8b59c6a31ff..cf0b63da311 100644 --- a/tutorials/preprocessing/70_fnirs_processing.py +++ b/tutorials/preprocessing/70_fnirs_processing.py @@ -157,7 +157,9 @@ raw_haemo_unfiltered = raw_haemo.copy() raw_haemo.filter(0.05, 0.7, h_trans_bandwidth=0.2, l_trans_bandwidth=0.02) for when, _raw in dict(Before=raw_haemo_unfiltered, After=raw_haemo).items(): - fig = _raw.compute_psd().plot(average=True, picks="data", exclude="bads") + fig = _raw.compute_psd().plot( + average=True, amplitude=False, picks="data", exclude="bads" + ) fig.suptitle(f"{when} filtering", weight="bold", size="x-large") # %% diff --git a/tutorials/raw/40_visualize_raw.py b/tutorials/raw/40_visualize_raw.py index 0056d90e413..091f44a1493 100644 --- a/tutorials/raw/40_visualize_raw.py +++ b/tutorials/raw/40_visualize_raw.py @@ -5,13 +5,13 @@ Built-in plotting methods for Raw objects ========================================= -This tutorial shows how to plot continuous data as a time series, how to plot -the spectral density of continuous data, and how to plot the sensor locations -and projectors stored in `~mne.io.Raw` objects. +This tutorial shows how to plot continuous data as a time series, how to plot the +spectral density of continuous data, and how to plot the sensor locations and projectors +stored in `~mne.io.Raw` objects. As usual we'll start by importing the modules we need, loading some -:ref:`example data `, and cropping the `~mne.io.Raw` -object to just 60 seconds before loading it into RAM to save memory: +:ref:`example data `, and cropping the `~mne.io.Raw` object to just 60 +seconds before loading it into RAM to save memory: """ # License: BSD-3-Clause # Copyright the MNE-Python contributors. 
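# A minimal sketch of the plotting choice exercised throughout these tutorial
# updates: the ``amplitude`` argument of Spectrum.plot() selects power
# (amplitude=False) or amplitude (amplitude=True) scaling of the PSD. The
# sample-dataset path is only an assumption for illustration; any Raw object
# works the same way.
import mne
sample_dir = mne.datasets.sample.data_path() / "MEG" / "sample"
raw = mne.io.read_raw_fif(sample_dir / "sample_audvis_raw.fif")
spectrum = raw.compute_psd(fmax=50)
spectrum.plot(average=True, picks="data", exclude="bads", amplitude=False)  # power
spectrum.plot(average=True, picks="data", exclude="bads", amplitude=True)   # amplitude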
@@ -120,7 +120,7 @@ # object has a :meth:`~mne.time_frequency.Spectrum.plot` method: spectrum = raw.compute_psd() -spectrum.plot(average=True, picks="data", exclude="bads") +spectrum.plot(average=True, picks="data", exclude="bads", amplitude=False) # %% # If the data have been filtered, vertical dashed lines will automatically @@ -134,7 +134,7 @@ # documentation of `~mne.time_frequency.Spectrum.plot` for full details): midline = ["EEG 002", "EEG 012", "EEG 030", "EEG 048", "EEG 058", "EEG 060"] -spectrum.plot(picks=midline, exclude="bads") +spectrum.plot(picks=midline, exclude="bads", amplitude=False) # %% # It is also possible to plot spectral power estimates across sensors as a diff --git a/tutorials/simulation/10_array_objs.py b/tutorials/simulation/10_array_objs.py index a2e94ab1c7a..4367d880207 100644 --- a/tutorials/simulation/10_array_objs.py +++ b/tutorials/simulation/10_array_objs.py @@ -232,4 +232,4 @@ info=info, ) -spectrum.plot(spatial_colors=False) +spectrum.plot(spatial_colors=False, amplitude=False) diff --git a/tutorials/time-freq/10_spectrum_class.py b/tutorials/time-freq/10_spectrum_class.py index c5f8f4fd639..9d7eb9fae5d 100644 --- a/tutorials/time-freq/10_spectrum_class.py +++ b/tutorials/time-freq/10_spectrum_class.py @@ -8,9 +8,9 @@ The Spectrum and EpochsSpectrum classes: frequency-domain data ============================================================== -This tutorial shows how to create and visualize frequency-domain -representations of your data, starting from continuous :class:`~mne.io.Raw`, -discontinuous :class:`~mne.Epochs`, or averaged :class:`~mne.Evoked` data. +This tutorial shows how to create and visualize frequency-domain representations of your +data, starting from continuous :class:`~mne.io.Raw`, discontinuous :class:`~mne.Epochs`, +or averaged :class:`~mne.Evoked` data. As usual we'll start by importing the modules we need, and loading our :ref:`sample dataset `: @@ -122,7 +122,7 @@ # (interpolated scalp topography of power, in specific frequency bands). A few # plot options are demonstrated below; see the docstrings for full details. -evk_spectrum.plot(picks="data", exclude="bads") +evk_spectrum.plot(picks="data", exclude="bads", amplitude=False) evk_spectrum.plot_topo(color="k", fig_facecolor="w", axis_facecolor="w") # %% diff --git a/tutorials/time-freq/20_sensors_time_frequency.py b/tutorials/time-freq/20_sensors_time_frequency.py index c4981b2b1e0..247fdddfab1 100644 --- a/tutorials/time-freq/20_sensors_time_frequency.py +++ b/tutorials/time-freq/20_sensors_time_frequency.py @@ -66,7 +66,9 @@ # %% # Let's first check out all channel types by averaging across epochs. 
-epochs.compute_psd(fmin=2.0, fmax=40.0).plot(average=True, picks="data", exclude="bads") +epochs.compute_psd(fmin=2.0, fmax=40.0).plot( + average=True, amplitude=False, picks="data", exclude="bads" +) # %% # Now, let's take a look at the spatial distributions of the PSD, averaged From 742065d5dd6c69223cc15bb7aca212e5d1a99988 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 14:35:22 +0000 Subject: [PATCH 10/37] Bump actions/setup-python from 4 to 5 (#12287) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Eric Larson --- .github/workflows/release.yml | 2 +- .github/workflows/tests.yml | 4 ++-- examples/time_frequency/source_power_spectrum_opm.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index dd85f1bb8a4..c34bb80fd38 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -19,7 +19,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: '3.10' - name: Install dependencies diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3a0517d59e1..419595c8354 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -20,7 +20,7 @@ jobs: timeout-minutes: 3 steps: - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: '3.11' - uses: pre-commit/action@v3.0.0 @@ -84,7 +84,7 @@ jobs: qt: true pyvista: false # Python (if pip) - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} if: startswith(matrix.kind, 'pip') diff --git a/examples/time_frequency/source_power_spectrum_opm.py b/examples/time_frequency/source_power_spectrum_opm.py index 11168cc08a5..8a12b78a9d3 100644 --- a/examples/time_frequency/source_power_spectrum_opm.py +++ b/examples/time_frequency/source_power_spectrum_opm.py @@ -82,7 +82,7 @@ fig = ( raws[kind] .compute_psd(n_fft=n_fft, proj=True) - .plot(picks="data", exclude="bads") + .plot(picks="data", exclude="bads", amplitude=True) ) fig.suptitle(titles[kind]) From 5df4cd6506ca2fb244070865a92bdbba8dabc1c4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 11 Dec 2023 21:18:53 +0000 Subject: [PATCH 11/37] [pre-commit.ci] pre-commit autoupdate (#12288) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 52a3d560fdc..cd6d522d4e7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: # Ruff mne - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.6 + rev: v0.1.7 hooks: - id: ruff name: ruff lint mne @@ -13,7 +13,7 @@ repos: # Ruff tutorials and examples - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.6 + rev: v0.1.7 hooks: - id: ruff name: ruff lint tutorials and examples From 4e2a60073c5c42030f825b7495032a91e5d7e722 Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Tue, 12 Dec 2023 13:15:01 -0600 Subject: [PATCH 12/37] support different time formats in Annotations.to_data_frame() (#12289) Co-authored-by: Eric Larson --- doc/changes/devel.rst | 1 + 
mne/annotations.py | 17 ++++++++++++++--- mne/epochs.py | 2 +- mne/evoked.py | 2 +- mne/io/base.py | 4 +++- mne/source_estimate.py | 2 +- mne/tests/test_annotations.py | 11 ++++++++--- mne/time_frequency/tfr.py | 2 +- mne/utils/dataframe.py | 4 ++-- 9 files changed, 32 insertions(+), 13 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index 3fd579ad4be..feae12dcbb2 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -39,6 +39,7 @@ Enhancements - We added type hints for the return values of :func:`mne.read_evokeds` and :func:`mne.io.read_raw`. Development environments like VS Code or PyCharm will now provide more help when using these functions in your code. (:gh:`12250` by `Richard Höchenberger`_ and `Eric Larson`_) - Add ``method="polyphase"`` to :meth:`mne.io.Raw.resample` and related functions to allow resampling using :func:`scipy.signal.upfirdn` (:gh:`12268` by `Eric Larson`_) - The package build backend was switched from ``setuptools`` to ``hatchling``. This will only affect users who build and install MNE-Python from source. (:gh:`12269`, :gh:`12281` by `Richard Höchenberger`_) +- :meth:`mne.Annotations.to_data_frame` can now output different formats for the ``onset`` column: seconds, milliseconds, datetime objects, and timedelta objects. (:gh:`12289` by `Daniel McCloy`_) Bugs ~~~~ diff --git a/mne/annotations.py b/mne/annotations.py index 20ee351e7fa..8a44a84f539 100644 --- a/mne/annotations.py +++ b/mne/annotations.py @@ -38,6 +38,8 @@ _check_fname, _check_option, _check_pandas_installed, + _check_time_format, + _convert_times, _DefaultEventParser, _dt_to_stamp, _is_numeric, @@ -442,9 +444,16 @@ def delete(self, idx): self.description = np.delete(self.description, idx) self.ch_names = np.delete(self.ch_names, idx) - def to_data_frame(self): + @fill_doc + def to_data_frame(self, time_format="datetime"): """Export annotations in tabular structure as a pandas DataFrame. + Parameters + ---------- + %(time_format_df_raw)s + + .. versionadded:: 1.7 + Returns ------- result : pandas.DataFrame @@ -453,12 +462,14 @@ def to_data_frame(self): annotations are channel-specific. 
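# A short usage sketch of the new ``time_format`` option (values are arbitrary
# and chosen only for illustration; conversion to a DataFrame needs pandas):
import mne
annot = mne.Annotations(
    onset=[1.0, 2.5], duration=[0.5, 0.5], description=["blink", "blink"], orig_time=0
)
df_dt = annot.to_data_frame()                         # default: datetime onsets
df_ms = annot.to_data_frame(time_format="ms")         # onsets in milliseconds
df_td = annot.to_data_frame(time_format="timedelta")  # onsets as timedeltas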
""" pd = _check_pandas_installed(strict=True) + valid_time_formats = ["ms", "timedelta", "datetime"] dt = _handle_meas_date(self.orig_time) if dt is None: dt = _handle_meas_date(0) + time_format = _check_time_format(time_format, valid_time_formats, dt) dt = dt.replace(tzinfo=None) - onsets_dt = [dt + timedelta(seconds=o) for o in self.onset] - df = dict(onset=onsets_dt, duration=self.duration, description=self.description) + times = _convert_times(self.onset, time_format, dt) + df = dict(onset=times, duration=self.duration, description=self.description) if self._any_ch_names(): df.update(ch_names=self.ch_names) df = pd.DataFrame(df) diff --git a/mne/epochs.py b/mne/epochs.py index 915670af516..0ae911ac5ae 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -2661,7 +2661,7 @@ def to_data_frame( # prepare extra columns / multiindex mindex = list() times = np.tile(times, n_epochs) - times = _convert_times(self, times, time_format) + times = _convert_times(times, time_format, self.info["meas_date"]) mindex.append(("time", times)) rev_event_id = {v: k for k, v in self.event_id.items()} conditions = [rev_event_id[k] for k in self.events[:, 2]] diff --git a/mne/evoked.py b/mne/evoked.py index 93583edb004..b23a4fc112c 100644 --- a/mne/evoked.py +++ b/mne/evoked.py @@ -1256,7 +1256,7 @@ def to_data_frame( data = _scale_dataframe_data(self, data, picks, scalings) # prepare extra columns / multiindex mindex = list() - times = _convert_times(self, times, time_format) + times = _convert_times(times, time_format, self.info["meas_date"]) mindex.append(("time", times)) # build DataFrame df = _build_data_frame( diff --git a/mne/io/base.py b/mne/io/base.py index 6bd92607eb2..94cd2ffcdd0 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -2271,7 +2271,9 @@ def to_data_frame( data = _scale_dataframe_data(self, data, picks, scalings) # prepare extra columns / multiindex mindex = list() - times = _convert_times(self, times, time_format) + times = _convert_times( + times, time_format, self.info["meas_date"], self.first_time + ) mindex.append(("time", times)) # build DataFrame df = _build_data_frame( diff --git a/mne/source_estimate.py b/mne/source_estimate.py index 50734817431..b2d197d7b2f 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -1398,7 +1398,7 @@ def to_data_frame( if self.subject is not None: default_index = ["subject", "time"] mindex.append(("subject", np.repeat(self.subject, data.shape[0]))) - times = _convert_times(self, times, time_format) + times = _convert_times(times, time_format) mindex.append(("time", times)) # triage surface vs volume source estimates col_names = list() diff --git a/mne/tests/test_annotations.py b/mne/tests/test_annotations.py index 1a351de5527..8f3124d6a30 100644 --- a/mne/tests/test_annotations.py +++ b/mne/tests/test_annotations.py @@ -1416,7 +1416,8 @@ def test_repr(): assert r == "" -def test_annotation_to_data_frame(): +@pytest.mark.parametrize("time_format", (None, "ms", "datetime", "timedelta")) +def test_annotation_to_data_frame(time_format): """Test annotation class to data frame conversion.""" pytest.importorskip("pandas") onset = np.arange(1, 10) @@ -1427,11 +1428,15 @@ def test_annotation_to_data_frame(): onset=onset, duration=durations, description=description, orig_time=0 ) - df = a.to_data_frame() + df = a.to_data_frame(time_format=time_format) for col in ["onset", "duration", "description"]: assert col in df.columns assert df.description[0] == "yy" - assert (df.onset[1] - df.onset[0]).seconds == 1 + want = 1000 if time_format == "ms" else 1 
+ got = df.onset[1] - df.onset[0] + if time_format in ("datetime", "timedelta"): + got = got.seconds + assert want == got assert df.groupby("description").count().onset["yy"] == 9 diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py index 400b711512e..279e2c79879 100644 --- a/mne/time_frequency/tfr.py +++ b/mne/time_frequency/tfr.py @@ -1333,7 +1333,7 @@ def to_data_frame( # prepare extra columns / multiindex mindex = list() times = np.tile(times, n_epochs * n_freqs) - times = _convert_times(self, times, time_format) + times = _convert_times(times, time_format, self.info["meas_date"]) mindex.append(("time", times)) freqs = self.freqs freqs = np.tile(np.repeat(freqs, n_times), n_epochs) diff --git a/mne/utils/dataframe.py b/mne/utils/dataframe.py index 599a2f88165..2f70c57c6e7 100644 --- a/mne/utils/dataframe.py +++ b/mne/utils/dataframe.py @@ -35,7 +35,7 @@ def _scale_dataframe_data(inst, data, picks, scalings): return data -def _convert_times(inst, times, time_format): +def _convert_times(times, time_format, meas_date=None, first_time=0): """Convert vector of time in seconds to ms, datetime, or timedelta.""" # private function; pandas already checked in calling function from pandas import to_timedelta @@ -45,7 +45,7 @@ def _convert_times(inst, times, time_format): elif time_format == "timedelta": times = to_timedelta(times, unit="s") elif time_format == "datetime": - times = to_timedelta(times + inst.first_time, unit="s") + inst.info["meas_date"] + times = to_timedelta(times + first_time, unit="s") + meas_date return times From 7ce9aa1789f9ebe928aaa315d993182ad416464e Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Tue, 12 Dec 2023 20:31:27 +0100 Subject: [PATCH 13/37] Unignore files (#12291) Co-authored-by: Eric Larson --- .gitignore | 9 +++++++-- pyproject.toml | 6 ------ 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/.gitignore b/.gitignore index 51707aa39e0..118eebd9c76 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,11 @@ junit-results.xml *.tmproj *.png *.dat +# make sure we ship data files +!mne/data/**/*.dat +!mne/data/**/*.fif +!mne/data/**/*.fif.gz +!mne/icons/**/*.png .DS_Store events.eve foo-lh.label @@ -27,7 +32,6 @@ foo.lout bar.lout foobar.lout epochs_data.mat -memmap*.dat tmp-*.w tmtags auto_examples @@ -62,11 +66,11 @@ tutorials/misc/report.h5 tutorials/io/fnirs.csv pip-log.txt .coverage* +!.coveragerc coverage.xml tags doc/coverages doc/samples -doc/*.dat doc/fil-result doc/optipng.exe sg_execution_times.rst @@ -93,6 +97,7 @@ cover .venv/ venv/ *.json +!codemeta.json .hypothesis/ .ruff_cache/ .ipynb_checkpoints/ diff --git a/pyproject.toml b/pyproject.toml index fb8757150ac..c23caa13d06 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -172,12 +172,6 @@ Forum = "https://mne.discourse.group/" "Source Code" = "https://github.com/mne-tools/mne-python/" [tool.hatch.build] -artifacts = [ - "/mne/data/**/*.dat", - "/mne/data/**/*.fif", - "/mne/data/**/*.fif.gz", - "/mne/icons/**/*.png", -] # excluded via .gitignore, but we do want to ship those files exclude = [ "/.*", "/*.yml", From 1034bffde6fe4a360da3fe155b3c16bdc6380b8d Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 13 Dec 2023 13:08:17 -0500 Subject: [PATCH 14/37] BUG: Fix passing removed params to command (#12294) --- mne/commands/mne_coreg.py | 38 -------------------------------------- 1 file changed, 38 deletions(-) diff --git a/mne/commands/mne_coreg.py b/mne/commands/mne_coreg.py index b32e8b9e3d7..b0551346e43 100644 --- a/mne/commands/mne_coreg.py +++ 
b/mne/commands/mne_coreg.py @@ -41,25 +41,6 @@ def run(): default=None, help="FIFF file with digitizer data for coregistration", ) - parser.add_option( - "-t", - "--tabbed", - dest="tabbed", - action="store_true", - default=None, - help="Option for small screens: Combine " - "the data source panel and the coregistration panel " - "into a single panel with tabs.", - ) - parser.add_option( - "--no-guess-mri", - dest="guess_mri_subject", - action="store_false", - default=None, - help="Prevent the GUI from automatically guessing and " - "changing the MRI subject when a new head shape source " - "file is selected.", - ) parser.add_option( "--head-opacity", type=float, @@ -94,20 +75,6 @@ def run(): dest="interaction", help='Interaction style to use, can be "trackball" or ' '"terrain".', ) - parser.add_option( - "--scale", - type=float, - default=None, - dest="scale", - help="Scale factor for the scene.", - ) - parser.add_option( - "--simple-rendering", - action="store_false", - dest="advanced_rendering", - default=None, - help="Use simplified OpenGL rendering", - ) _add_verbose_flag(parser) options, args = parser.parse_args() @@ -134,18 +101,13 @@ def run(): faulthandler.enable() mne.gui.coregistration( - tabbed=options.tabbed, inst=options.inst, subject=options.subject, subjects_dir=subjects_dir, - guess_mri_subject=options.guess_mri_subject, head_opacity=options.head_opacity, head_high_res=head_high_res, trans=trans, - scrollable=None, interaction=options.interaction, - scale=options.scale, - advanced_rendering=options.advanced_rendering, show=True, block=True, verbose=options.verbose, From 35f0ef65d02af33acf55ba01fa5aa62d8697e117 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Richard=20H=C3=B6chenberger?= Date: Thu, 14 Dec 2023 22:03:44 +0100 Subject: [PATCH 15/37] Add return type hints to `read_raw_*()`, `read_epochs()`, `read_annotations()` (#12296) Co-authored-by: Eric Larson --- doc/conf.py | 51 +++++++++++++++++---------------- mne/annotations.py | 6 ++-- mne/epochs.py | 2 +- mne/io/artemis123/artemis123.py | 2 +- mne/io/boxy/boxy.py | 2 +- mne/io/bti/bti.py | 2 +- mne/io/cnt/cnt.py | 2 +- mne/io/ctf/ctf.py | 7 +---- mne/io/curry/curry.py | 2 +- mne/io/edf/edf.py | 6 ++-- mne/io/eeglab/eeglab.py | 2 +- mne/io/egi/egi.py | 2 +- mne/io/eximia/eximia.py | 2 +- mne/io/eyelink/eyelink.py | 2 +- mne/io/fieldtrip/fieldtrip.py | 2 +- mne/io/fiff/raw.py | 2 +- mne/io/fil/fil.py | 4 ++- mne/io/hitachi/hitachi.py | 2 +- mne/io/kit/kit.py | 2 +- mne/io/nedf/nedf.py | 2 +- mne/io/neuralynx/neuralynx.py | 2 +- mne/io/nicolet/nicolet.py | 2 +- mne/io/nihon/nihon.py | 2 +- mne/io/nirx/nirx.py | 4 ++- mne/io/nsx/nsx.py | 2 +- mne/io/persyst/persyst.py | 2 +- mne/io/snirf/_snirf.py | 4 ++- pyproject.toml | 4 +-- 28 files changed, 66 insertions(+), 60 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index 758cb7a529a..3b544f2a03e 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -269,6 +269,27 @@ "EOGRegression": "mne.preprocessing.EOGRegression", "Spectrum": "mne.time_frequency.Spectrum", "EpochsSpectrum": "mne.time_frequency.EpochsSpectrum", + "EpochsFIF": "mne.Epochs", + "RawBOXY": "mne.io.Raw", + "RawBrainVision": "mne.io.Raw", + "RawBTi": "mne.io.Raw", + "RawCTF": "mne.io.Raw", + "RawCurry": "mne.io.Raw", + "RawEDF": "mne.io.Raw", + "RawEEGLAB": "mne.io.Raw", + "RawEGI": "mne.io.Raw", + "RawEximia": "mne.io.Raw", + "RawEyelink": "mne.io.Raw", + "RawFIL": "mne.io.Raw", + "RawGDF": "mne.io.Raw", + "RawHitachi": "mne.io.Raw", + "RawKIT": "mne.io.Raw", + "RawNedf": "mne.io.Raw", + "RawNeuralynx": "mne.io.Raw", + 
"RawNihon": "mne.io.Raw", + "RawNIRX": "mne.io.Raw", + "RawPersyst": "mne.io.Raw", + "RawSNIRF": "mne.io.Raw", # dipy "dipy.align.AffineMap": "dipy.align.imaffine.AffineMap", "dipy.align.DiffeomorphicMap": "dipy.align.imwarp.DiffeomorphicMap", @@ -367,34 +388,12 @@ "n_moments", "n_patterns", "n_new_events", - # Undocumented (on purpose) - "RawKIT", - "RawEximia", - "RawEGI", - "RawEEGLAB", - "RawEDF", - "RawCTF", - "RawBTi", - "RawBrainVision", - "RawCurry", - "RawNIRX", - "RawNeuralynx", - "RawGDF", - "RawSNIRF", - "RawBOXY", - "RawPersyst", - "RawNihon", - "RawNedf", - "RawHitachi", - "RawFIL", - "RawEyelink", # sklearn subclasses "mapping", "to", "any", # unlinkable "CoregistrationUI", - "IntracranialElectrodeLocator", "mne_qt_browser.figure.MNEQtBrowser", # pooch, since its website is unreliable and users will rarely need the links "pooch.Unzip", @@ -779,8 +778,12 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): ("py:class", "None. Remove all items from od."), ] nitpick_ignore_regex = [ - ("py:.*", r"mne\.io\.BaseRaw.*"), - ("py:.*", r"mne\.BaseEpochs.*"), + # Classes whose methods we purposefully do not document + ("py:.*", r"mne\.io\.BaseRaw.*"), # use mne.io.Raw + ("py:.*", r"mne\.BaseEpochs.*"), # use mne.Epochs + # Type hints for undocumented types + ("py:.*", r"mne\.io\..*\.Raw.*"), # RawEDF etc. + ("py:.*", r"mne\.epochs\.EpochsFIF.*"), ( "py:obj", "(filename|metadata|proj|times|tmax|tmin|annotations|ch_names|compensation_grade|filenames|first_samp|first_time|last_samp|n_times|proj|times|tmax|tmin)", diff --git a/mne/annotations.py b/mne/annotations.py index 8a44a84f539..783ee6e1901 100644 --- a/mne/annotations.py +++ b/mne/annotations.py @@ -1150,7 +1150,9 @@ def _write_annotations_txt(fname, annot): @fill_doc -def read_annotations(fname, sfreq="auto", uint16_codec=None, encoding="utf8"): +def read_annotations( + fname, sfreq="auto", uint16_codec=None, encoding="utf8" +) -> Annotations: r"""Read annotations from a file. This function reads a ``.fif``, ``.fif.gz``, ``.vmrk``, ``.amrk``, @@ -1183,7 +1185,7 @@ def read_annotations(fname, sfreq="auto", uint16_codec=None, encoding="utf8"): Returns ------- - annot : instance of Annotations | None + annot : instance of Annotations The annotations. Notes diff --git a/mne/epochs.py b/mne/epochs.py index 0ae911ac5ae..50403345e92 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -3850,7 +3850,7 @@ def _read_one_epoch_file(f, tree, preload): @verbose -def read_epochs(fname, proj=True, preload=True, verbose=None): +def read_epochs(fname, proj=True, preload=True, verbose=None) -> "EpochsFIF": """Read epochs from a fif file. Parameters diff --git a/mne/io/artemis123/artemis123.py b/mne/io/artemis123/artemis123.py index fb7b33e5b6c..64d98c54dc2 100644 --- a/mne/io/artemis123/artemis123.py +++ b/mne/io/artemis123/artemis123.py @@ -23,7 +23,7 @@ @verbose def read_raw_artemis123( input_fname, preload=False, verbose=None, pos_fname=None, add_head_trans=True -): +) -> "RawArtemis123": """Read Artemis123 data as raw object. Parameters diff --git a/mne/io/boxy/boxy.py b/mne/io/boxy/boxy.py index b2afe096f64..a240a1f387e 100644 --- a/mne/io/boxy/boxy.py +++ b/mne/io/boxy/boxy.py @@ -15,7 +15,7 @@ @fill_doc -def read_raw_boxy(fname, preload=False, verbose=None): +def read_raw_boxy(fname, preload=False, verbose=None) -> "RawBOXY": """Reader for an optical imaging recording. 
This function has been tested using the ISS Imagent I and II systems diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py index 99a77cd2b8c..91fb2a112fd 100644 --- a/mne/io/bti/bti.py +++ b/mne/io/bti/bti.py @@ -1435,7 +1435,7 @@ def read_raw_bti( eog_ch=("E63", "E64"), preload=False, verbose=None, -): +) -> "RawBTi": """Raw object from 4D Neuroimaging MagnesWH3600 data. .. note:: diff --git a/mne/io/cnt/cnt.py b/mne/io/cnt/cnt.py index 496ed91cd38..78bc15db580 100644 --- a/mne/io/cnt/cnt.py +++ b/mne/io/cnt/cnt.py @@ -174,7 +174,7 @@ def read_raw_cnt( header="auto", preload=False, verbose=None, -): +) -> "RawCNT": """Read CNT data as raw object. .. Note:: diff --git a/mne/io/ctf/ctf.py b/mne/io/ctf/ctf.py index 1d4970624bd..65983258db5 100644 --- a/mne/io/ctf/ctf.py +++ b/mne/io/ctf/ctf.py @@ -33,7 +33,7 @@ @fill_doc def read_raw_ctf( directory, system_clock="truncate", preload=False, clean_names=False, verbose=None -): +) -> "RawCTF": """Raw object from CTF directory. Parameters @@ -55,11 +55,6 @@ def read_raw_ctf( ------- raw : instance of RawCTF The raw data. - See :class:`mne.io.Raw` for documentation of attributes and methods. - - See Also - -------- - mne.io.Raw : Documentation of attributes and methods of RawCTF. Notes ----- diff --git a/mne/io/curry/curry.py b/mne/io/curry/curry.py index e5b8ce02ed3..27fdc3ce7bc 100644 --- a/mne/io/curry/curry.py +++ b/mne/io/curry/curry.py @@ -542,7 +542,7 @@ def _read_annotations_curry(fname, sfreq="auto"): @verbose -def read_raw_curry(fname, preload=False, verbose=None): +def read_raw_curry(fname, preload=False, verbose=None) -> "RawCurry": """Read raw data from Curry files. Parameters diff --git a/mne/io/edf/edf.py b/mne/io/edf/edf.py index 7c02642ec8f..d9a9c7f2711 100644 --- a/mne/io/edf/edf.py +++ b/mne/io/edf/edf.py @@ -1567,7 +1567,7 @@ def read_raw_edf( encoding="utf8", *, verbose=None, -): +) -> RawEDF: """Reader function for EDF and EDF+ files. Parameters @@ -1701,7 +1701,7 @@ def read_raw_bdf( encoding="utf8", *, verbose=None, -): +) -> RawEDF: """Reader function for BDF files. Parameters @@ -1828,7 +1828,7 @@ def read_raw_gdf( include=None, preload=False, verbose=None, -): +) -> RawGDF: """Reader function for GDF files. Parameters diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index f4beee56119..4e9b9da1c5e 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -293,7 +293,7 @@ def read_raw_eeglab( uint16_codec=None, montage_units="auto", verbose=None, -): +) -> "RawEEGLAB": r"""Read an EEGLAB .set file. Parameters diff --git a/mne/io/egi/egi.py b/mne/io/egi/egi.py index 32cb71db28f..455c47ae726 100644 --- a/mne/io/egi/egi.py +++ b/mne/io/egi/egi.py @@ -104,7 +104,7 @@ def read_raw_egi( preload=False, channel_naming="E%d", verbose=None, -): +) -> "RawEGI": """Read EGI simple binary as raw object. .. note:: This function attempts to create a synthetic trigger channel. diff --git a/mne/io/eximia/eximia.py b/mne/io/eximia/eximia.py index 0af9d9daf5d..8b85768fedc 100644 --- a/mne/io/eximia/eximia.py +++ b/mne/io/eximia/eximia.py @@ -13,7 +13,7 @@ @fill_doc -def read_raw_eximia(fname, preload=False, verbose=None): +def read_raw_eximia(fname, preload=False, verbose=None) -> "RawEximia": """Reader for an eXimia EEG file. 
Parameters diff --git a/mne/io/eyelink/eyelink.py b/mne/io/eyelink/eyelink.py index 196aef408b1..1eaf82500ae 100644 --- a/mne/io/eyelink/eyelink.py +++ b/mne/io/eyelink/eyelink.py @@ -28,7 +28,7 @@ def read_raw_eyelink( find_overlaps=False, overlap_threshold=0.05, verbose=None, -): +) -> "RawEyelink": """Reader for an Eyelink ``.asc`` file. Parameters diff --git a/mne/io/fieldtrip/fieldtrip.py b/mne/io/fieldtrip/fieldtrip.py index 8d054b076ee..bff6869e147 100644 --- a/mne/io/fieldtrip/fieldtrip.py +++ b/mne/io/fieldtrip/fieldtrip.py @@ -20,7 +20,7 @@ ) -def read_raw_fieldtrip(fname, info, data_name="data"): +def read_raw_fieldtrip(fname, info, data_name="data") -> RawArray: """Load continuous (raw) data from a FieldTrip preprocessing structure. This function expects to find single trial raw data (FT_DATATYPE_RAW) in diff --git a/mne/io/fiff/raw.py b/mne/io/fiff/raw.py index f4053f88b37..1c13189f723 100644 --- a/mne/io/fiff/raw.py +++ b/mne/io/fiff/raw.py @@ -502,7 +502,7 @@ def _check_entry(first, nent): @fill_doc def read_raw_fif( fname, allow_maxshield=False, preload=False, on_split_missing="raise", verbose=None -): +) -> Raw: """Reader function for Raw FIF data. Parameters diff --git a/mne/io/fil/fil.py b/mne/io/fil/fil.py index ea990b741de..99e2b77b2d8 100644 --- a/mne/io/fil/fil.py +++ b/mne/io/fil/fil.py @@ -25,7 +25,9 @@ @verbose -def read_raw_fil(binfile, precision="single", preload=False, *, verbose=None): +def read_raw_fil( + binfile, precision="single", preload=False, *, verbose=None +) -> "RawFIL": """Raw object from FIL-OPMEG formatted data. Parameters diff --git a/mne/io/hitachi/hitachi.py b/mne/io/hitachi/hitachi.py index 0f046bb37e6..a81095712d1 100644 --- a/mne/io/hitachi/hitachi.py +++ b/mne/io/hitachi/hitachi.py @@ -17,7 +17,7 @@ @fill_doc -def read_raw_hitachi(fname, preload=False, verbose=None): +def read_raw_hitachi(fname, preload=False, verbose=None) -> "RawHitachi": """Reader for a Hitachi fNIRS recording. Parameters diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index 88af0b2dc85..e6165a543d4 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -913,7 +913,7 @@ def read_raw_kit( allow_unknown_format=False, standardize_names=False, verbose=None, -): +) -> RawKIT: r"""Reader function for Ricoh/KIT conversion to FIF. Parameters diff --git a/mne/io/nedf/nedf.py b/mne/io/nedf/nedf.py index c16f19d91b4..8e37cd36d54 100644 --- a/mne/io/nedf/nedf.py +++ b/mne/io/nedf/nedf.py @@ -202,7 +202,7 @@ def _convert_eeg(chunks, n_eeg, n_tot): @verbose -def read_raw_nedf(filename, preload=False, verbose=None): +def read_raw_nedf(filename, preload=False, verbose=None) -> "RawNedf": """Read NeuroElectrics .nedf files. NEDF file versions starting from 1.3 are supported. diff --git a/mne/io/neuralynx/neuralynx.py b/mne/io/neuralynx/neuralynx.py index 06d5000fcb6..4b6dea1a339 100644 --- a/mne/io/neuralynx/neuralynx.py +++ b/mne/io/neuralynx/neuralynx.py @@ -14,7 +14,7 @@ @fill_doc def read_raw_neuralynx( fname, *, preload=False, exclude_fname_patterns=None, verbose=None -): +) -> "RawNeuralynx": """Reader for Neuralynx files. Parameters diff --git a/mne/io/nicolet/nicolet.py b/mne/io/nicolet/nicolet.py index 37855b97054..9b5fa2b3ae5 100644 --- a/mne/io/nicolet/nicolet.py +++ b/mne/io/nicolet/nicolet.py @@ -19,7 +19,7 @@ @fill_doc def read_raw_nicolet( input_fname, ch_type, eog=(), ecg=(), emg=(), misc=(), preload=False, verbose=None -): +) -> "RawNicolet": """Read Nicolet data as raw object. 
..note:: This reader takes data files with the extension ``.data`` as an diff --git a/mne/io/nihon/nihon.py b/mne/io/nihon/nihon.py index 919719f24a2..fb7855e5323 100644 --- a/mne/io/nihon/nihon.py +++ b/mne/io/nihon/nihon.py @@ -24,7 +24,7 @@ def _ensure_path(fname): @fill_doc -def read_raw_nihon(fname, preload=False, verbose=None): +def read_raw_nihon(fname, preload=False, verbose=None) -> "RawNihon": """Reader for an Nihon Kohden EEG file. Parameters diff --git a/mne/io/nirx/nirx.py b/mne/io/nirx/nirx.py index 98d81f9c268..1fb51b50380 100644 --- a/mne/io/nirx/nirx.py +++ b/mne/io/nirx/nirx.py @@ -34,7 +34,9 @@ @fill_doc -def read_raw_nirx(fname, saturated="annotate", preload=False, verbose=None): +def read_raw_nirx( + fname, saturated="annotate", preload=False, verbose=None +) -> "RawNIRX": """Reader for a NIRX fNIRS recording. Parameters diff --git a/mne/io/nsx/nsx.py b/mne/io/nsx/nsx.py index 95448b1b22c..2a39efa2989 100644 --- a/mne/io/nsx/nsx.py +++ b/mne/io/nsx/nsx.py @@ -88,7 +88,7 @@ @fill_doc def read_raw_nsx( input_fname, stim_channel=True, eog=None, misc=None, preload=False, *, verbose=None -): +) -> "RawNSX": """Reader function for NSx (Blackrock Microsystems) files. Parameters diff --git a/mne/io/persyst/persyst.py b/mne/io/persyst/persyst.py index 44334fa4555..0ef6723ba11 100644 --- a/mne/io/persyst/persyst.py +++ b/mne/io/persyst/persyst.py @@ -18,7 +18,7 @@ @fill_doc -def read_raw_persyst(fname, preload=False, verbose=None): +def read_raw_persyst(fname, preload=False, verbose=None) -> "RawPersyst": """Reader for a Persyst (.lay/.dat) recording. Parameters diff --git a/mne/io/snirf/_snirf.py b/mne/io/snirf/_snirf.py index e32b32370b3..0fc9ee246e9 100644 --- a/mne/io/snirf/_snirf.py +++ b/mne/io/snirf/_snirf.py @@ -21,7 +21,9 @@ @fill_doc -def read_raw_snirf(fname, optode_frame="unknown", preload=False, verbose=None): +def read_raw_snirf( + fname, optode_frame="unknown", preload=False, verbose=None +) -> "RawSNIRF": """Reader for a continuous wave SNIRF data. .. 
note:: This reader supports the .snirf file type only, diff --git a/pyproject.toml b/pyproject.toml index c23caa13d06..39c6876e43d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -295,10 +295,10 @@ ignore_messages = "^.*(Unknown target name|Undefined substitution referenced)[^` [tool.mypy] ignore_errors = true scripts_are_modules = true -strict = true +strict = false [[tool.mypy.overrides]] -module = ['mne.evoked', 'mne.io'] +module = ['mne.annotations', 'mne.epochs', 'mne.evoked', 'mne.io'] ignore_errors = false # Ignore "attr-defined" until we fix stuff like: # - BunchConstNamed: '"BunchConstNamed" has no attribute "FIFFB_EVOKED"' From 40256aef4dd5b417be91ce544fa8031cc2abd9bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Richard=20H=C3=B6chenberger?= Date: Fri, 15 Dec 2023 19:22:48 +0100 Subject: [PATCH 16/37] MRG: Add return type hints to all `read_epochs_*()` functions (#12297) --- doc/changes/devel.rst | 5 +++-- doc/conf.py | 25 ++++++++++++------------- mne/io/brainvision/brainvision.py | 2 +- mne/io/bti/bti.py | 2 +- mne/io/eeglab/eeglab.py | 2 +- mne/io/fieldtrip/fieldtrip.py | 4 +++- mne/io/kit/kit.py | 2 +- mne/io/nedf/nedf.py | 2 +- 8 files changed, 23 insertions(+), 21 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index feae12dcbb2..d993f4cc26c 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -25,7 +25,8 @@ In this version, we started adding type hints (also known as "type annotations") This meta information will be used by development environments (IDEs) like VS Code and PyCharm automatically to provide better assistance such as tab completion or error detection even before running your code. -So far, we've only added return type hints to :func:`mne.read_evokeds` and :func:`mne.io.read_raw`. Now your editors will know: +So far, we've only added return type hints to :func:`mne.io.read_raw`, :func:`mne.read_epochs`, :func:`mne.read_evokeds` and +all format-specific ``read_raw_*()`` and ``read_epochs_*()`` functions. Now your editors will know: these functions return evoked and raw data, respectively. We are planning add type hints to more functions after careful evaluation in the future. @@ -36,7 +37,7 @@ Enhancements ~~~~~~~~~~~~ - Speed up export to .edf in :func:`mne.export.export_raw` by using ``edfio`` instead of ``EDFlib-Python`` (:gh:`12218` by :newcontrib:`Florian Hofer`) - Inform the user about channel discrepancy between provided info, forward operator, and/or covariance matrices in :func:`mne.beamformer.make_lcmv` (:gh:`12238` by :newcontrib:`Nikolai Kapralov`) -- We added type hints for the return values of :func:`mne.read_evokeds` and :func:`mne.io.read_raw`. Development environments like VS Code or PyCharm will now provide more help when using these functions in your code. (:gh:`12250` by `Richard Höchenberger`_ and `Eric Larson`_) +- We added type hints for the return values of raw, epochs, and evoked reading functions. Development environments like VS Code or PyCharm will now provide more help when using these functions in your code. (:gh:`12250`, :gh:`12297` by `Richard Höchenberger`_ and `Eric Larson`_) - Add ``method="polyphase"`` to :meth:`mne.io.Raw.resample` and related functions to allow resampling using :func:`scipy.signal.upfirdn` (:gh:`12268` by `Eric Larson`_) - The package build backend was switched from ``setuptools`` to ``hatchling``. This will only affect users who build and install MNE-Python from source. 
(:gh:`12269`, :gh:`12281` by `Richard Höchenberger`_) - :meth:`mne.Annotations.to_data_frame` can now output different formats for the ``onset`` column: seconds, milliseconds, datetime objects, and timedelta objects. (:gh:`12289` by `Daniel McCloy`_) diff --git a/doc/conf.py b/doc/conf.py index 3b544f2a03e..c855a82f0cc 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -6,32 +6,32 @@ # License: BSD-3-Clause # Copyright the MNE-Python contributors. -from datetime import datetime, timezone import faulthandler import gc -from importlib.metadata import metadata import os -from pathlib import Path import subprocess import sys import time import warnings +from datetime import datetime, timezone +from importlib.metadata import metadata +from pathlib import Path -import numpy as np import matplotlib +import numpy as np import sphinx -from sphinx.domains.changeset import versionlabels -from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder from numpydoc import docscrape +from sphinx.domains.changeset import versionlabels +from sphinx_gallery.sorting import ExplicitOrder, FileNameSortKey import mne import mne.html_templates._templates from mne.tests.test_docstring_parameters import error_ignores from mne.utils import ( - linkcode_resolve, # noqa, analysis:ignore _assert_no_instances, - sizeof_fmt, + linkcode_resolve, # noqa, analysis:ignore run_subprocess, + sizeof_fmt, ) from mne.viz import Brain # noqa @@ -270,6 +270,8 @@ "Spectrum": "mne.time_frequency.Spectrum", "EpochsSpectrum": "mne.time_frequency.EpochsSpectrum", "EpochsFIF": "mne.Epochs", + "EpochsEEGLAB": "mne.Epochs", + "EpochsKIT": "mne.Epochs", "RawBOXY": "mne.io.Raw", "RawBrainVision": "mne.io.Raw", "RawBTi": "mne.io.Raw", @@ -685,11 +687,7 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): .. minigallery:: {1} -""".format( - name.split(".")[-1], name - ).split( - "\n" - ) +""".format(name.split(".")[-1], name).split("\n") # -- Other extension configuration ------------------------------------------- @@ -784,6 +782,7 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): # Type hints for undocumented types ("py:.*", r"mne\.io\..*\.Raw.*"), # RawEDF etc. ("py:.*", r"mne\.epochs\.EpochsFIF.*"), + ("py:.*", r"mne\.io\..*\.Epochs.*"), # EpochsKIT etc. ( "py:obj", "(filename|metadata|proj|times|tmax|tmin|annotations|ch_names|compensation_grade|filenames|first_samp|first_time|last_samp|n_times|proj|times|tmax|tmin)", diff --git a/mne/io/brainvision/brainvision.py b/mne/io/brainvision/brainvision.py index 3a4f63718c3..e0f4e5a5c57 100644 --- a/mne/io/brainvision/brainvision.py +++ b/mne/io/brainvision/brainvision.py @@ -921,7 +921,7 @@ def read_raw_brainvision( scale=1.0, preload=False, verbose=None, -): +) -> RawBrainVision: """Reader for Brain Vision EEG file. Parameters diff --git a/mne/io/bti/bti.py b/mne/io/bti/bti.py index 91fb2a112fd..8b9a6ac973f 100644 --- a/mne/io/bti/bti.py +++ b/mne/io/bti/bti.py @@ -1435,7 +1435,7 @@ def read_raw_bti( eog_ch=("E63", "E64"), preload=False, verbose=None, -) -> "RawBTi": +) -> RawBTi: """Raw object from 4D Neuroimaging MagnesWH3600 data. .. note:: diff --git a/mne/io/eeglab/eeglab.py b/mne/io/eeglab/eeglab.py index 4e9b9da1c5e..cd383c6ddb4 100644 --- a/mne/io/eeglab/eeglab.py +++ b/mne/io/eeglab/eeglab.py @@ -349,7 +349,7 @@ def read_epochs_eeglab( uint16_codec=None, montage_units="auto", verbose=None, -): +) -> "EpochsEEGLAB": r"""Reader function for EEGLAB epochs files. 
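# The added return annotations can be checked directly against an installed
# copy of MNE-Python; this sketch only introspects the function object, and the
# trailing comment notes the expected result (give or take the exact repr):
import inspect
import mne
sig = inspect.signature(mne.io.read_raw_fif)
print(sig.return_annotation)  # the FIF Raw class, so editors can offer Raw methods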
Parameters diff --git a/mne/io/fieldtrip/fieldtrip.py b/mne/io/fieldtrip/fieldtrip.py index bff6869e147..3dac2992be1 100644 --- a/mne/io/fieldtrip/fieldtrip.py +++ b/mne/io/fieldtrip/fieldtrip.py @@ -83,7 +83,9 @@ def read_raw_fieldtrip(fname, info, data_name="data") -> RawArray: return raw -def read_epochs_fieldtrip(fname, info, data_name="data", trialinfo_column=0): +def read_epochs_fieldtrip( + fname, info, data_name="data", trialinfo_column=0 +) -> EpochsArray: """Load epoched data from a FieldTrip preprocessing structure. This function expects to find epoched data in the structure data_name is diff --git a/mne/io/kit/kit.py b/mne/io/kit/kit.py index e6165a543d4..2aaa79017ba 100644 --- a/mne/io/kit/kit.py +++ b/mne/io/kit/kit.py @@ -981,7 +981,7 @@ def read_epochs_kit( allow_unknown_format=False, standardize_names=False, verbose=None, -): +) -> EpochsKIT: """Reader function for Ricoh/KIT epochs files. Parameters diff --git a/mne/io/nedf/nedf.py b/mne/io/nedf/nedf.py index 8e37cd36d54..df6030f31c1 100644 --- a/mne/io/nedf/nedf.py +++ b/mne/io/nedf/nedf.py @@ -202,7 +202,7 @@ def _convert_eeg(chunks, n_eeg, n_tot): @verbose -def read_raw_nedf(filename, preload=False, verbose=None) -> "RawNedf": +def read_raw_nedf(filename, preload=False, verbose=None) -> RawNedf: """Read NeuroElectrics .nedf files. NEDF file versions starting from 1.3 are supported. From b1329c3ae59d0da3646b0c667441e12ee0f7bd8d Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Fri, 15 Dec 2023 13:51:05 -0500 Subject: [PATCH 17/37] MAINT: Use HTML5 embedding for examples (#12298) --- doc/_static/style.css | 6 ++++++ doc/conf.py | 7 +++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/doc/_static/style.css b/doc/_static/style.css index 61eea678830..9b289b6c177 100644 --- a/doc/_static/style.css +++ b/doc/_static/style.css @@ -380,3 +380,9 @@ img.hidden { td.justify { text-align-last: justify; } + +/* Matplotlib HTML5 video embedding */ +div.sphx-glr-animation video { + max-width: 100%; + height: auto; +} diff --git a/doc/conf.py b/doc/conf.py index c855a82f0cc..837282c5b56 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -174,10 +174,7 @@ "imageio": ("https://imageio.readthedocs.io/en/latest", None), "picard": ("https://pierreablin.github.io/picard/", None), "eeglabio": ("https://eeglabio.readthedocs.io/en/latest", None), - "dipy": ( - "https://dipy.org/documentation/1.7.0/", - "https://dipy.org/documentation/1.7.0/objects.inv/", - ), + "dipy": ("https://docs.dipy.org/stable", None), "pybv": ("https://pybv.readthedocs.io/en/latest/", None), "pyqtgraph": ("https://pyqtgraph.readthedocs.io/en/latest/", None), } @@ -481,6 +478,8 @@ def __call__(self, gallery_conf, fname, when): plt.ioff() plt.rcParams["animation.embed_limit"] = 40.0 plt.rcParams["figure.raise_window"] = False + # https://github.com/sphinx-gallery/sphinx-gallery/pull/1243#issue-2043332860 + plt.rcParams["animation.html"] = "html5" # neo holds on to an exception, which in turn holds a stack frame, # which will keep alive the global vars during SG execution try: From 7242c291fdd572c58143281a64688968463b928a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 09:12:39 -0500 Subject: [PATCH 18/37] Bump actions/upload-artifact from 3 to 4 (#12302) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.github/workflows/release.yml b/.github/workflows/release.yml index c34bb80fd38..6523fb3204d 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -28,7 +28,7 @@ jobs: pip install build twine - run: python -m build --sdist --wheel - run: twine check --strict dist/* - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: dist path: dist From 60e46f0b6c184e1bfb9c399124fa7b619a96622b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 09:12:47 -0500 Subject: [PATCH 19/37] Bump github/codeql-action from 2 to 3 (#12303) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/codeql-analysis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index a06f3336543..7f348f80778 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -42,7 +42,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -56,7 +56,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v2 + uses: github/codeql-action/autobuild@v3 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -69,4 +69,4 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 From cf2ca7ea723bc92ec1fdb77abc9eafe165160420 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 09:12:59 -0500 Subject: [PATCH 20/37] Bump actions/download-artifact from 3 to 4 (#12304) Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 6523fb3204d..c9895e11919 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -43,7 +43,7 @@ jobs: name: pypi url: https://pypi.org/p/mne steps: - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: dist path: dist From 0a0cad8802e832669bb954a3bdd8e08bfaecf784 Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Mon, 18 Dec 2023 13:37:22 -0600 Subject: [PATCH 21/37] fix icon link colors (#12301) --- doc/_static/style.css | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/doc/_static/style.css b/doc/_static/style.css index 9b289b6c177..ccf032c4a7b 100644 --- a/doc/_static/style.css +++ b/doc/_static/style.css @@ -17,7 +17,7 @@ html[data-theme="light"] { /* topbar logo links */ --mne-color-github: #000; - --mne-color-discourse: #000; + --mne-color-discourse: #d0232b; --mne-color-mastodon: #2F0C7A; /* code block copy button */ --copybtn-opacity: 0.75; @@ -222,16 +222,16 @@ 
aside.footnote:last-child { } /* ******************************************************* navbar icon links */ -#navbar-icon-links i.fa-square-github::before { +.navbar-icon-links i.fa-square-github::before { color: var(--mne-color-github); } -#navbar-icon-links i.fa-discourse::before { +.navbar-icon-links i.fa-discourse::before { color: var(--mne-color-discourse); } -#navbar-icon-links i.fa-discord::before { +.navbar-icon-links i.fa-discord::before { color: var(--mne-color-discord); } -#navbar-icon-links i.fa-mastodon::before { +.navbar-icon-links i.fa-mastodon::before { color: var(--mne-color-mastodon); } From f1a8120d29a162ec42c85e9d64136e3c2405da2c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 16:00:43 -0500 Subject: [PATCH 22/37] [pre-commit.ci] pre-commit autoupdate (#12307) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cd6d522d4e7..fed7db76310 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: # Ruff mne - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.7 + rev: v0.1.8 hooks: - id: ruff name: ruff lint mne @@ -13,7 +13,7 @@ repos: # Ruff tutorials and examples - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.7 + rev: v0.1.8 hooks: - id: ruff name: ruff lint tutorials and examples From 4742914ff898d22a3c3012aeefaf2a8301f2c2f8 Mon Sep 17 00:00:00 2001 From: Thomas Samuel Binns Date: Tue, 19 Dec 2023 16:00:09 +0000 Subject: [PATCH 23/37] Switch from `epoch_data` to `data` for TFR array functions (#12308) --- doc/changes/devel.rst | 2 +- mne/time_frequency/multitaper.py | 20 ++++++++++++++++---- mne/time_frequency/tfr.py | 20 ++++++++++++++++---- 3 files changed, 33 insertions(+), 9 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index d993f4cc26c..fdf307bbbd1 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -52,4 +52,4 @@ Bugs API changes ~~~~~~~~~~~ -- None yet +- The parameter for providing data to :func:`mne.time_frequency.tfr_array_morlet` and :func:`mne.time_frequency.tfr_array_multitaper` has been switched from ``epoch_data`` to ``data``. Only use the ``data`` parameter to avoid a warning (:gh:`12308` by `Thomas Binns`_) diff --git a/mne/time_frequency/multitaper.py b/mne/time_frequency/multitaper.py index c6af2b20c60..1709d6c16d1 100644 --- a/mne/time_frequency/multitaper.py +++ b/mne/time_frequency/multitaper.py @@ -465,7 +465,7 @@ def psd_array_multitaper( @verbose def tfr_array_multitaper( - epoch_data, + data, sfreq, freqs, n_cycles=7.0, @@ -477,6 +477,7 @@ def tfr_array_multitaper( n_jobs=None, *, verbose=None, + epoch_data=None, ): """Compute Time-Frequency Representation (TFR) using DPSS tapers. @@ -486,7 +487,7 @@ def tfr_array_multitaper( Parameters ---------- - epoch_data : array of shape (n_epochs, n_channels, n_times) + data : array of shape (n_epochs, n_channels, n_times) The epochs. sfreq : float Sampling frequency of the data in Hz. @@ -509,11 +510,15 @@ def tfr_array_multitaper( coherence across trials. %(n_jobs)s %(verbose)s + epoch_data : None + Deprecated parameter for providing epoched data as of 1.7, will be replaced with + the ``data`` parameter in 1.8. New code should use the ``data`` parameter. If + ``epoch_data`` is not ``None``, a warning will be raised. 
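# A minimal sketch of the preferred calling convention after this change
# (synthetic random data; shapes and frequencies are arbitrary examples):
import numpy as np
from mne.time_frequency import tfr_array_multitaper
sfreq = 1000.0
rng = np.random.default_rng(0)
data = rng.standard_normal((5, 3, 1000))  # (n_epochs, n_channels, n_times)
freqs = np.arange(5.0, 40.0, 5.0)
power = tfr_array_multitaper(
    data, sfreq=sfreq, freqs=freqs, n_cycles=freqs / 2.0, output="power"
)  # shape (n_epochs, n_channels, n_freqs, n_times); passing epoch_data= now warns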
Returns ------- out : array - Time frequency transform of ``epoch_data``. + Time frequency transform of ``data``. - if ``output in ('complex',' 'phase')``, array of shape ``(n_epochs, n_chans, n_tapers, n_freqs, n_times)`` @@ -543,8 +548,15 @@ def tfr_array_multitaper( """ from .tfr import _compute_tfr + if epoch_data is not None: + warn( + "The parameter for providing data will be switched from `epoch_data` to " + "`data` in 1.8. Use the `data` parameter to avoid this warning.", + FutureWarning, + ) + return _compute_tfr( - epoch_data, + data, freqs, sfreq=sfreq, method="multitaper", diff --git a/mne/time_frequency/tfr.py b/mne/time_frequency/tfr.py index 279e2c79879..ec53cd848f6 100644 --- a/mne/time_frequency/tfr.py +++ b/mne/time_frequency/tfr.py @@ -973,7 +973,7 @@ def tfr_morlet( @verbose def tfr_array_morlet( - epoch_data, + data, sfreq, freqs, n_cycles=7.0, @@ -983,6 +983,7 @@ def tfr_array_morlet( output="complex", n_jobs=None, verbose=None, + epoch_data=None, ): """Compute Time-Frequency Representation (TFR) using Morlet wavelets. @@ -991,7 +992,7 @@ def tfr_array_morlet( Parameters ---------- - epoch_data : array of shape (n_epochs, n_channels, n_times) + data : array of shape (n_epochs, n_channels, n_times) The epochs. sfreq : float | int Sampling frequency of the data. @@ -1015,11 +1016,15 @@ def tfr_array_morlet( The number of epochs to process at the same time. The parallelization is implemented across channels. Default 1. %(verbose)s + epoch_data : None + Deprecated parameter for providing epoched data as of 1.7, will be replaced with + the ``data`` parameter in 1.8. New code should use the ``data`` parameter. If + ``epoch_data`` is not ``None``, a warning will be raised. Returns ------- out : array - Time frequency transform of epoch_data. + Time frequency transform of ``data``. - if ``output in ('complex', 'phase', 'power')``, array of shape ``(n_epochs, n_chans, n_freqs, n_times)`` @@ -1049,8 +1054,15 @@ def tfr_array_morlet( ---------- .. footbibliography:: """ + if epoch_data is not None: + warn( + "The parameter for providing data will be switched from `epoch_data` to " + "`data` in 1.8. Use the `data` parameter to avoid this warning.", + FutureWarning, + ) + return _compute_tfr( - epoch_data=epoch_data, + epoch_data=data, freqs=freqs, sfreq=sfreq, method="morlet", From c52208bedfb36e9157678d091f4b4e03ec96c96d Mon Sep 17 00:00:00 2001 From: Daniel McCloy Date: Tue, 19 Dec 2023 13:36:55 -0600 Subject: [PATCH 24/37] fix 404 link on devel landing page (#12316) --- CONTRIBUTING.md | 2 +- doc/development/index.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index bec834c7fdb..e653797b3ad 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -5,5 +5,5 @@ MNE-Python is maintained by a community of scientists and research labs. The pro Users and contributors to MNE-Python are expected to follow our [code of conduct](https://github.com/mne-tools/.github/blob/main/CODE_OF_CONDUCT.md). -The [contributing guide](https://mne.tools/dev/install/contributing.html) has details on the preferred contribution workflow +The [contributing guide](https://mne.tools/dev/development/contributing.html) has details on the preferred contribution workflow and the recommended system configuration for a smooth contribution/development experience. diff --git a/doc/development/index.rst b/doc/development/index.rst index 1bdc5322f36..98fc28f8e7f 100644 --- a/doc/development/index.rst +++ b/doc/development/index.rst @@ -24,7 +24,7 @@ experience. 
.. _`opening an issue`: https://github.com/mne-tools/mne-python/issues/new/choose .. _`MNE Forum`: https://mne.discourse.group .. _`code of conduct`: https://github.com/mne-tools/.github/blob/main/CODE_OF_CONDUCT.md -.. _`contributing guide`: https://mne.tools/dev/install/contributing.html +.. _`contributing guide`: https://mne.tools/dev/development/contributing.html .. toctree:: :hidden: From 0f59894a2491797c996272c23c39412a62369f5b Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 19 Dec 2023 17:31:36 -0500 Subject: [PATCH 25/37] MAINT: Work around bad SciPy nightly wheels (#12317) --- tools/azure_dependencies.sh | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/azure_dependencies.sh b/tools/azure_dependencies.sh index 70c82baf1c1..cce220a8188 100755 --- a/tools/azure_dependencies.sh +++ b/tools/azure_dependencies.sh @@ -9,12 +9,13 @@ elif [ "${TEST_MODE}" == "pip-pre" ]; then python -m pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://www.riverbankcomputing.com/pypi/simple" "PyQt6!=6.6.1" PyQt6-sip PyQt6-Qt6 "PyQt6-Qt6!=6.6.1" echo "Numpy etc." # See github_actions_dependencies.sh for comments - python -m pip install $STD_ARGS --only-binary "numpy" numpy - python -m pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" "numpy>=2.0.0.dev0" "scipy>=1.12.0.dev0" scikit-learn matplotlib statsmodels + # Until https://github.com/scipy/scipy/issues/19605 and + # https://github.com/scipy/scipy/issues/19713 are resolved, we can't use the NumPy + # 2.0 wheels :( + python -m pip install $STD_ARGS --only-binary numpy scipy h5py + python -m pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" scikit-learn matplotlib statsmodels # echo "dipy" # python -m pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://pypi.anaconda.org/scipy-wheels-nightly/simple" dipy - # echo "h5py" - # python -m pip install $STD_ARGS --only-binary ":all:" -f "https://7933911d6844c6c53a7d-47bd50c35cd79bd838daf386af554a83.ssl.cf2.rackcdn.com" h5py # echo "OpenMEEG" # pip install $STD_ARGS --only-binary ":all:" --extra-index-url "https://test.pypi.org/simple" openmeeg echo "vtk" From 97512a15a74c6a610132c83e8e420dd4a7caf4f5 Mon Sep 17 00:00:00 2001 From: Kristijan Armeni Date: Wed, 20 Dec 2023 09:11:50 -0500 Subject: [PATCH 26/37] BUG: handle temporal discontinuities in Neuralynx `.ncs` files (#12279) Co-authored-by: Eric Larson --- doc/changes/devel.rst | 1 + environment.yml | 1 + mne/datasets/config.py | 4 +- mne/io/neuralynx/neuralynx.py | 214 ++++++++++++++++++++--- mne/io/neuralynx/tests/test_neuralynx.py | 108 ++++++++++-- mne/utils/config.py | 1 + pyproject.toml | 2 + 7 files changed, 289 insertions(+), 42 deletions(-) diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index fdf307bbbd1..565a9f9fbf0 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -49,6 +49,7 @@ Bugs - Remove incorrect type hints in :func:`mne.io.read_raw_neuralynx` (:gh:`12236` by `Richard Höchenberger`_) - Fix bug where parent directory existence was not checked properly in :meth:`mne.io.Raw.save` (:gh:`12282` by `Eric Larson`_) - ``defusedxml`` is now an optional (rather than required) dependency and needed when reading EGI-MFF data, NEDF data, and BrainVision montages (:gh:`12264` by `Eric Larson`_) +- Correctly handle temporal gaps in Neuralynx .ncs files via :func:`mne.io.read_raw_neuralynx` 
(:gh:`12279` by `Kristijan Armeni`_ and `Eric Larson`_) API changes ~~~~~~~~~~~ diff --git a/environment.yml b/environment.yml index 8978dfc64e8..96c89fe472b 100644 --- a/environment.yml +++ b/environment.yml @@ -61,3 +61,4 @@ dependencies: - mamba - lazy_loader - defusedxml + - python-neo diff --git a/mne/datasets/config.py b/mne/datasets/config.py index b548f5273f2..b7780778f24 100644 --- a/mne/datasets/config.py +++ b/mne/datasets/config.py @@ -88,7 +88,7 @@ # respective repos, and make a new release of the dataset on GitHub. Then # update the checksum in the MNE_DATASETS dict below, and change version # here: ↓↓↓↓↓ ↓↓↓ -RELEASES = dict(testing="0.150", misc="0.27") +RELEASES = dict(testing="0.151", misc="0.27") TESTING_VERSIONED = f'mne-testing-data-{RELEASES["testing"]}' MISC_VERSIONED = f'mne-misc-data-{RELEASES["misc"]}' @@ -112,7 +112,7 @@ # Testing and misc are at the top as they're updated most often MNE_DATASETS["testing"] = dict( archive_name=f"{TESTING_VERSIONED}.tar.gz", - hash="md5:0b7452daef4d19132505b5639d695628", + hash="md5:5832b4d44f0423d22305fa61cb75bc25", url=( "https://codeload.github.com/mne-tools/mne-testing-data/" f'tar.gz/{RELEASES["testing"]}' diff --git a/mne/io/neuralynx/neuralynx.py b/mne/io/neuralynx/neuralynx.py index 4b6dea1a339..1c007ba5787 100644 --- a/mne/io/neuralynx/neuralynx.py +++ b/mne/io/neuralynx/neuralynx.py @@ -7,10 +7,51 @@ from ..._fiff.meas_info import create_info from ..._fiff.utils import _mult_cal_one +from ...annotations import Annotations from ...utils import _check_fname, _soft_import, fill_doc, logger, verbose from ..base import BaseRaw +class AnalogSignalGap(object): + """Dummy object to represent gaps in Neuralynx data. + + Creates a AnalogSignalProxy-like object. + Propagate `signal`, `units`, and `sampling_rate` attributes + to the `AnalogSignal` init returned by `load()`. + + Parameters + ---------- + signal : array-like + Array of shape (n_channels, n_samples) containing the data. + units : str + Units of the data. (e.g., 'uV') + sampling_rate : quantity + Sampling rate of the data. (e.g., 4000 * pq.Hz) + + Returns + ------- + sig : instance of AnalogSignal + A AnalogSignal object representing a gap in Neuralynx data. + """ + + def __init__(self, signal, units, sampling_rate): + self.signal = signal + self.units = units + self.sampling_rate = sampling_rate + + def load(self, channel_indexes): + """Return AnalogSignal object.""" + _soft_import("neo", "Reading NeuralynxIO files", strict=True) + from neo import AnalogSignal + + sig = AnalogSignal( + signal=self.signal[:, channel_indexes], + units=self.units, + sampling_rate=self.sampling_rate, + ) + return sig + + @fill_doc def read_raw_neuralynx( fname, *, preload=False, exclude_fname_patterns=None, verbose=None @@ -59,11 +100,11 @@ def __init__( exclude_fname_patterns=None, verbose=None, ): + fname = _check_fname(fname, "read", True, "fname", need_dir=True) + _soft_import("neo", "Reading NeuralynxIO files", strict=True) from neo.io import NeuralynxIO - fname = _check_fname(fname, "read", True, "fname", need_dir=True) - logger.info(f"Checking files in {fname}") # construct a list of filenames to ignore @@ -81,12 +122,18 @@ def __init__( try: nlx_reader = NeuralynxIO(dirname=fname, exclude_filename=exclude_fnames) except ValueError as e: - raise ValueError( - "It seems some .ncs channels might have different number of samples. " - + "This is likely due to different sampling rates. " - + "Try excluding them with `exclude_fname_patterns` input arg." 
- + f"\nOriginal neo.NeuralynxIO.parse_header() ValueError:\n{e}" - ) + # give a more informative error message and what the user can do about it + if "Incompatible section structures across streams" in str(e): + raise ValueError( + "It seems .ncs channels have different numbers of samples. " + + "This is likely due to different sampling rates. " + + "Try reading in only channels with uniform sampling rate " + + "by excluding other channels with `exclude_fname_patterns` " + + "input argument." + + f"\nOriginal neo.NeuralynxRawIO ValueError:\n{e}" + ) from None + else: + raise info = create_info( ch_types="seeg", @@ -98,32 +145,122 @@ def __init__( # the sample sizes of all segments n_segments = nlx_reader.header["nb_segment"][0] block_id = 0 # assumes there's only one block of recording - n_total_samples = sum( - nlx_reader.get_signal_size(block_id, segment) - for segment in range(n_segments) + + # get segment start/stop times + start_times = np.array( + [nlx_reader.segment_t_start(block_id, i) for i in range(n_segments)] + ) + stop_times = np.array( + [nlx_reader.segment_t_stop(block_id, i) for i in range(n_segments)] ) - # construct an array of shape (n_total_samples,) indicating - # segment membership for each sample - sample2segment = np.concatenate( + # find discontinuous boundaries (of length n-1) + next_start_times = start_times[1::] + previous_stop_times = stop_times[:-1] + seg_diffs = next_start_times - previous_stop_times + + # mark as discontinuous any two segments that have + # start/stop delta larger than sampling period (1/sampling_rate) + logger.info("Checking for temporal discontinuities in Neo data segments.") + delta = 1.5 / info["sfreq"] + gaps = seg_diffs > delta + + seg_gap_dict = {} + + logger.info( + f"N = {gaps.sum()} discontinuous Neo segments detected " + + f"with delta > {delta} sec. " + + "Annotating gaps as BAD_ACQ_SKIP." + if gaps.any() + else "No discontinuities detected." 
+ ) + + gap_starts = stop_times[:-1][gaps] # gap starts at segment offset + gap_stops = start_times[1::][gaps] # gap stops at segment onset + + # (n_gaps,) array of ints giving number of samples per inferred gap + gap_n_samps = np.array( + [ + int(round(stop * info["sfreq"])) - int(round(start * info["sfreq"])) + for start, stop in zip(gap_starts, gap_stops) + ] + ).astype(int) # force an int array (if no gaps, empty array is a float) + + # get sort indices for all segments (valid and gap) in ascending order + all_starts_ids = np.argsort(np.concatenate([start_times, gap_starts])) + + # variable indicating whether each segment is a gap or not + gap_indicator = np.concatenate( [ - np.full(shape=(nlx_reader.get_signal_size(block_id, i),), fill_value=i) - for i in range(n_segments) + np.full(len(start_times), fill_value=0), + np.full(len(gap_starts), fill_value=1), ] ) + gap_indicator = gap_indicator[all_starts_ids].astype(bool) + + # store this in a dict to be passed to _raw_extras + seg_gap_dict = { + "gap_n_samps": gap_n_samps, + "isgap": gap_indicator, # False (data segment) or True (gap segment) + } + + valid_segment_sizes = [ + nlx_reader.get_signal_size(block_id, i) for i in range(n_segments) + ] + + sizes_sorted = np.concatenate([valid_segment_sizes, gap_n_samps])[ + all_starts_ids + ] + + # now construct an (n_samples,) indicator variable + sample2segment = np.concatenate( + [np.full(shape=(n,), fill_value=i) for i, n in enumerate(sizes_sorted)] + ) + + # construct Annotations() + gap_seg_ids = np.unique(sample2segment)[gap_indicator] + gap_start_ids = np.array( + [np.where(sample2segment == seg_id)[0][0] for seg_id in gap_seg_ids] + ) + + # recreate time axis for gap annotations + mne_times = np.arange(0, len(sample2segment)) / info["sfreq"] + + assert len(gap_start_ids) == len(gap_n_samps) + annotations = Annotations( + onset=[mne_times[onset_id] for onset_id in gap_start_ids], + duration=[ + mne_times[onset_id + (n - 1)] - mne_times[onset_id] + for onset_id, n in zip(gap_start_ids, gap_n_samps) + ], + description=["BAD_ACQ_SKIP"] * len(gap_start_ids), + ) super(RawNeuralynx, self).__init__( info=info, - last_samps=[n_total_samples - 1], + last_samps=[sizes_sorted.sum() - 1], filenames=[fname], preload=preload, - raw_extras=[dict(smp2seg=sample2segment, exclude_fnames=exclude_fnames)], + raw_extras=[ + dict( + smp2seg=sample2segment, + exclude_fnames=exclude_fnames, + segment_sizes=sizes_sorted, + seg_gap_dict=seg_gap_dict, + ) + ], ) + self.set_annotations(annotations) + def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): """Read a chunk of raw data.""" + from neo import Segment from neo.io import NeuralynxIO + # quantities is a dependency of neo so we are guaranteed it exists + from quantities import Hz + nlx_reader = NeuralynxIO( dirname=self._filenames[fi], exclude_filename=self._raw_extras[0]["exclude_fnames"], @@ -136,13 +273,7 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): [len(segment.analogsignals) for segment in neo_block[0].segments] ) == len(neo_block[0].segments) - # collect sizes of each segment - segment_sizes = np.array( - [ - nlx_reader.get_signal_size(0, segment_id) - for segment_id in range(len(neo_block[0].segments)) - ] - ) + segment_sizes = self._raw_extras[fi]["segment_sizes"] # construct a (n_segments, 2) array of the first and last # sample index for each segment relative to the start of the recording @@ -188,15 +319,44 @@ def _read_segment_file(self, data, idx, fi, start, stop, cals, mult): -1, 0 ] # express stop 
sample relative to segment onset + # array containing Segments + segments_arr = np.array(neo_block[0].segments, dtype=object) + + # if gaps were detected, correctly insert gap Segments in between valid Segments + gap_samples = self._raw_extras[fi]["seg_gap_dict"]["gap_n_samps"] + gap_segments = [Segment(f"gap-{i}") for i in range(len(gap_samples))] + + # create AnalogSignal objects representing gap data filled with 0's + sfreq = nlx_reader.get_signal_sampling_rate() + n_chans = ( + np.arange(idx.start, idx.stop, idx.step).size + if type(idx) is slice + else len(idx) # idx can be a slice or an np.array so check both + ) + + for seg, n in zip(gap_segments, gap_samples): + asig = AnalogSignalGap( + signal=np.zeros((n, n_chans)), units="uV", sampling_rate=sfreq * Hz + ) + seg.analogsignals.append(asig) + + n_total_segments = len(neo_block[0].segments + gap_segments) + segments_arr = np.zeros((n_total_segments,), dtype=object) + + # insert inferred gap segments at the right place in between valid segments + isgap = self._raw_extras[0]["seg_gap_dict"]["isgap"] + segments_arr[~isgap] = neo_block[0].segments + segments_arr[isgap] = gap_segments + # now load data from selected segments/channels via - # neo.Segment.AnalogSignal.load() + # neo.Segment.AnalogSignal.load() or AnalogSignalGap.load() all_data = np.concatenate( [ signal.load(channel_indexes=idx).magnitude[ samples[0] : samples[-1] + 1, : ] for seg, samples in zip( - neo_block[0].segments[first_seg : last_seg + 1], sel_samples_local + segments_arr[first_seg : last_seg + 1], sel_samples_local ) for signal in seg.analogsignals ] diff --git a/mne/io/neuralynx/tests/test_neuralynx.py b/mne/io/neuralynx/tests/test_neuralynx.py index 21cb73927a8..1532845ab7a 100644 --- a/mne/io/neuralynx/tests/test_neuralynx.py +++ b/mne/io/neuralynx/tests/test_neuralynx.py @@ -15,6 +15,8 @@ testing_path = data_path(download=False) / "neuralynx" +pytest.importorskip("neo") + def _nlxheader_to_dict(matdict: Dict) -> Dict: """Convert the read-in "Header" field into a dict. 
@@ -65,14 +67,42 @@ def _read_nlx_mat_chan(matfile: str) -> np.ndarray: return x -mne_testing_ncs = [ - "LAHC1.ncs", - "LAHC2.ncs", - "LAHC3.ncs", - "LAHCu1.ncs", # the 'u' files are going to be filtered out - "xAIR1.ncs", - "xEKG1.ncs", -] +def _read_nlx_mat_chan_keep_gaps(matfile: str) -> np.ndarray: + """Read a single channel from a Neuralynx .mat file and keep invalid samples.""" + mat = loadmat(matfile) + + hdr_dict = _nlxheader_to_dict(mat) + + # Nlx2MatCSC.m reads the data in N equal-sized (512-item) chunks + # this array (1, n_chunks) stores the number of valid samples + # per chunk (the last chunk is usually shorter) + n_valid_samples = mat["NumberOfValidSamples"].ravel() + + # read in the artificial zeros so that + # we can compare with the mne padded arrays + ncs_records_with_gaps = [9, 15, 20] + for i in ncs_records_with_gaps: + n_valid_samples[i] = 512 + + # concatenate chunks, respecting the number of valid samples + x = np.concatenate( + [mat["Samples"][0:n, i] for i, n in enumerate(n_valid_samples)] + ) # in ADBits + + # this value is the same for all channels and + # converts data from ADBits to Volts + conversionf = literal_eval(hdr_dict["ADBitVolts"]) + x = x * conversionf + + # if header says input was inverted at acquisition + # (possibly for spike detection or so?), flip it back + # NeuralynxIO does this under the hood in NeuralynxIO.parse_header() + # see this discussion: https://github.com/NeuralEnsemble/python-neo/issues/819 + if hdr_dict["InputInverted"] == "True": + x *= -1 + + return x + expected_chan_names = ["LAHC1", "LAHC2", "LAHC3", "xAIR1", "xEKG1"] @@ -80,15 +110,20 @@ def _read_nlx_mat_chan(matfile: str) -> np.ndarray: @requires_testing_data def test_neuralynx(): """Test basic reading.""" - pytest.importorskip("neo") - from neo.io import NeuralynxIO - excluded_ncs_files = ["LAHCu1.ncs", "LAHCu2.ncs", "LAHCu3.ncs"] + excluded_ncs_files = [ + "LAHCu1.ncs", + "LAHC1_3_gaps.ncs", + "LAHC2_3_gaps.ncs", + ] # ==== MNE-Python ==== # + fname_patterns = ["*u*.ncs", "*3_gaps.ncs"] raw = read_raw_neuralynx( - fname=testing_path, preload=True, exclude_fname_patterns=["*u*.ncs"] + fname=testing_path, + preload=True, + exclude_fname_patterns=fname_patterns, ) # test that channel selection worked @@ -136,5 +171,52 @@ def test_neuralynx(): ) # data _test_raw_reader( - read_raw_neuralynx, fname=testing_path, exclude_fname_patterns=["*u*.ncs"] + read_raw_neuralynx, + fname=testing_path, + exclude_fname_patterns=fname_patterns, + ) + + +@requires_testing_data +def test_neuralynx_gaps(): + """Test gap detection.""" + # ignore files with no gaps + ignored_ncs_files = [ + "LAHC1.ncs", + "LAHC2.ncs", + "LAHC3.ncs", + "xAIR1.ncs", + "xEKG1.ncs", + "LAHCu1.ncs", + ] + raw = read_raw_neuralynx( + fname=testing_path, + preload=True, + exclude_fname_patterns=ignored_ncs_files, + ) + mne_y, _ = raw.get_data(return_times=True) # in V + + # there should be 2 channels with 3 gaps (of 130 samples in total) + n_expected_gaps = 3 + n_expected_missing_samples = 130 + assert len(raw.annotations) == n_expected_gaps, "Wrong number of gaps detected" + assert ( + (mne_y[0, :] == 0).sum() == n_expected_missing_samples + ), "Number of true and inferred missing samples differ" + + # read in .mat files containing original gaps + matchans = ["LAHC1_3_gaps.mat", "LAHC2_3_gaps.mat"] + + # (n_chan, n_samples) array, in V + mat_y = np.stack( + [ + _read_nlx_mat_chan_keep_gaps(os.path.join(testing_path, ch)) + for ch in matchans + ] + ) + + # compare originally modified .ncs arrays with MNE-padded arrays 
+ # and test that we back-inserted 0's at the right places + assert_allclose( + mne_y, mat_y, rtol=1e-6, err_msg="MNE and Nlx2MatCSC.m not all close" ) diff --git a/mne/utils/config.py b/mne/utils/config.py index 77b94508114..62b4d053012 100644 --- a/mne/utils/config.py +++ b/mne/utils/config.py @@ -684,6 +684,7 @@ def sys_info( "mne-connectivity", "mne-icalabel", "mne-bids-pipeline", + "neo", "", ) if dependencies == "developer": diff --git a/pyproject.toml b/pyproject.toml index 39c6876e43d..092e4dd102a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -104,6 +104,7 @@ full = [ "pybv", "snirf", "defusedxml", + "neo", ] # Dependencies for running the test infrastructure @@ -135,6 +136,7 @@ test_extra = [ "imageio>=2.6.1", "imageio-ffmpeg>=0.4.1", "snirf", + "neo", ] # Dependencies for building the docuemntation From 64c56936929edfff7822c94929cdaea97fb80135 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 20 Dec 2023 11:16:35 -0500 Subject: [PATCH 27/37] MAINT: Add bot entry [ci skip] --- pyproject.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 092e4dd102a..34555177bed 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -323,3 +323,9 @@ disable_error_code = [ 'assignment', 'operator', ] + +[tool.changelog-bot] +[tool.changelog-bot.towncrier_changelog] +enabled = true +verify_pr_number = true +changelog_skip_label = "no-changelog-entry-needed" From 5d740c11c375125a25abefd135bc33637401ffec Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 20 Dec 2023 11:23:01 -0500 Subject: [PATCH 28/37] MAINT: More [ci skip] --- pyproject.toml | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index 34555177bed..0e76af897b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -324,6 +324,43 @@ disable_error_code = [ 'operator', ] +[tool.towncrier] +package = "mne" +directory = "doc/changes/devel/" +filename = "doc/changes/devel.rst" +title_format = "{version} ({project_date})" +issue_format = "`#{issue} `__" + +[[tool.towncrier.type]] +directory = "notable" +name = "Notable changes" +showcontent = true + +[[tool.towncrier.type]] +directory = "dependency" +name = "Dependencies" +showcontent = true + +[[tool.towncrier.type]] +directory = "bugfix" +name = "Bugfixes" +showcontent = true + +[[tool.towncrier.type]] +directory = "apichange" +name = "API changes by deprecation" +showcontent = true + +[[tool.towncrier.type]] +directory = "newfeature" +name = "New features" +showcontent = true + +[[tool.towncrier.type]] +directory = "other" +name = "Other changes" +showcontent = true + [tool.changelog-bot] [tool.changelog-bot.towncrier_changelog] enabled = true From 00882bc2d24b07594c080af1a768f970476bdd4c Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Wed, 20 Dec 2023 12:40:40 -0500 Subject: [PATCH 29/37] MAINT: Use towncrier for release notes (#12299) --- .github/workflows/check_changelog.yml | 15 ++++ doc/changes/devel.rst | 55 +------------ doc/changes/devel.rst.template | 34 -------- doc/changes/devel/.gitignore | 1 + doc/changes/devel/12190.bugfix.rst | 1 + doc/changes/devel/12218.newfeature.rst | 1 + doc/changes/devel/12236.bugfix.rst | 1 + doc/changes/devel/12238.newfeature.rst | 1 + doc/changes/devel/12248.bugfix.rst | 1 + doc/changes/devel/12250.newfeature.rst | 1 + doc/changes/devel/12250.notable.rst | 11 +++ doc/changes/devel/12264.dependency.rst | 1 + doc/changes/devel/12268.newfeature.rst | 1 + doc/changes/devel/12269.newfeature.rst | 1 + 
doc/changes/devel/12279.bugfix.rst | 1 + doc/changes/devel/12282.bugfix.rst | 1 + doc/changes/devel/12289.newfeature.rst | 1 + doc/changes/devel/12299.other.rst | 1 + doc/changes/devel/12308.apichange.rst | 1 + doc/conf.py | 14 ++-- doc/development/contributing.rst | 104 ++++++++++++------------- doc/links.inc | 1 + pyproject.toml | 2 + 23 files changed, 106 insertions(+), 145 deletions(-) create mode 100644 .github/workflows/check_changelog.yml delete mode 100644 doc/changes/devel.rst.template create mode 100644 doc/changes/devel/.gitignore create mode 100644 doc/changes/devel/12190.bugfix.rst create mode 100644 doc/changes/devel/12218.newfeature.rst create mode 100644 doc/changes/devel/12236.bugfix.rst create mode 100644 doc/changes/devel/12238.newfeature.rst create mode 100644 doc/changes/devel/12248.bugfix.rst create mode 100644 doc/changes/devel/12250.newfeature.rst create mode 100644 doc/changes/devel/12250.notable.rst create mode 100644 doc/changes/devel/12264.dependency.rst create mode 100644 doc/changes/devel/12268.newfeature.rst create mode 100644 doc/changes/devel/12269.newfeature.rst create mode 100644 doc/changes/devel/12279.bugfix.rst create mode 100644 doc/changes/devel/12282.bugfix.rst create mode 100644 doc/changes/devel/12289.newfeature.rst create mode 100644 doc/changes/devel/12299.other.rst create mode 100644 doc/changes/devel/12308.apichange.rst diff --git a/.github/workflows/check_changelog.yml b/.github/workflows/check_changelog.yml new file mode 100644 index 00000000000..cf59c165258 --- /dev/null +++ b/.github/workflows/check_changelog.yml @@ -0,0 +1,15 @@ +name: Changelog + +on: # yamllint disable-line rule:truthy + pull_request: + types: [opened, synchronize, labeled, unlabeled] + +jobs: + changelog_checker: + name: Check towncrier entry in doc/changes/devel/ + runs-on: ubuntu-latest + steps: + - uses: larsoner/action-towncrier-changelog@co # revert to scientific-python @ 0.1.1 once bug is fixed + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + BOT_USERNAME: changelog-bot diff --git a/doc/changes/devel.rst b/doc/changes/devel.rst index 565a9f9fbf0..0e80d522b51 100644 --- a/doc/changes/devel.rst +++ b/doc/changes/devel.rst @@ -1,56 +1,5 @@ -.. NOTE: we use cross-references to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. NOTE: changes from first-time contributors should be added to the TOP of - the relevant section (Enhancements / Bugs / API changes), and should look - like this (where xxxx is the pull request number): - - - description of enhancement/bugfix/API change (:gh:`xxxx` by - :newcontrib:`Firstname Lastname`) - - Also add a corresponding entry for yourself in doc/changes/names.inc +.. See doc/development/contributing.rst for description of how to add entries. .. _current: -Version 1.7.dev0 (development) ------------------------------- - -In this version, we started adding type hints (also known as "type annotations") to select parts of the codebase. -This meta information will be used by development environments (IDEs) like VS Code and PyCharm automatically to provide -better assistance such as tab completion or error detection even before running your code. 
- -So far, we've only added return type hints to :func:`mne.io.read_raw`, :func:`mne.read_epochs`, :func:`mne.read_evokeds` and -all format-specific ``read_raw_*()`` and ``read_epochs_*()`` functions. Now your editors will know: -these functions return evoked and raw data, respectively. We are planning add type hints to more functions after careful -evaluation in the future. - -You don't need to do anything to benefit from these changes – your editor will pick them up automatically and provide the -enhanced experience if it supports it! - -Enhancements -~~~~~~~~~~~~ -- Speed up export to .edf in :func:`mne.export.export_raw` by using ``edfio`` instead of ``EDFlib-Python`` (:gh:`12218` by :newcontrib:`Florian Hofer`) -- Inform the user about channel discrepancy between provided info, forward operator, and/or covariance matrices in :func:`mne.beamformer.make_lcmv` (:gh:`12238` by :newcontrib:`Nikolai Kapralov`) -- We added type hints for the return values of raw, epochs, and evoked reading functions. Development environments like VS Code or PyCharm will now provide more help when using these functions in your code. (:gh:`12250`, :gh:`12297` by `Richard Höchenberger`_ and `Eric Larson`_) -- Add ``method="polyphase"`` to :meth:`mne.io.Raw.resample` and related functions to allow resampling using :func:`scipy.signal.upfirdn` (:gh:`12268` by `Eric Larson`_) -- The package build backend was switched from ``setuptools`` to ``hatchling``. This will only affect users who build and install MNE-Python from source. (:gh:`12269`, :gh:`12281` by `Richard Höchenberger`_) -- :meth:`mne.Annotations.to_data_frame` can now output different formats for the ``onset`` column: seconds, milliseconds, datetime objects, and timedelta objects. (:gh:`12289` by `Daniel McCloy`_) - -Bugs -~~~~ -- Allow :func:`mne.viz.plot_compare_evokeds` to plot eyetracking channels, and improve error handling (:gh:`12190` by `Scott Huberty`_) -- Fix bug with accessing the last data sample using ``raw[:, -1]`` where an empty array was returned (:gh:`12248` by `Eric Larson`_) -- Remove incorrect type hints in :func:`mne.io.read_raw_neuralynx` (:gh:`12236` by `Richard Höchenberger`_) -- Fix bug where parent directory existence was not checked properly in :meth:`mne.io.Raw.save` (:gh:`12282` by `Eric Larson`_) -- ``defusedxml`` is now an optional (rather than required) dependency and needed when reading EGI-MFF data, NEDF data, and BrainVision montages (:gh:`12264` by `Eric Larson`_) -- Correctly handle temporal gaps in Neuralynx .ncs files via :func:`mne.io.read_raw_neuralynx` (:gh:`12279` by `Kristijan Armeni`_ and `Eric Larson`_) - -API changes -~~~~~~~~~~~ -- The parameter for providing data to :func:`mne.time_frequency.tfr_array_morlet` and :func:`mne.time_frequency.tfr_array_multitaper` has been switched from ``epoch_data`` to ``data``. Only use the ``data`` parameter to avoid a warning (:gh:`12308` by `Thomas Binns`_) +.. towncrier-draft-entries:: Version |release| (development) diff --git a/doc/changes/devel.rst.template b/doc/changes/devel.rst.template deleted file mode 100644 index 09c49cad107..00000000000 --- a/doc/changes/devel.rst.template +++ /dev/null @@ -1,34 +0,0 @@ -.. NOTE: we use cross-references to highlight new functions and classes. - Please follow the examples below like :func:`mne.stats.f_mway_rm`, so the - whats_new page will have a link to the function/class documentation. - -.. 
NOTE: there are 3 separate sections for changes, based on type: - - "Enhancements" for new features - - "Bugs" for bug fixes - - "API changes" for backward-incompatible changes - -.. NOTE: changes from first-time contributors should be added to the TOP of - the relevant section (Enhancements / Bugs / API changes), and should look - like this (where xxxx is the pull request number): - - - description of enhancement/bugfix/API change (:gh:`xxxx` by - :newcontrib:`Firstname Lastname`) - - Also add a corresponding entry for yourself in doc/changes/names.inc - -.. _current: - -Version X.Y.dev0 (development) ------------------------------- - -Enhancements -~~~~~~~~~~~~ -- None yet - -Bugs -~~~~ -- None yet - -API changes -~~~~~~~~~~~ -- None yet diff --git a/doc/changes/devel/.gitignore b/doc/changes/devel/.gitignore new file mode 100644 index 00000000000..f935021a8f8 --- /dev/null +++ b/doc/changes/devel/.gitignore @@ -0,0 +1 @@ +!.gitignore diff --git a/doc/changes/devel/12190.bugfix.rst b/doc/changes/devel/12190.bugfix.rst new file mode 100644 index 00000000000..d7ef2e07444 --- /dev/null +++ b/doc/changes/devel/12190.bugfix.rst @@ -0,0 +1 @@ +Allow :func:`mne.viz.plot_compare_evokeds` to plot eyetracking channels, and improve error handling, y `Scott Huberty`_. \ No newline at end of file diff --git a/doc/changes/devel/12218.newfeature.rst b/doc/changes/devel/12218.newfeature.rst new file mode 100644 index 00000000000..4ea286f0a22 --- /dev/null +++ b/doc/changes/devel/12218.newfeature.rst @@ -0,0 +1 @@ +Speed up export to .edf in :func:`mne.export.export_raw` by using ``edfio`` instead of ``EDFlib-Python``. diff --git a/doc/changes/devel/12236.bugfix.rst b/doc/changes/devel/12236.bugfix.rst new file mode 100644 index 00000000000..ad807ea3487 --- /dev/null +++ b/doc/changes/devel/12236.bugfix.rst @@ -0,0 +1 @@ +Remove incorrect type hints in :func:`mne.io.read_raw_neuralynx`, by `Richard Höchenberger`_. diff --git a/doc/changes/devel/12238.newfeature.rst b/doc/changes/devel/12238.newfeature.rst new file mode 100644 index 00000000000..631722bc07a --- /dev/null +++ b/doc/changes/devel/12238.newfeature.rst @@ -0,0 +1 @@ +Inform the user about channel discrepancy between provided info, forward operator, and/or covariance matrices in :func:`mne.beamformer.make_lcmv`, by :newcontrib:`Nikolai Kapralov`. \ No newline at end of file diff --git a/doc/changes/devel/12248.bugfix.rst b/doc/changes/devel/12248.bugfix.rst new file mode 100644 index 00000000000..bc4124a2267 --- /dev/null +++ b/doc/changes/devel/12248.bugfix.rst @@ -0,0 +1 @@ +Fix bug with accessing the last data sample using ``raw[:, -1]`` where an empty array was returned, by `Eric Larson`_. diff --git a/doc/changes/devel/12250.newfeature.rst b/doc/changes/devel/12250.newfeature.rst new file mode 100644 index 00000000000..20d67dead77 --- /dev/null +++ b/doc/changes/devel/12250.newfeature.rst @@ -0,0 +1 @@ +We added type hints for the return values of :func:`mne.read_evokeds` and :func:`mne.io.read_raw`. Development environments like VS Code or PyCharm will now provide more help when using these functions in your code. By `Richard Höchenberger`_ and `Eric Larson`_. (:gh:`12297`) diff --git a/doc/changes/devel/12250.notable.rst b/doc/changes/devel/12250.notable.rst new file mode 100644 index 00000000000..7616894e636 --- /dev/null +++ b/doc/changes/devel/12250.notable.rst @@ -0,0 +1,11 @@ +In this version, we started adding type hints (also known as "type annotations") to select parts of the codebase. 
+This meta information will be used by development environments (IDEs) like VS Code and PyCharm automatically to provide +better assistance such as tab completion or error detection even before running your code. + +So far, we've only added return type hints to :func:`mne.io.read_raw`, :func:`mne.read_epochs`, :func:`mne.read_evokeds` and +all format-specific ``read_raw_*()`` and ``read_epochs_*()`` functions. Now your editors will know: +these functions return evoked and raw data, respectively. We are planning add type hints to more functions after careful +evaluation in the future. + +You don't need to do anything to benefit from these changes – your editor will pick them up automatically and provide the +enhanced experience if it supports it! diff --git a/doc/changes/devel/12264.dependency.rst b/doc/changes/devel/12264.dependency.rst new file mode 100644 index 00000000000..c511b3448a8 --- /dev/null +++ b/doc/changes/devel/12264.dependency.rst @@ -0,0 +1 @@ +``defusedxml`` is now an optional (rather than required) dependency and needed when reading EGI-MFF data, NEDF data, and BrainVision montages, by `Eric Larson`_. \ No newline at end of file diff --git a/doc/changes/devel/12268.newfeature.rst b/doc/changes/devel/12268.newfeature.rst new file mode 100644 index 00000000000..caf46fec03f --- /dev/null +++ b/doc/changes/devel/12268.newfeature.rst @@ -0,0 +1 @@ +Add ``method="polyphase"`` to :meth:`mne.io.Raw.resample` and related functions to allow resampling using :func:`scipy.signal.upfirdn`, by `Eric Larson`_. \ No newline at end of file diff --git a/doc/changes/devel/12269.newfeature.rst b/doc/changes/devel/12269.newfeature.rst new file mode 100644 index 00000000000..321bd02070e --- /dev/null +++ b/doc/changes/devel/12269.newfeature.rst @@ -0,0 +1 @@ +The package build backend was switched from ``setuptools`` to ``hatchling``. This will only affect users who build and install MNE-Python from source. By `Richard Höchenberger`_. (:gh:`12281`) \ No newline at end of file diff --git a/doc/changes/devel/12279.bugfix.rst b/doc/changes/devel/12279.bugfix.rst new file mode 100644 index 00000000000..93aee511fec --- /dev/null +++ b/doc/changes/devel/12279.bugfix.rst @@ -0,0 +1 @@ +Correctly handle temporal gaps in Neuralynx .ncs files via :func:`mne.io.read_raw_neuralynx`, by `Kristijan Armeni`_ and `Eric Larson`_. \ No newline at end of file diff --git a/doc/changes/devel/12282.bugfix.rst b/doc/changes/devel/12282.bugfix.rst new file mode 100644 index 00000000000..e743d0b6071 --- /dev/null +++ b/doc/changes/devel/12282.bugfix.rst @@ -0,0 +1 @@ +Fix bug where parent directory existence was not checked properly in :meth:`mne.io.Raw.save`, by `Eric Larson`_. diff --git a/doc/changes/devel/12289.newfeature.rst b/doc/changes/devel/12289.newfeature.rst new file mode 100644 index 00000000000..8110e4cf737 --- /dev/null +++ b/doc/changes/devel/12289.newfeature.rst @@ -0,0 +1 @@ +:meth:`mne.Annotations.to_data_frame` can now output different formats for the ``onset`` column: seconds, milliseconds, datetime objects, and timedelta objects. By `Daniel McCloy`_. diff --git a/doc/changes/devel/12299.other.rst b/doc/changes/devel/12299.other.rst new file mode 100644 index 00000000000..61c4bf56725 --- /dev/null +++ b/doc/changes/devel/12299.other.rst @@ -0,0 +1 @@ +Adopted towncrier_ for changelog entries, by `Eric Larson`_. 
diff --git a/doc/changes/devel/12308.apichange.rst b/doc/changes/devel/12308.apichange.rst new file mode 100644 index 00000000000..4d1b8e13923 --- /dev/null +++ b/doc/changes/devel/12308.apichange.rst @@ -0,0 +1 @@ +The parameter for providing data to :func:`mne.time_frequency.tfr_array_morlet` and :func:`mne.time_frequency.tfr_array_multitaper` has been switched from ``epoch_data`` to ``data``. Only use the ``data`` parameter to avoid a warning. Changes by `Thomas Binns`_. \ No newline at end of file diff --git a/doc/conf.py b/doc/conf.py index 837282c5b56..e058234ebe2 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -51,9 +51,8 @@ # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -curdir = os.path.dirname(__file__) -sys.path.append(os.path.abspath(os.path.join(curdir, "..", "mne"))) -sys.path.append(os.path.abspath(os.path.join(curdir, "sphinxext"))) +curpath = Path(__file__).parent.resolve(strict=True) +sys.path.append(str(curpath / "sphinxext")) # -- Project information ----------------------------------------------------- @@ -107,6 +106,7 @@ "sphinx_gallery.gen_gallery", "sphinxcontrib.bibtex", "sphinxcontrib.youtube", + "sphinxcontrib.towncrier.ext", # homegrown "contrib_avatars", "gen_commands", @@ -123,7 +123,7 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ["_includes"] +exclude_patterns = ["_includes", "changes/devel"] # The suffix of source filenames. source_suffix = ".rst" @@ -149,6 +149,10 @@ copybutton_prompt_text = r">>> |\.\.\. |\$ " copybutton_prompt_is_regexp = True +# -- sphinxcontrib-towncrier configuration ----------------------------------- + +towncrier_draft_working_directory = str(curpath.parent) + # -- Intersphinx configuration ----------------------------------------------- intersphinx_mapping = { @@ -804,7 +808,7 @@ def append_attr_meth_examples(app, what, name, obj, options, lines): # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. -switcher_version_match = "dev" if release.endswith("dev0") else version +switcher_version_match = "dev" if ".dev" in version else version html_theme_options = { "icon_links": [ dict( diff --git a/doc/development/contributing.rst b/doc/development/contributing.rst index 2957b434751..15ad9dad2db 100644 --- a/doc/development/contributing.rst +++ b/doc/development/contributing.rst @@ -591,42 +591,54 @@ Describe your changes in the changelog -------------------------------------- Include in your changeset a brief description of the change in the -:ref:`changelog ` (:file:`doc/changes/devel.rst`; this can be -skipped for very minor changes like correcting typos in the documentation). - -There are different sections of the changelog for each release, and separate -**subsections for bugfixes, new features, and changes to the public API.** -Please be sure to add your entry to the appropriate subsection. - -The styling and positioning of the entry depends on whether you are a -first-time contributor or have been mentioned in the changelog before. - -First-time contributors -""""""""""""""""""""""" - -Welcome to MNE-Python! We're very happy to have you here. 
🤗 And to ensure you -get proper credit for your work, please add a changelog entry with the -following pattern **at the top** of the respective subsection (bugs, -enhancements, etc.): - -.. code-block:: rst - - - Bugs - ---- - - - Short description of the changes (:gh:`0000` by :newcontrib:`Firstname Lastname`) - - - ... - -where ``0000`` must be replaced with the respective GitHub pull request (PR) -number, and ``Firstname Lastname`` must be replaced with your full name. - -It is usually best to wait to add a line to the changelog until your PR is -finalized, to avoid merge conflicts (since the changelog is updated with -almost every PR). - -Lastly, make sure that your name is included in the list of authors in +:ref:`changelog ` using towncrier_ format, which aggregates small, +properly-named ``.rst`` files to create a change log. This can be +skipped for very minor changes like correcting typos in the documentation. + +There are six separate sections for changes, based on change type. +To add a changelog entry to a given section, name it as +:file:`doc/changes/devel/..rst`. The types are: + +notable + For overarching changes, e.g., adding type hints package-wide. These are rare. +dependency + For changes to dependencies, e.g., adding a new dependency or changing + the minimum version of an existing dependency. +bugfix + For bug fixes. Can change code behavior with no deprecation period. +apichange + Code behavior changes that require a deprecation period. +newfeature + For new features. +other + For changes that don't fit into any of the above categories, e.g., + internal refactorings. + +For example, for an enhancement PR with number 12345, the changelog entry should be +added as a new file :file:`doc/changes/devel/12345.enhancement.rst`. The file should +contain: + +1. A brief description of the change, typically in a single line of one or two + sentences. +2. reST links to **public** API endpoints like functions (``:func:``), + classes (``:class``), and methods (``:meth:``). If changes are only internal + to private functions/attributes, mention internal refactoring rather than name + the private attributes changed. +3. Author credit. If you are a new contributor (we're very happy to have you here! 🤗), + you should using the ``:newcontrib:`` reST role, whereas previous contributors should + use a standard reST link to their name. For example, a new contributor could write: + + .. code-block:: rst + + Short description of the changes, by :newcontrib:`Firstname Lastname`. + + And an previous contributor could write: + + .. code-block:: rst + + Short description of the changes, by `Firstname Lastname`_. + +Make sure that your name is included in the list of authors in :file:`doc/changes/names.inc`, otherwise the documentation build will fail. To add an author name, append a line with the following pattern (note how the syntax is different from that used in the changelog): @@ -638,27 +650,13 @@ how the syntax is different from that used in the changelog): Many contributors opt to link to their GitHub profile that way. Have a look at the existing entries in the file to get some inspiration. -Recurring contributors -"""""""""""""""""""""" - -The changelog entry should follow the following patterns: - -.. 
code-block:: rst - - - Short description of the changes from one contributor (:gh:`0000` by `Contributor Name`_) - - Short description of the changes from several contributors (:gh:`0000` by `Contributor Name`_, `Second Contributor`_, and `Third Contributor`_) - -where ``0000`` must be replaced with the respective GitHub pull request (PR) -number. Mind the Oxford comma in the case of multiple contributors. - Sometimes, changes that shall appear as a single changelog entry are spread out -across multiple PRs. In this case, name all relevant PRs, separated by -commas: +across multiple PRs. In this case, edit the existing towncrier file for the relevant +change, and append additional PR numbers in parentheticals with the ``:gh:`` role like: .. code-block:: rst - - Short description of the changes from one contributor in multiple PRs (:gh:`0000`, :gh:`1111` by `Contributor Name`_) - - Short description of the changes from several contributors in multiple PRs (:gh:`0000`, :gh:`1111` by `Contributor Name`_, `Second Contributor`_, and `Third Contributor`_) + Short description of the changes, by `Firstname Lastname`_. (:gh:`12346`) Test locally before opening pull requests (PRs) ----------------------------------------------- diff --git a/doc/links.inc b/doc/links.inc index 9dd1f34872c..27e61c850bc 100644 --- a/doc/links.inc +++ b/doc/links.inc @@ -96,6 +96,7 @@ .. _PIL: https://pypi.python.org/pypi/PIL .. _tqdm: https://tqdm.github.io/ .. _pooch: https://www.fatiando.org/pooch/latest/ +.. _towncrier: https://towncrier.readthedocs.io/ .. python editors diff --git a/pyproject.toml b/pyproject.toml index 0e76af897b8..db21c0a1012 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -146,6 +146,7 @@ doc = [ "pydata_sphinx_theme==0.13.3", "sphinx-gallery", "sphinxcontrib-bibtex>=2.5", + "sphinxcontrib-towncrier", "memory_profiler", "neo", "seaborn!=0.11.2", @@ -291,6 +292,7 @@ ignore_directives = [ "toctree", "rst-class", "tab-set", + "towncrier-draft-entries", ] ignore_messages = "^.*(Unknown target name|Undefined substitution referenced)[^`]*$" From 7ccd100310892617dc3c4290465c1eefe1d47282 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 21 Dec 2023 10:23:58 -0500 Subject: [PATCH 30/37] MAINT: Fix CIs (#12320) --- mne/decoding/tests/test_transformer.py | 2 +- tools/github_actions_dependencies.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mne/decoding/tests/test_transformer.py b/mne/decoding/tests/test_transformer.py index f7eeb78ff33..1c2a29bdf8e 100644 --- a/mne/decoding/tests/test_transformer.py +++ b/mne/decoding/tests/test_transformer.py @@ -62,7 +62,7 @@ def test_scaler(info, method): epochs_data_t = epochs_data.transpose([1, 0, 2]) if method in ("mean", "median"): if not check_version("sklearn"): - with pytest.raises(ImportError, match="No module"): + with pytest.raises((ImportError, RuntimeError), match=" module "): Scaler(info, method) return diff --git a/tools/github_actions_dependencies.sh b/tools/github_actions_dependencies.sh index 9489a95f397..b9b425c67fb 100755 --- a/tools/github_actions_dependencies.sh +++ b/tools/github_actions_dependencies.sh @@ -28,7 +28,7 @@ else echo "PyQt6" pip install $STD_ARGS --only-binary ":all:" --default-timeout=60 --extra-index-url https://www.riverbankcomputing.com/pypi/simple "PyQt6!=6.6.1" "PyQt6-Qt6!=6.6.1" echo "NumPy/SciPy/pandas etc." 
- pip install $STD_ARGS --only-binary ":all:" --default-timeout=60 --extra-index-url "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" "numpy>=2.0.0.dev0" "scipy>=1.12.0.dev0" scikit-learn matplotlib pillow statsmodels + pip install $STD_ARGS --only-binary ":all:" --default-timeout=60 --extra-index-url "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple" "numpy>=2.0.0.dev0" "scipy>=1.12.0.dev0" "scikit-learn==1.4.dev0" matplotlib pillow statsmodels # No pandas, dipy, h5py, openmeeg, python-picard (needs numexpr) until they update to NumPy 2.0 compat INSTALL_KIND="test_extra" # echo "dipy" From a03a40d50f871b3d51da923ac156d50443162ea8 Mon Sep 17 00:00:00 2001 From: "Peter J. Molfese" Date: Thu, 21 Dec 2023 11:21:55 -0500 Subject: [PATCH 31/37] [MRG][ENH]: Add Ability to export STC files as GIFTI (#12309) Co-authored-by: Eric Larson Co-authored-by: Daniel McCloy --- doc/changes/devel/12309.newfeature.rst | 1 + mne/source_estimate.py | 72 ++++++++++++++++++++++++++ mne/tests/test_source_estimate.py | 28 ++++++++++ 3 files changed, 101 insertions(+) create mode 100644 doc/changes/devel/12309.newfeature.rst diff --git a/doc/changes/devel/12309.newfeature.rst b/doc/changes/devel/12309.newfeature.rst new file mode 100644 index 00000000000..8e732044a8e --- /dev/null +++ b/doc/changes/devel/12309.newfeature.rst @@ -0,0 +1 @@ +Add method :meth:`mne.SourceEstimate.save_as_surface` to allow saving GIFTI files from surface source estimates, by `Peter Molfese`_. diff --git a/mne/source_estimate.py b/mne/source_estimate.py index b2d197d7b2f..19b23da7d60 100644 --- a/mne/source_estimate.py +++ b/mne/source_estimate.py @@ -31,6 +31,7 @@ _ensure_src_subject, _get_morph_src_reordering, _get_src_nn, + get_decimated_surfaces, ) from .surface import _get_ico_surface, _project_onto_surface, mesh_edges, read_surface from .transforms import _get_trans, apply_trans @@ -1584,6 +1585,77 @@ def in_label(self, label): ) return label_stc + def save_as_surface(self, fname, src, *, scale=1, scale_rr=1e3): + """Save a surface source estimate (stc) as a GIFTI file. + + Parameters + ---------- + fname : path-like + Filename basename to save files as. + Will write anatomical GIFTI plus time series GIFTI for both lh/rh, + for example ``"basename"`` will write ``"basename.lh.gii"``, + ``"basename.lh.time.gii"``, ``"basename.rh.gii"``, and + ``"basename.rh.time.gii"``. + src : instance of SourceSpaces + The source space of the forward solution. + scale : float + Scale factor to apply to the data (functional) values. + scale_rr : float + Scale factor for the source vertex positions. The default (1e3) will + scale from meters to millimeters, which is more standard for GIFTI files. + + Notes + ----- + .. 
versionadded:: 1.7 + """ + nib = _import_nibabel() + _check_option("src.kind", src.kind, ("surface", "mixed")) + ss = get_decimated_surfaces(src) + assert len(ss) == 2 # should be guaranteed by _check_option above + + # Create lists to put DataArrays into + hemis = ("lh", "rh") + for s, hemi in zip(ss, hemis): + darrays = list() + darrays.append( + nib.gifti.gifti.GiftiDataArray( + data=(s["rr"] * scale_rr).astype(np.float32), + intent="NIFTI_INTENT_POINTSET", + datatype="NIFTI_TYPE_FLOAT32", + ) + ) + + # Make the topology DataArray + darrays.append( + nib.gifti.gifti.GiftiDataArray( + data=s["tris"].astype(np.int32), + intent="NIFTI_INTENT_TRIANGLE", + datatype="NIFTI_TYPE_INT32", + ) + ) + + # Make the output GIFTI for anatomicals + topo_gi_hemi = nib.gifti.gifti.GiftiImage(darrays=darrays) + + # actually save the file + nib.save(topo_gi_hemi, f"{fname}-{hemi}.gii") + + # Make the Time Series data arrays + ts = [] + data = getattr(self, f"{hemi}_data") * scale + ts = [ + nib.gifti.gifti.GiftiDataArray( + data=data[:, idx].astype(np.float32), + intent="NIFTI_INTENT_POINTSET", + datatype="NIFTI_TYPE_FLOAT32", + ) + for idx in range(data.shape[1]) + ] + + # save the time series + ts_gi = nib.gifti.gifti.GiftiImage(darrays=ts) + nib.save(ts_gi, f"{fname}-{hemi}.time.gii") + def expand(self, vertices): """Expand SourceEstimate to include more vertices. diff --git a/mne/tests/test_source_estimate.py b/mne/tests/test_source_estimate.py index be31fd1501b..ebe1a369e4d 100644 --- a/mne/tests/test_source_estimate.py +++ b/mne/tests/test_source_estimate.py @@ -248,6 +248,34 @@ def test_volume_stc(tmp_path): assert_array_almost_equal(stc.data, stc_new.data) +@testing.requires_testing_data +def test_save_stc_as_gifti(tmp_path): + """Save the stc as a GIFTI file and export.""" + nib = pytest.importorskip("nibabel") + surfpath_src = bem_path / "sample-oct-6-src.fif" + surfpath_stc = data_path / "MEG" / "sample" / "sample_audvis_trunc-meg" + src = read_source_spaces(surfpath_src) # need source space + stc = read_source_estimate(surfpath_stc) # need stc + assert isinstance(src, SourceSpaces) + assert isinstance(stc, SourceEstimate) + + surf_fname = tmp_path / "stc_write" + + stc.save_as_surface(surf_fname, src) + + # did structural get written? + img_lh = nib.load(f"{surf_fname}-lh.gii") + img_rh = nib.load(f"{surf_fname}-rh.gii") + assert isinstance(img_lh, nib.gifti.gifti.GiftiImage) + assert isinstance(img_rh, nib.gifti.gifti.GiftiImage) + + # did time series get written? 
+ img_timelh = nib.load(f"{surf_fname}-lh.time.gii") + img_timerh = nib.load(f"{surf_fname}-rh.time.gii") + assert isinstance(img_timelh, nib.gifti.gifti.GiftiImage) + assert isinstance(img_timerh, nib.gifti.gifti.GiftiImage) + + @testing.requires_testing_data def test_stc_as_volume(): """Test previous volume source estimate morph.""" From ca7fe266c0a6d4426a62798f37d6a3428d08de6b Mon Sep 17 00:00:00 2001 From: Martin Oberg Date: Thu, 21 Dec 2023 12:53:16 -0800 Subject: [PATCH 32/37] fix section parameter to allow proper hierarchy (#12319) --- doc/changes/devel/12319.bugfix.rst | 1 + doc/changes/names.inc | 2 ++ mne/report/report.py | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 doc/changes/devel/12319.bugfix.rst diff --git a/doc/changes/devel/12319.bugfix.rst b/doc/changes/devel/12319.bugfix.rst new file mode 100644 index 00000000000..16eb1a3350a --- /dev/null +++ b/doc/changes/devel/12319.bugfix.rst @@ -0,0 +1 @@ +Fix bug where section parameter in :meth:`mne.Report.add_html` was not being utilized resulting in improper formatting, by :newcontrib:`Martin Oberg`. diff --git a/doc/changes/names.inc b/doc/changes/names.inc index 0d62d247dd3..f1a0c951da4 100644 --- a/doc/changes/names.inc +++ b/doc/changes/names.inc @@ -348,6 +348,8 @@ .. _Martin Luessi: https://github.com/mluessi +.. _Martin Oberg: https://github.com/obergmartin + .. _Martin Schulz: https://github.com/marsipu .. _Mathieu Scheltienne: https://github.com/mscheltienne diff --git a/mne/report/report.py b/mne/report/report.py index 9a547d4f7b6..ab56d03ab7e 100644 --- a/mne/report/report.py +++ b/mne/report/report.py @@ -2383,7 +2383,7 @@ def add_html( ) self._add_or_replace( title=title, - section=None, + section=section, tags=tags, html_partial=html_partial, replace=replace, From 6733cae2a0765da9ec1b67a98937839d4cd9aadf Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Thu, 21 Dec 2023 16:13:55 -0500 Subject: [PATCH 33/37] MAINT: Automate renaming of towncrier stubs (#12318) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .../rename_towncrier/rename_towncrier.py | 56 +++++++++++++++++++ .github/workflows/autofix.yml | 21 +++++++ .pre-commit-config.yaml | 3 - doc/changes/devel/12318.other.rst | 1 + 4 files changed, 78 insertions(+), 3 deletions(-) create mode 100755 .github/actions/rename_towncrier/rename_towncrier.py create mode 100644 .github/workflows/autofix.yml create mode 100644 doc/changes/devel/12318.other.rst diff --git a/.github/actions/rename_towncrier/rename_towncrier.py b/.github/actions/rename_towncrier/rename_towncrier.py new file mode 100755 index 00000000000..68971d1c83f --- /dev/null +++ b/.github/actions/rename_towncrier/rename_towncrier.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python3 + +# Adapted from action-towncrier-changelog +import json +import os +import re +import subprocess +import sys +from pathlib import Path + +from github import Github +from tomllib import loads + +event_name = os.getenv('GITHUB_EVENT_NAME', 'pull_request') +if not event_name.startswith('pull_request'): + print(f'No-op for {event_name}') + sys.exit(0) +if 'GITHUB_EVENT_PATH' in os.environ: + with open(os.environ['GITHUB_EVENT_PATH'], encoding='utf-8') as fin: + event = json.load(fin) + pr_num = event['number'] + basereponame = event['pull_request']['base']['repo']['full_name'] + real = True +else: # local testing + pr_num = 12318 # added some towncrier files + basereponame = "mne-tools/mne-python" + real = False + +g = Github(os.environ.get('GITHUB_TOKEN')) +baserepo = 
g.get_repo(basereponame) + +# Grab config from upstream's default branch +toml_cfg = loads(Path("pyproject.toml").read_text("utf-8")) + +config = toml_cfg["tool"]["towncrier"] +pr = baserepo.get_pull(pr_num) +modified_files = [f.filename for f in pr.get_files()] + +# Get types from config +types = [ent["directory"] for ent in toml_cfg["tool"]["towncrier"]["type"]] +type_pipe = "|".join(types) + +# Get files that potentially match the types +directory = toml_cfg["tool"]["towncrier"]["directory"] +assert directory.endswith("/"), directory + +file_re = re.compile(rf"^{directory}({type_pipe})\.rst$") +found_stubs = [ + f for f in modified_files if file_re.match(f) +] +for stub in found_stubs: + fro = stub + to = file_re.sub(rf"{directory}{pr_num}.\1.rst", fro) + print(f"Renaming {fro} to {to}") + if real: + subprocess.check_call(["mv", fro, to]) diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml new file mode 100644 index 00000000000..2c0b693750e --- /dev/null +++ b/.github/workflows/autofix.yml @@ -0,0 +1,21 @@ +name: autofix.ci + +on: # yamllint disable-line rule:truthy + pull_request: + types: [opened, synchronize, labeled, unlabeled] + +permissions: + contents: read + +jobs: + autofix: + name: Autoupdate changelog entry + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: '3.12' + - run: pip install --upgrade towncrier pygithub + - run: python ./.github/actions/rename_towncrier/rename_towncrier.py + - uses: autofix-ci/action@ea32e3a12414e6d3183163c3424a7d7a8631ad84 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fed7db76310..f23220d9819 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -59,6 +59,3 @@ repos: # Avoid the conflict between mne/__init__.py and mne/__init__.pyi by ignoring the former exclude: ^mne/(beamformer|channels|commands|datasets|decoding|export|forward|gui|html_templates|inverse_sparse|io|minimum_norm|preprocessing|report|simulation|source_space|stats|time_frequency|utils|viz)?/?__init__\.py$ additional_dependencies: ["numpy==1.26.2"] - -ci: - autofix_prs: false diff --git a/doc/changes/devel/12318.other.rst b/doc/changes/devel/12318.other.rst new file mode 100644 index 00000000000..94890e1dfc4 --- /dev/null +++ b/doc/changes/devel/12318.other.rst @@ -0,0 +1 @@ +Automate adding of PR number to towncrier stubs, by `Eric Larson`_. 
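
As a quick illustration of the stub-naming convention that the changelog workflow and the rename script above enforce, the following is a minimal sketch of a local check. It is illustrative only: ``check_stub.py``, its command-line interface, and the unnumbered-stub fallback are assumptions made for the example rather than part of the MNE-Python tooling; the change-type names are the ones configured for towncrier earlier in this series.

.. code-block:: python

    # check_stub.py -- verify that a changelog stub follows the
    # doc/changes/devel/<PR number>.<type>.rst convention used by the rename script.
    import re
    import sys
    from pathlib import Path

    # Change types taken from the [tool.towncrier] configuration in pyproject.toml.
    TYPES = ("notable", "dependency", "bugfix", "apichange", "newfeature", "other")
    NUMBERED = re.compile(rf"^\d+\.({'|'.join(TYPES)})\.rst$")
    UNNUMBERED = {f"{t}.rst" for t in TYPES}


    def check_stub(path):
        """Classify a stub file name as numbered, unnumbered, or invalid."""
        name = Path(path).name
        if NUMBERED.match(name):
            return "numbered (already references a PR)"
        if name in UNNUMBERED:
            return "unnumbered (the autofix workflow would rename it)"
        return "does not match the towncrier naming convention"


    if __name__ == "__main__":
        for fname in sys.argv[1:]:
            print(f"{fname}: {check_stub(fname)}")

For example, running it on ``doc/changes/devel/12318.other.rst`` reports a numbered stub, while a bare ``other.rst`` stub is the case the autofix workflow rewrites.
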
From 6790426221b83ee16375ec19e974808d7b9aad4c Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 25 Dec 2023 22:34:11 +0100 Subject: [PATCH 34/37] [pre-commit.ci] pre-commit autoupdate (#12325) --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f23220d9819..66f56539781 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,7 +1,7 @@ repos: # Ruff mne - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.8 + rev: v0.1.9 hooks: - id: ruff name: ruff lint mne @@ -13,7 +13,7 @@ repos: # Ruff tutorials and examples - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.1.8 + rev: v0.1.9 hooks: - id: ruff name: ruff lint tutorials and examples @@ -53,7 +53,7 @@ repos: # mypy - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.7.1 + rev: v1.8.0 hooks: - id: mypy # Avoid the conflict between mne/__init__.py and mne/__init__.pyi by ignoring the former From c73b8afcf3cb6304bb67c390d667cd1ac526473d Mon Sep 17 00:00:00 2001 From: Alex Rockhill Date: Sat, 30 Dec 2023 18:35:06 -0800 Subject: [PATCH 35/37] [ENH, MRG] Allow epoch construction from annotations (#12311) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Richard Höchenberger --- doc/changes/devel/12311.newfeature.rst | 1 + doc/conf.py | 1 + examples/decoding/decoding_csp_eeg.py | 13 ++-- examples/decoding/decoding_csp_timefreq.py | 19 +++--- .../time_frequency/time_frequency_erds.py | 11 ++-- .../visualization/eyetracking_plot_heatmap.py | 6 +- mne/epochs.py | 60 +++++++++++++++++-- mne/tests/test_epochs.py | 20 +++++++ mne/utils/docs.py | 8 ++- tools/setup_xvfb.sh | 2 +- tutorials/clinical/20_seeg.py | 3 +- tutorials/clinical/30_ecog.py | 6 +- tutorials/time-freq/50_ssvep.py | 14 ++--- 13 files changed, 109 insertions(+), 55 deletions(-) create mode 100644 doc/changes/devel/12311.newfeature.rst diff --git a/doc/changes/devel/12311.newfeature.rst b/doc/changes/devel/12311.newfeature.rst new file mode 100644 index 00000000000..c5e074278f9 --- /dev/null +++ b/doc/changes/devel/12311.newfeature.rst @@ -0,0 +1 @@ +:class:`mne.Epochs` can now be constructed using :class:`mne.Annotations` stored in the ``raw`` object, by specifying ``events=None``. By `Alex Rockhill`_. 
\ No newline at end of file diff --git a/doc/conf.py b/doc/conf.py index e058234ebe2..d114237bd5a 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -1310,6 +1310,7 @@ def reset_warnings(gallery_conf, fname): for key in ( "invalid version and will not be supported", # pyxdf "distutils Version classes are deprecated", # seaborn and neo + "is_categorical_dtype is deprecated", # seaborn "`np.object` is a deprecated alias for the builtin `object`", # pyxdf # nilearn, should be fixed in > 0.9.1 "In future, it will be an error for 'np.bool_' scalars to", diff --git a/examples/decoding/decoding_csp_eeg.py b/examples/decoding/decoding_csp_eeg.py index 85a468cb590..cf588ebf18a 100644 --- a/examples/decoding/decoding_csp_eeg.py +++ b/examples/decoding/decoding_csp_eeg.py @@ -27,7 +27,7 @@ from sklearn.model_selection import ShuffleSplit, cross_val_score from sklearn.pipeline import Pipeline -from mne import Epochs, events_from_annotations, pick_types +from mne import Epochs, pick_types from mne.channels import make_standard_montage from mne.datasets import eegbci from mne.decoding import CSP @@ -41,7 +41,6 @@ # avoid classification of evoked responses by using epochs that start 1s after # cue onset. tmin, tmax = -1.0, 4.0 -event_id = dict(hands=2, feet=3) subject = 1 runs = [6, 10, 14] # motor imagery: hands vs feet @@ -50,22 +49,20 @@ eegbci.standardize(raw) # set channel names montage = make_standard_montage("standard_1005") raw.set_montage(montage) +raw.annotations.rename(dict(T1="hands", T2="feet")) # Apply band-pass filter raw.filter(7.0, 30.0, fir_design="firwin", skip_by_annotation="edge") -events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3)) - picks = pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False, exclude="bads") # Read epochs (train will be done only between 1 and 2s) # Testing will be done with a running classifier epochs = Epochs( raw, - events, - event_id, - tmin, - tmax, + event_id=["hands", "feet"], + tmin=tmin, + tmax=tmax, proj=True, picks=picks, baseline=None, diff --git a/examples/decoding/decoding_csp_timefreq.py b/examples/decoding/decoding_csp_timefreq.py index f81e4fc0fea..2f36064b615 100644 --- a/examples/decoding/decoding_csp_timefreq.py +++ b/examples/decoding/decoding_csp_timefreq.py @@ -29,7 +29,7 @@ from sklearn.pipeline import make_pipeline from sklearn.preprocessing import LabelEncoder -from mne import Epochs, create_info, events_from_annotations +from mne import Epochs, create_info from mne.datasets import eegbci from mne.decoding import CSP from mne.io import concatenate_raws, read_raw_edf @@ -37,15 +37,14 @@ # %% # Set parameters and read data -event_id = dict(hands=2, feet=3) # motor imagery: hands vs feet subject = 1 runs = [6, 10, 14] raw_fnames = eegbci.load_data(subject, runs) raw = concatenate_raws([read_raw_edf(f) for f in raw_fnames]) +raw.annotations.rename(dict(T1="hands", T2="feet")) # Extract information from the raw file sfreq = raw.info["sfreq"] -events, _ = events_from_annotations(raw, event_id=dict(T1=2, T2=3)) raw.pick(picks="eeg", exclude="bads") raw.load_data() @@ -95,10 +94,9 @@ # Extract epochs from filtered data, padded by window size epochs = Epochs( raw_filter, - events, - event_id, - tmin - w_size, - tmax + w_size, + event_id=["hands", "feet"], + tmin=tmin - w_size, + tmax=tmax + w_size, proj=False, baseline=None, preload=True, @@ -148,10 +146,9 @@ # Extract epochs from filtered data, padded by window size epochs = Epochs( raw_filter, - events, - event_id, - tmin - w_size, - tmax + w_size, + event_id=["hands", 
"feet"], + tmin=tmin - w_size, + tmax=tmax + w_size, proj=False, baseline=None, preload=True, diff --git a/examples/time_frequency/time_frequency_erds.py b/examples/time_frequency/time_frequency_erds.py index ee2dd62a2ba..593861674ed 100644 --- a/examples/time_frequency/time_frequency_erds.py +++ b/examples/time_frequency/time_frequency_erds.py @@ -54,8 +54,8 @@ raw = concatenate_raws([read_raw_edf(f, preload=True) for f in fnames]) raw.rename_channels(lambda x: x.strip(".")) # remove dots from channel names - -events, _ = mne.events_from_annotations(raw, event_id=dict(T1=2, T2=3)) +# rename descriptions to be more easily interpretable +raw.annotations.rename(dict(T1="hands", T2="feet")) # %% # Now we can create 5-second epochs around events of interest. @@ -64,10 +64,9 @@ epochs = mne.Epochs( raw, - events, - event_ids, - tmin - 0.5, - tmax + 0.5, + event_id=["hands", "feet"], + tmin=tmin - 0.5, + tmax=tmax + 0.5, picks=("C3", "Cz", "C4"), baseline=None, preload=True, diff --git a/examples/visualization/eyetracking_plot_heatmap.py b/examples/visualization/eyetracking_plot_heatmap.py index c12aa689984..e1826efb6f7 100644 --- a/examples/visualization/eyetracking_plot_heatmap.py +++ b/examples/visualization/eyetracking_plot_heatmap.py @@ -44,12 +44,8 @@ mne.preprocessing.eyetracking.interpolate_blinks(raw, interpolate_gaze=True) raw.annotations.rename({"dvns": "natural"}) # more intuitive -event_ids = {"natural": 1} -events, event_dict = mne.events_from_annotations(raw, event_id=event_ids) -epochs = mne.Epochs( - raw, events=events, event_id=event_dict, tmin=0, tmax=20, baseline=None -) +epochs = mne.Epochs(raw, event_id=["natural"], tmin=0, tmax=20, baseline=None) # %% diff --git a/mne/epochs.py b/mne/epochs.py index 50403345e92..34d942536bd 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -62,6 +62,7 @@ EpochAnnotationsMixin, _read_annotations_fif, _write_annotations, + events_from_annotations, ) from .baseline import _check_baseline, _log_rescale, rescale from .bem import _check_origin @@ -487,10 +488,7 @@ def __init__( if events is not None: # RtEpochs can have events=None for key, val in self.event_id.items(): if val not in events[:, 2]: - msg = "No matching events found for %s " "(event id %i)" % ( - key, - val, - ) + msg = f"No matching events found for {key} (event id {val})" _on_missing(on_missing, msg) # ensure metadata matches original events size @@ -3104,6 +3102,40 @@ def _ensure_list(x): return metadata, events, event_id +def _events_from_annotations(raw, events, event_id, annotations, on_missing): + """Generate events and event_ids from annotations.""" + events, event_id_tmp = events_from_annotations(raw) + if events.size == 0: + raise RuntimeError( + "No usable annotations found in the raw object. " + "Either `events` must be provided or the raw " + "object must have annotations to construct epochs" + ) + if any(raw.annotations.duration > 0): + logger.info( + "Ignoring annotation durations and creating fixed-duration epochs " + "around annotation onsets." 
+ ) + if event_id is None: + event_id = event_id_tmp + # if event_id is the names of events, map to events integers + if isinstance(event_id, str): + event_id = [event_id] + if isinstance(event_id, (list, tuple, set)): + if not set(event_id).issubset(set(event_id_tmp)): + msg = ( + "No matching annotations found for event_id(s) " + f"{set(event_id) - set(event_id_tmp)}" + ) + _on_missing(on_missing, msg) + # remove extras if on_missing not error + event_id = set(event_id) & set(event_id_tmp) + event_id = {my_id: event_id_tmp[my_id] for my_id in event_id} + # remove any non-selected annotations + annotations.delete(~np.isin(raw.annotations.description, list(event_id))) + return events, event_id, annotations + + @fill_doc class Epochs(BaseEpochs): """Epochs extracted from a Raw instance. @@ -3111,7 +3143,16 @@ class Epochs(BaseEpochs): Parameters ---------- %(raw_epochs)s + + .. note:: + If ``raw`` contains annotations, ``Epochs`` can be constructed around + ``raw.annotations.onset``, but note that the durations of the annotations + are ignored in this case. %(events_epochs)s + + .. versionchanged:: 1.7 + Allow ``events=None`` to use ``raw.annotations.onset`` as the source of + epoch times. %(event_id)s %(epochs_tmin_tmax)s %(baseline_epochs)s @@ -3212,7 +3253,7 @@ class Epochs(BaseEpochs): def __init__( self, raw, - events, + events=None, event_id=None, tmin=-0.2, tmax=0.5, @@ -3240,6 +3281,7 @@ def __init__( "instance of mne.io.BaseRaw" ) info = deepcopy(raw.info) + annotations = raw.annotations.copy() # proj is on when applied in Raw proj = proj or raw.proj @@ -3249,6 +3291,12 @@ def __init__( # keep track of original sfreq (needed for annotations) raw_sfreq = raw.info["sfreq"] + # get events from annotations if no events given + if events is None: + events, event_id, annotations = _events_from_annotations( + raw, events, event_id, annotations, on_missing + ) + # call BaseEpochs constructor super(Epochs, self).__init__( info, @@ -3273,7 +3321,7 @@ def __init__( event_repeated=event_repeated, verbose=verbose, raw_sfreq=raw_sfreq, - annotations=raw.annotations, + annotations=annotations, ) @verbose diff --git a/mne/tests/test_epochs.py b/mne/tests/test_epochs.py index 1c0ff6c027c..c68fc7ce6bd 100644 --- a/mne/tests/test_epochs.py +++ b/mne/tests/test_epochs.py @@ -992,6 +992,26 @@ def test_filter(tmp_path): assert_allclose(epochs.get_data(), data_filt, atol=1e-17) +def test_epochs_from_annotations(): + """Test epoch instantiation using annotations.""" + raw, events = _get_data()[:2] + with pytest.raises( + RuntimeError, match="No usable annotations found in the raw object" + ): + Epochs(raw) + raw.set_annotations( + mne.annotations_from_events( + events, raw.info["sfreq"], first_samp=raw.first_samp + ) + ) + # test on_missing + with pytest.raises(ValueError, match="No matching annotations"): + Epochs(raw, event_id="foo") + # test on_missing warn + with pytest.warns(match="No matching annotations"): + Epochs(raw, event_id=["1", "foo"], on_missing="warn") + + def test_epochs_hash(): """Test epoch hashing.""" raw, events = _get_data()[:2] diff --git a/mne/utils/docs.py b/mne/utils/docs.py index 6d26d01dc40..1fa26fa16dd 100644 --- a/mne/utils/docs.py +++ b/mne/utils/docs.py @@ -1107,12 +1107,14 @@ def _reflow_param_docstring(docstring, has_first_line=True, width=75): """ docdict["event_id"] = """ -event_id : int | list of int | dict | None +event_id : int | list of int | dict | str | list of str | None The id of the :term:`events` to consider. 
If dict, the keys can later be used to access associated :term:`events`. Example: dict(auditory=1, visual=3). If int, a dict will be created with the id as - string. If a list, all :term:`events` with the IDs specified in the list - are used. If None, all :term:`events` will be used and a dict is created + string. If a list of int, all :term:`events` with the IDs specified in the list + are used. If a str or list of str, ``events`` must be ``None`` to use annotations + and then the IDs must be the name(s) of the annotations to use. + If None, all :term:`events` will be used and a dict is created with string integer names corresponding to the event id integers.""" docdict["event_id_ecg"] = """ diff --git a/tools/setup_xvfb.sh b/tools/setup_xvfb.sh index a5c55d0819b..d22f8e2b7ac 100755 --- a/tools/setup_xvfb.sh +++ b/tools/setup_xvfb.sh @@ -11,5 +11,5 @@ done # This also includes the libraries necessary for PyQt5/PyQt6 sudo apt update -sudo apt install -yqq xvfb libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xfixes0 libopengl0 libegl1 libosmesa6 mesa-utils libxcb-shape0 libxcb-cursor0 +sudo apt install -yqq xvfb libxkbcommon-x11-0 libxcb-icccm4 libxcb-image0 libxcb-keysyms1 libxcb-randr0 libxcb-render-util0 libxcb-xinerama0 libxcb-xfixes0 libopengl0 libegl1 libosmesa6 mesa-utils libxcb-shape0 libxcb-cursor0 libxml2 /sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_99.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :99 -screen 0 1400x900x24 -ac +extension GLX +render -noreset diff --git a/tutorials/clinical/20_seeg.py b/tutorials/clinical/20_seeg.py index cce5f4a089a..dac5739110d 100644 --- a/tutorials/clinical/20_seeg.py +++ b/tutorials/clinical/20_seeg.py @@ -58,8 +58,7 @@ raw = mne.io.read_raw(misc_path / "seeg" / "sample_seeg_ieeg.fif") -events, event_id = mne.events_from_annotations(raw) -epochs = mne.Epochs(raw, events, event_id, detrend=1, baseline=None) +epochs = mne.Epochs(raw, detrend=1, baseline=None) epochs = epochs["Response"][0] # just process one epoch of data for speed # %% diff --git a/tutorials/clinical/30_ecog.py b/tutorials/clinical/30_ecog.py index 2ccc2d6cb91..d568d3b1bb4 100644 --- a/tutorials/clinical/30_ecog.py +++ b/tutorials/clinical/30_ecog.py @@ -100,15 +100,11 @@ # at the posterior commissure) raw.set_montage(montage) -# Find the annotated events -events, event_id = mne.events_from_annotations(raw) - # Make a 25 second epoch that spans before and after the seizure onset epoch_length = 25 # seconds epochs = mne.Epochs( raw, - events, - event_id=event_id["onset"], + event_id="onset", tmin=13, tmax=13 + epoch_length, baseline=None, diff --git a/tutorials/time-freq/50_ssvep.py b/tutorials/time-freq/50_ssvep.py index 323e8a4fe54..39113f08132 100644 --- a/tutorials/time-freq/50_ssvep.py +++ b/tutorials/time-freq/50_ssvep.py @@ -84,14 +84,12 @@ raw.filter(l_freq=0.1, h_freq=None, fir_design="firwin", verbose=False) # Construct epochs -event_id = {"12hz": 255, "15hz": 155} -events, _ = mne.events_from_annotations(raw, verbose=False) +raw.annotations.rename({"Stimulus/S255": "12hz", "Stimulus/S155": "15hz"}) tmin, tmax = -1.0, 20.0 # in s baseline = None epochs = mne.Epochs( raw, - events=events, - event_id=[event_id["12hz"], event_id["15hz"]], + event_id=["12hz", "15hz"], tmin=tmin, tmax=tmax, baseline=baseline, @@ -356,8 +354,8 @@ def snr_spectrum(psd, noise_n_neighbor_freqs=1, noise_skip_neighbor_freqs=1): # Get indices for the different trial types # 
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -i_trial_12hz = np.where(epochs.events[:, 2] == event_id["12hz"])[0] -i_trial_15hz = np.where(epochs.events[:, 2] == event_id["15hz"])[0] +i_trial_12hz = np.where(epochs.annotations.description == "12hz")[0] +i_trial_15hz = np.where(epochs.annotations.description == "15hz")[0] # %% # Get indices of EEG channels forming the ROI @@ -604,7 +602,7 @@ def snr_spectrum(psd, noise_n_neighbor_freqs=1, noise_skip_neighbor_freqs=1): window_snrs = [[]] * len(window_lengths) for i_win, win in enumerate(window_lengths): # compute spectrogram - this_spectrum = epochs[str(event_id["12hz"])].compute_psd( + this_spectrum = epochs["12hz"].compute_psd( "welch", n_fft=int(sfreq * win), n_overlap=0, @@ -688,7 +686,7 @@ def snr_spectrum(psd, noise_n_neighbor_freqs=1, noise_skip_neighbor_freqs=1): for i_win, win in enumerate(window_starts): # compute spectrogram - this_spectrum = epochs[str(event_id["12hz"])].compute_psd( + this_spectrum = epochs["12hz"].compute_psd( "welch", n_fft=int(sfreq * window_length) - 1, n_overlap=0, From ff03c6ca17f7d12f72ee1f488264acec6a8db06e Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 2 Jan 2024 14:17:33 -0500 Subject: [PATCH 36/37] BUG: Fix bug with epochs image (#12330) --- .github/workflows/tests.yml | 12 ++++++------ mne/conftest.py | 2 +- mne/viz/epochs.py | 5 ++--- pyproject.toml | 2 +- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 419595c8354..85f537930a5 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -89,14 +89,14 @@ jobs: python-version: ${{ matrix.python }} if: startswith(matrix.kind, 'pip') # Python (if conda) - - uses: conda-incubator/setup-miniconda@v3 + - uses: mamba-org/setup-micromamba@v1 with: - python-version: ${{ env.PYTHON_VERSION }} environment-file: ${{ env.CONDA_ENV }} - activate-environment: mne - miniforge-version: latest - miniforge-variant: Mambaforge - use-mamba: ${{ matrix.kind != 'conda' }} + environment-name: mne + create-args: >- + python=${{ env.PYTHON_VERSION }} + mamba + fmt!=10.2.0 if: ${{ !startswith(matrix.kind, 'pip') }} - run: ./tools/github_actions_dependencies.sh # Minimal commands on Linux (macOS stalls) diff --git a/mne/conftest.py b/mne/conftest.py index ba2bfd51dfa..b0882346586 100644 --- a/mne/conftest.py +++ b/mne/conftest.py @@ -788,7 +788,7 @@ def src_volume_labels(): """Create a 7mm source space with labels.""" pytest.importorskip("nibabel") volume_labels = mne.get_volume_labels_from_aseg(fname_aseg) - with pytest.warns(RuntimeWarning, match="Found no usable.*Left-vessel.*"): + with pytest.warns(RuntimeWarning, match="Found no usable.*t-vessel.*"): src = mne.setup_volume_source_space( "sample", 7.0, diff --git a/mne/viz/epochs.py b/mne/viz/epochs.py index c830570d457..e3ae7b28e6e 100644 --- a/mne/viz/epochs.py +++ b/mne/viz/epochs.py @@ -654,10 +654,9 @@ def _plot_epochs_image( # draw the colorbar if colorbar: - from matplotlib.pyplot import colorbar as cbar - if "colorbar" in ax: # axes supplied by user - this_colorbar = cbar(im, cax=ax["colorbar"]) + cax = ax["colorbar"] + this_colorbar = cax.figure.colorbar(im, cax=cax) this_colorbar.ax.set_ylabel(unit, rotation=270, labelpad=12) else: # we created them this_colorbar = fig.colorbar(im, ax=ax_im) diff --git a/pyproject.toml b/pyproject.toml index db21c0a1012..0b90b4a4e69 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -109,7 +109,7 @@ full = [ # Dependencies for running the test infrastructure test = [ - "pytest", 
+ "pytest!=8.0.0rc1", "pytest-cov", "pytest-timeout", "pytest-harvest", From 596122d1f39a962e8299c63020885e207f127c87 Mon Sep 17 00:00:00 2001 From: Clemens Brunner Date: Thu, 4 Jan 2024 16:31:19 +0100 Subject: [PATCH 37/37] Fix typo in contributing guide (#12335) --- doc/development/contributing.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/development/contributing.rst b/doc/development/contributing.rst index 15ad9dad2db..6249251911f 100644 --- a/doc/development/contributing.rst +++ b/doc/development/contributing.rst @@ -621,7 +621,7 @@ contain: 1. A brief description of the change, typically in a single line of one or two sentences. 2. reST links to **public** API endpoints like functions (``:func:``), - classes (``:class``), and methods (``:meth:``). If changes are only internal + classes (``:class:``), and methods (``:meth:``). If changes are only internal to private functions/attributes, mention internal refactoring rather than name the private attributes changed. 3. Author credit. If you are a new contributor (we're very happy to have you here! 🤗),