diff --git a/.github/workflows/deployment.yml b/.github/workflows/deployment.yml index dd900496e..a55b7860d 100644 --- a/.github/workflows/deployment.yml +++ b/.github/workflows/deployment.yml @@ -94,7 +94,15 @@ jobs: run: | VERSION=${{ needs.build-testpypi-package.outputs.version }} SUFFIX=${{ needs.build-testpypi-package.outputs.suffix }} - python -m pip install mrpro==$VERSION$SUFFIX --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ + for i in {1..3}; do + if python -m pip install mrpro==$VERSION$SUFFIX --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/; then + echo "Package installed successfully." + break + else + echo "Attempt $i failed. Retrying in 10 seconds..." + sleep 10 + fi + done build-pypi-package: name: Build Package for PyPI diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 95d14317a..303fd43fa 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -6,7 +6,6 @@ repos: rev: v5.0.0 hooks: - id: check-added-large-files - - id: check-docstring-first - id: check-merge-conflict - id: check-yaml - id: check-toml @@ -54,11 +53,11 @@ repos: - "--extra-index-url=https://pypi.python.org/simple" ci: - autofix_commit_msg: | - [pre-commit] auto fixes from pre-commit hooks - autofix_prs: false - autoupdate_branch: '' - autoupdate_commit_msg: '[pre-commit] pre-commit autoupdate' - autoupdate_schedule: monthly - skip: [mypy] - submodules: false + autofix_commit_msg: | + [pre-commit] auto fixes from pre-commit hooks + autofix_prs: false + autoupdate_branch: "" + autoupdate_commit_msg: "[pre-commit] pre-commit autoupdate" + autoupdate_schedule: monthly + skip: [mypy] + submodules: false diff --git a/examples/direct_reconstruction.ipynb b/examples/direct_reconstruction.ipynb index 1e4e74c9c..3b6dc930e 100644 --- a/examples/direct_reconstruction.ipynb +++ b/examples/direct_reconstruction.ipynb @@ -37,10 +37,10 @@ "\n", "import requests\n", "\n", - "with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:\n", - " response = requests.get(zenodo_url + fname, timeout=30)\n", - " data_file.write(response.content)\n", - " data_file.flush()" + "data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')\n", + "response = requests.get(zenodo_url + fname, timeout=30)\n", + "data_file.write(response.content)\n", + "data_file.flush()" ] }, { diff --git a/examples/direct_reconstruction.py b/examples/direct_reconstruction.py index 5d55812c9..7672aa7e7 100644 --- a/examples/direct_reconstruction.py +++ b/examples/direct_reconstruction.py @@ -11,10 +11,10 @@ import requests -with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file: - response = requests.get(zenodo_url + fname, timeout=30) - data_file.write(response.content) - data_file.flush() +data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') +response = requests.get(zenodo_url + fname, timeout=30) +data_file.write(response.content) +data_file.flush() # %% [markdown] # ### Image reconstruction diff --git a/examples/iterative_sense_reconstruction.ipynb b/examples/iterative_sense_reconstruction.ipynb index f612d7522..87249b2fb 100644 --- a/examples/iterative_sense_reconstruction.ipynb +++ b/examples/iterative_sense_reconstruction.ipynb @@ -37,10 +37,10 @@ "\n", "import requests\n", "\n", - "with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:\n", - " response = requests.get(zenodo_url + fname, timeout=30)\n", - " 
data_file.write(response.content)\n", - " data_file.flush()" + "data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')\n", + "response = requests.get(zenodo_url + fname, timeout=30)\n", + "data_file.write(response.content)\n", + "data_file.flush()" ] }, { diff --git a/examples/iterative_sense_reconstruction.py b/examples/iterative_sense_reconstruction.py index ba5e6a01a..6d0bc49a5 100644 --- a/examples/iterative_sense_reconstruction.py +++ b/examples/iterative_sense_reconstruction.py @@ -11,10 +11,10 @@ import requests -with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file: - response = requests.get(zenodo_url + fname, timeout=30) - data_file.write(response.content) - data_file.flush() +data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') +response = requests.get(zenodo_url + fname, timeout=30) +data_file.write(response.content) +data_file.flush() # %% [markdown] # ### Image reconstruction diff --git a/examples/pulseq_2d_radial_golden_angle.ipynb b/examples/pulseq_2d_radial_golden_angle.ipynb index bcb4482a1..52e0310bb 100644 --- a/examples/pulseq_2d_radial_golden_angle.ipynb +++ b/examples/pulseq_2d_radial_golden_angle.ipynb @@ -33,14 +33,13 @@ "cell_type": "code", "execution_count": null, "id": "d16f41f1", - "metadata": { - "lines_to_next_cell": 2 - }, + "metadata": {}, "outputs": [], "source": [ "# define zenodo records URL and create a temporary directory and h5-file\n", "zenodo_url = 'https://zenodo.org/records/10854057/files/'\n", - "fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'" + "fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'\n", + "data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')" ] }, { @@ -51,10 +50,9 @@ "outputs": [], "source": [ "# Download raw data using requests\n", - "with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:\n", - " response = requests.get(zenodo_url + fname, timeout=30)\n", - " data_file.write(response.content)\n", - " data_file.flush()" + "response = requests.get(zenodo_url + fname, timeout=30)\n", + "data_file.write(response.content)\n", + "data_file.flush()" ] }, { @@ -127,10 +125,10 @@ "# download the sequence file from zenodo\n", "zenodo_url = 'https://zenodo.org/records/10868061/files/'\n", "seq_fname = 'pulseq_radial_2D_402spokes_golden_angle.seq'\n", - "with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.seq') as seq_file:\n", - " response = requests.get(zenodo_url + seq_fname, timeout=30)\n", - " seq_file.write(response.content)\n", - " seq_file.flush()" + "seq_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.seq')\n", + "response = requests.get(zenodo_url + seq_fname, timeout=30)\n", + "seq_file.write(response.content)\n", + "seq_file.flush()" ] }, { diff --git a/examples/pulseq_2d_radial_golden_angle.py b/examples/pulseq_2d_radial_golden_angle.py index f4db5217a..3f857c382 100644 --- a/examples/pulseq_2d_radial_golden_angle.py +++ b/examples/pulseq_2d_radial_golden_angle.py @@ -19,14 +19,13 @@ # define zenodo records URL and create a temporary directory and h5-file zenodo_url = 'https://zenodo.org/records/10854057/files/' fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5' - +data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') # %% # Download raw data using requests -with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file: - response = requests.get(zenodo_url + fname, 
timeout=30) - data_file.write(response.content) - data_file.flush() +response = requests.get(zenodo_url + fname, timeout=30) +data_file.write(response.content) +data_file.flush() # %% [markdown] # ### Image reconstruction using KTrajectoryIsmrmrd @@ -63,10 +62,10 @@ # download the sequence file from zenodo zenodo_url = 'https://zenodo.org/records/10868061/files/' seq_fname = 'pulseq_radial_2D_402spokes_golden_angle.seq' -with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.seq') as seq_file: - response = requests.get(zenodo_url + seq_fname, timeout=30) - seq_file.write(response.content) - seq_file.flush() +seq_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.seq') +response = requests.get(zenodo_url + seq_fname, timeout=30) +seq_file.write(response.content) +seq_file.flush() # %% # Read raw data and calculate trajectory using KTrajectoryPulseq diff --git a/examples/regularized_iterative_sense_reconstruction.ipynb b/examples/regularized_iterative_sense_reconstruction.ipynb index 0a6743161..6b1c2704b 100644 --- a/examples/regularized_iterative_sense_reconstruction.ipynb +++ b/examples/regularized_iterative_sense_reconstruction.ipynb @@ -37,10 +37,10 @@ "\n", "import requests\n", "\n", - "with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:\n", - " response = requests.get(zenodo_url + fname, timeout=30)\n", - " data_file.write(response.content)\n", - " data_file.flush()" + "data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')\n", + "response = requests.get(zenodo_url + fname, timeout=30)\n", + "data_file.write(response.content)\n", + "data_file.flush()" ] }, { diff --git a/examples/regularized_iterative_sense_reconstruction.py b/examples/regularized_iterative_sense_reconstruction.py index 2ab7ba033..e41dc4ac5 100644 --- a/examples/regularized_iterative_sense_reconstruction.py +++ b/examples/regularized_iterative_sense_reconstruction.py @@ -11,10 +11,10 @@ import requests -with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file: - response = requests.get(zenodo_url + fname, timeout=30) - data_file.write(response.content) - data_file.flush() +data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') +response = requests.get(zenodo_url + fname, timeout=30) +data_file.write(response.content) +data_file.flush() # %% [markdown] # ### Image reconstruction diff --git a/examples/ruff.toml b/examples/ruff.toml index 11a1e6167..1bb114755 100644 --- a/examples/ruff.toml +++ b/examples/ruff.toml @@ -5,4 +5,5 @@ lint.extend-ignore = [ "T20", #print "E402", #module-import-not-at-top-of-file "S101", #assert + "SIM115", #context manager for opening files ] diff --git a/src/mrpro/VERSION b/src/mrpro/VERSION index 0f6ae6fb6..c60039027 100644 --- a/src/mrpro/VERSION +++ b/src/mrpro/VERSION @@ -1 +1 @@ -0.241029 +0.241112 diff --git a/src/mrpro/data/MoveDataMixin.py b/src/mrpro/data/MoveDataMixin.py index ce0a9a0aa..8d977d0a6 100644 --- a/src/mrpro/data/MoveDataMixin.py +++ b/src/mrpro/data/MoveDataMixin.py @@ -252,11 +252,9 @@ def apply_( ---------- function The function to apply to all fields. None is interpreted as a no-op. - memo - A dictionary to keep track of objects that the function has already been applied to, + A dictionary to keep track of objects that the function has already been applied to, to avoid multiple applications. This is useful if the object has a circular reference. 
- recurse If True, the function will be applied to all children that are MoveDataMixin instances. """ diff --git a/src/mrpro/data/_kdata/KDataSelectMixin.py b/src/mrpro/data/_kdata/KDataSelectMixin.py index 4a7d1504d..8f8a452cf 100644 --- a/src/mrpro/data/_kdata/KDataSelectMixin.py +++ b/src/mrpro/data/_kdata/KDataSelectMixin.py @@ -7,6 +7,7 @@ from typing_extensions import Self from mrpro.data._kdata.KDataProtocol import _KDataProtocol +from mrpro.data.Rotation import Rotation class KDataSelectMixin(_KDataProtocol): @@ -50,7 +51,9 @@ def select_other_subset( other_idx = torch.cat([torch.where(idx == label_idx[:, 0, 0])[0] for idx in subset_idx], dim=0) # Adapt header - kheader.acq_info.apply_(lambda field: field[other_idx, ...] if isinstance(field, torch.Tensor) else field) + kheader.acq_info.apply_( + lambda field: field[other_idx, ...] if isinstance(field, torch.Tensor | Rotation) else field + ) # Select data kdat = self.data[other_idx, ...] diff --git a/src/mrpro/data/_kdata/KDataSplitMixin.py b/src/mrpro/data/_kdata/KDataSplitMixin.py index 111876aa9..bb6bc0ce5 100644 --- a/src/mrpro/data/_kdata/KDataSplitMixin.py +++ b/src/mrpro/data/_kdata/KDataSplitMixin.py @@ -1,6 +1,6 @@ """Mixin class to split KData into other subsets.""" -from typing import Literal, TypeVar +from typing import Literal, TypeVar, cast import torch from einops import rearrange, repeat @@ -10,10 +10,8 @@ from mrpro.data.AcqInfo import rearrange_acq_info_fields from mrpro.data.EncodingLimits import Limits from mrpro.data.Rotation import Rotation -from mrpro.data.SpatialDimension import SpatialDimension - -T = TypeVar('T', torch.Tensor, Rotation, SpatialDimension) +RotationOrTensor = TypeVar('RotationOrTensor', bound=torch.Tensor | Rotation) class KDataSplitMixin(_KDataProtocol): """Split KData into other subsets.""" @@ -59,8 +57,9 @@ def _split_k2_or_k1_into_other( def split_data_traj(dat_traj: torch.Tensor) -> torch.Tensor: return dat_traj[:, :, :, split_idx, :] - def split_acq_info(acq_info: T) -> T: - return acq_info[:, :, split_idx, ...] + def split_acq_info(acq_info: RotationOrTensor) -> RotationOrTensor: + # cast due to https://github.com/python/mypy/issues/10817 + return cast(RotationOrTensor, acq_info[:, :, split_idx, ...]) # Rearrange other_split and k1 dimension rearrange_pattern_data = 'other coils k2 other_split k1 k0->(other other_split) coils k2 k1 k0' @@ -72,8 +71,8 @@ def split_acq_info(acq_info: T) -> T: def split_data_traj(dat_traj: torch.Tensor) -> torch.Tensor: return dat_traj[:, :, split_idx, :, :] - def split_acq_info(acq_info: T) -> T: - return acq_info[:, split_idx, ...] 
+ def split_acq_info(acq_info: RotationOrTensor) -> RotationOrTensor: + return cast(RotationOrTensor, acq_info[:, split_idx, ...]) # Rearrange other_split and k1 dimension rearrange_pattern_data = 'other coils other_split k2 k1 k0->(other other_split) coils k2 k1 k0' @@ -101,7 +100,7 @@ def split_acq_info(acq_info: T) -> T: # Update shape of acquisition info index kheader.acq_info.apply_( lambda field: rearrange_acq_info_fields(split_acq_info(field), rearrange_pattern_acq_info) - if isinstance(field, T.__constraints__) + if isinstance(field, Rotation | torch.Tensor) else field ) diff --git a/src/mrpro/utils/__init__.py b/src/mrpro/utils/__init__.py index 57634c015..80ef9d398 100644 --- a/src/mrpro/utils/__init__.py +++ b/src/mrpro/utils/__init__.py @@ -1,13 +1,16 @@ import mrpro.utils.slice_profiles import mrpro.utils.typing +import mrpro.utils.unit_conversion from mrpro.utils.smap import smap from mrpro.utils.remove_repeat import remove_repeat from mrpro.utils.zero_pad_or_crop import zero_pad_or_crop from mrpro.utils.split_idx import split_idx -from mrpro.utils.reshape import broadcast_right, unsqueeze_left, unsqueeze_right +from mrpro.utils.reshape import broadcast_right, unsqueeze_left, unsqueeze_right, reduce_view import mrpro.utils.unit_conversion + __all__ = [ "broadcast_right", + "reduce_view", "remove_repeat", "slice_profiles", "smap", diff --git a/src/mrpro/utils/reshape.py b/src/mrpro/utils/reshape.py index 39d12e51f..31d495afd 100644 --- a/src/mrpro/utils/reshape.py +++ b/src/mrpro/utils/reshape.py @@ -1,5 +1,7 @@ """Tensor reshaping utilities.""" +from collections.abc import Sequence + import torch @@ -67,3 +69,33 @@ def broadcast_right(*x: torch.Tensor) -> tuple[torch.Tensor, ...]: max_dim = max(el.ndim for el in x) unsqueezed = torch.broadcast_tensors(*(unsqueeze_right(el, max_dim - el.ndim) for el in x)) return unsqueezed + + +def reduce_view(x: torch.Tensor, dim: int | Sequence[int] | None = None) -> torch.Tensor: + """Reduce expanded dimensions in a view to singletons. + + Reduce either all or specific dimensions to a singleton if it + points to the same memory address. + This undoes expand. + + Parameters + ---------- + x + input tensor + dim + only reduce expanded dimensions in the specified dimensions. + If None, reduce all expanded dimensions. 
+ """ + if dim is None: + dim_: Sequence[int] = range(x.ndim) + elif isinstance(dim, Sequence): + dim_ = [d % x.ndim for d in dim] + else: + dim_ = [dim % x.ndim] + + stride = x.stride() + newsize = [ + 1 if stride == 0 and d in dim_ else oldsize + for d, (oldsize, stride) in enumerate(zip(x.size(), stride, strict=True)) + ] + return torch.as_strided(x, newsize, stride) diff --git a/src/mrpro/utils/unit_conversion.py b/src/mrpro/utils/unit_conversion.py index 274dc71c8..0115bed47 100644 --- a/src/mrpro/utils/unit_conversion.py +++ b/src/mrpro/utils/unit_conversion.py @@ -5,7 +5,20 @@ import numpy as np import torch -GYROMAGNETIC_MOMENT_PROTON = 42.58 * 1e6 +__all__ = [ + 'ms_to_s', + 's_to_ms', + 'mm_to_m', + 'm_to_mm', + 'deg_to_rad', + 'rad_to_deg', + 'lamor_frequency_to_magnetic_field', + 'magnetic_field_to_lamor_frequency', + 'GYROMAGNETIC_RATIO_PROTON', +] + +GYROMAGNETIC_RATIO_PROTON = 42.58 * 1e6 +r"""The gyromagnetic ratio :math:`\frac{\gamma}{2\pi}` of 1H in H20 in Hz/T""" # Conversion functions for units T = TypeVar('T', float, torch.Tensor) @@ -45,7 +58,7 @@ def rad_to_deg(deg: T) -> T: return deg * 180.0 / np.pi -def lamor_frequency_to_magnetic_field(lamor_frequency: T, gyromagnetic_ratio: float = GYROMAGNETIC_MOMENT_PROTON) -> T: +def lamor_frequency_to_magnetic_field(lamor_frequency: T, gyromagnetic_ratio: float = GYROMAGNETIC_RATIO_PROTON) -> T: """Convert the Lamor frequency [Hz] to the magntic field strength [T]. Parameters @@ -63,7 +76,7 @@ def lamor_frequency_to_magnetic_field(lamor_frequency: T, gyromagnetic_ratio: fl def magnetic_field_to_lamor_frequency( - magnetic_field_strength: T, gyromagnetic_ratio: float = GYROMAGNETIC_MOMENT_PROTON + magnetic_field_strength: T, gyromagnetic_ratio: float = GYROMAGNETIC_RATIO_PROTON ) -> T: """Convert the magntic field strength [T] to Lamor frequency [Hz]. 
diff --git a/tests/utils/test_reshape.py b/tests/utils/test_reshape.py index 60a0dc5e3..dd57b8feb 100644 --- a/tests/utils/test_reshape.py +++ b/tests/utils/test_reshape.py @@ -1,7 +1,9 @@ """Tests for reshaping utilities.""" import torch -from mrpro.utils import broadcast_right, unsqueeze_left, unsqueeze_right +from mrpro.utils import broadcast_right, reduce_view, unsqueeze_left, unsqueeze_right + +from tests import RandomGenerator def test_broadcast_right(): @@ -12,7 +14,7 @@ def test_broadcast_right(): def test_unsqueeze_left(): - """Test unsqueeze left""" + """Test unsqueeze_left""" tensor = torch.ones(1, 2, 3) unsqueezed = unsqueeze_left(tensor, 2) assert unsqueezed.shape == (1, 1, 1, 2, 3) @@ -20,8 +22,32 @@ def test_unsqueeze_left(): def test_unsqueeze_right(): - """Test unsqueeze right""" + """Test unsqueeze_right""" tensor = torch.ones(1, 2, 3) unsqueezed = unsqueeze_right(tensor, 2) assert unsqueezed.shape == (1, 2, 3, 1, 1) assert torch.equal(tensor.ravel(), unsqueezed.ravel()) + + +def test_reduce_view(): + """Test reduce_view""" + + tensor = RandomGenerator(0).float32_tensor((1, 2, 3, 1, 1, 1)) + tensor = tensor.expand(1, 2, 3, 4, 1, 1).contiguous() # this cannot be removed + tensor = tensor.expand(7, 2, 3, 4, 5, 6) + + reduced_all = reduce_view(tensor) + assert reduced_all.shape == (1, 2, 3, 4, 1, 1) + assert torch.equal(reduced_all.expand_as(tensor), tensor) + + reduced_two = reduce_view(tensor, (0, -1)) + assert reduced_two.shape == (1, 2, 3, 4, 5, 1) + assert torch.equal(reduced_two.expand_as(tensor), tensor) + + reduced_one_neg = reduce_view(tensor, -1) + assert reduced_one_neg.shape == (7, 2, 3, 4, 5, 1) + assert torch.equal(reduced_one_neg.expand_as(tensor), tensor) + + reduced_one_pos = reduce_view(tensor, 0) + assert reduced_one_pos.shape == (1, 2, 3, 4, 5, 6) + assert torch.equal(reduced_one_pos.expand_as(tensor), tensor)
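A short usage sketch of the new `reduce_view` helper exercised by these tests. It assumes only the behaviour shown in `reshape.py` above: dimensions that were broadcast with `expand` (stride 0) are collapsed back to singletons via `as_strided`, without copying data.

```python
import torch
from mrpro.utils import reduce_view

x = torch.arange(6.0).reshape(1, 2, 3, 1)
expanded = x.expand(5, 2, 3, 4)        # view of the same memory, stride 0 in dims 0 and 3

reduced = reduce_view(expanded)        # undo the expand in all dimensions
print(reduced.shape)                   # torch.Size([1, 2, 3, 1])

only_last = reduce_view(expanded, -1)  # only collapse the last dimension
print(only_last.shape)                 # torch.Size([5, 2, 3, 1])

# expanding back reproduces the original view, so no information is lost
assert torch.equal(reduced.expand_as(expanded), expanded)
```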