Merge branch 'main' into gram
fzimmermann89 authored Nov 12, 2024
2 parents 3e763e2 + 202d395 commit 1e2a8ae
Showing 48 changed files with 1,142 additions and 578 deletions.
10 changes: 9 additions & 1 deletion .github/workflows/deployment.yml
@@ -94,7 +94,15 @@ jobs:
run: |
VERSION=${{ needs.build-testpypi-package.outputs.version }}
SUFFIX=${{ needs.build-testpypi-package.outputs.suffix }}
python -m pip install mrpro==$VERSION$SUFFIX --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/
for i in {1..3}; do
if python -m pip install mrpro==$VERSION$SUFFIX --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/; then
echo "Package installed successfully."
break
else
echo "Attempt $i failed. Retrying in 10 seconds..."
sleep 10
fi
done
build-pypi-package:
name: Build Package for PyPI
17 changes: 8 additions & 9 deletions .pre-commit-config.yaml
@@ -6,7 +6,6 @@ repos:
rev: v5.0.0
hooks:
- id: check-added-large-files
- id: check-docstring-first
- id: check-merge-conflict
- id: check-yaml
- id: check-toml
@@ -54,11 +53,11 @@ repos:
- "--extra-index-url=https://pypi.python.org/simple"

ci:
autofix_commit_msg: |
[pre-commit] auto fixes from pre-commit hooks
autofix_prs: false
autoupdate_branch: ''
autoupdate_commit_msg: '[pre-commit] pre-commit autoupdate'
autoupdate_schedule: monthly
skip: [mypy]
submodules: false
autofix_commit_msg: |
[pre-commit] auto fixes from pre-commit hooks
autofix_prs: false
autoupdate_branch: ""
autoupdate_commit_msg: "[pre-commit] pre-commit autoupdate"
autoupdate_schedule: monthly
skip: [mypy]
submodules: false
8 changes: 4 additions & 4 deletions examples/direct_reconstruction.ipynb
@@ -37,10 +37,10 @@
"\n",
"import requests\n",
"\n",
"with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:\n",
" response = requests.get(zenodo_url + fname, timeout=30)\n",
" data_file.write(response.content)\n",
" data_file.flush()"
"data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')\n",
"response = requests.get(zenodo_url + fname, timeout=30)\n",
"data_file.write(response.content)\n",
"data_file.flush()"
]
},
{
8 changes: 4 additions & 4 deletions examples/direct_reconstruction.py
@@ -11,10 +11,10 @@

import requests

with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:
response = requests.get(zenodo_url + fname, timeout=30)
data_file.write(response.content)
data_file.flush()
data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')
response = requests.get(zenodo_url + fname, timeout=30)
data_file.write(response.content)
data_file.flush()

# %% [markdown]
# ### Image reconstruction
8 changes: 4 additions & 4 deletions examples/iterative_sense_reconstruction.ipynb
@@ -37,10 +37,10 @@
"\n",
"import requests\n",
"\n",
"with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:\n",
" response = requests.get(zenodo_url + fname, timeout=30)\n",
" data_file.write(response.content)\n",
" data_file.flush()"
"data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')\n",
"response = requests.get(zenodo_url + fname, timeout=30)\n",
"data_file.write(response.content)\n",
"data_file.flush()"
]
},
{
8 changes: 4 additions & 4 deletions examples/iterative_sense_reconstruction.py
@@ -11,10 +11,10 @@

import requests

with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:
response = requests.get(zenodo_url + fname, timeout=30)
data_file.write(response.content)
data_file.flush()
data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')
response = requests.get(zenodo_url + fname, timeout=30)
data_file.write(response.content)
data_file.flush()

# %% [markdown]
# ### Image reconstruction
22 changes: 10 additions & 12 deletions examples/pulseq_2d_radial_golden_angle.ipynb
@@ -33,14 +33,13 @@
"cell_type": "code",
"execution_count": null,
"id": "d16f41f1",
"metadata": {
"lines_to_next_cell": 2
},
"metadata": {},
"outputs": [],
"source": [
"# define zenodo records URL and create a temporary directory and h5-file\n",
"zenodo_url = 'https://zenodo.org/records/10854057/files/'\n",
"fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'"
"fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'\n",
"data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')"
]
},
{
@@ -51,10 +50,9 @@
"outputs": [],
"source": [
"# Download raw data using requests\n",
"with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:\n",
" response = requests.get(zenodo_url + fname, timeout=30)\n",
" data_file.write(response.content)\n",
" data_file.flush()"
"response = requests.get(zenodo_url + fname, timeout=30)\n",
"data_file.write(response.content)\n",
"data_file.flush()"
]
},
{
@@ -127,10 +125,10 @@
"# download the sequence file from zenodo\n",
"zenodo_url = 'https://zenodo.org/records/10868061/files/'\n",
"seq_fname = 'pulseq_radial_2D_402spokes_golden_angle.seq'\n",
"with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.seq') as seq_file:\n",
" response = requests.get(zenodo_url + seq_fname, timeout=30)\n",
" seq_file.write(response.content)\n",
" seq_file.flush()"
"seq_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.seq')\n",
"response = requests.get(zenodo_url + seq_fname, timeout=30)\n",
"seq_file.write(response.content)\n",
"seq_file.flush()"
]
},
{
17 changes: 8 additions & 9 deletions examples/pulseq_2d_radial_golden_angle.py
@@ -19,14 +19,13 @@
# define zenodo records URL and create a temporary directory and h5-file
zenodo_url = 'https://zenodo.org/records/10854057/files/'
fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'

data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')

# %%
# Download raw data using requests
with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:
response = requests.get(zenodo_url + fname, timeout=30)
data_file.write(response.content)
data_file.flush()
response = requests.get(zenodo_url + fname, timeout=30)
data_file.write(response.content)
data_file.flush()

# %% [markdown]
# ### Image reconstruction using KTrajectoryIsmrmrd
@@ -63,10 +62,10 @@
# download the sequence file from zenodo
zenodo_url = 'https://zenodo.org/records/10868061/files/'
seq_fname = 'pulseq_radial_2D_402spokes_golden_angle.seq'
with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.seq') as seq_file:
response = requests.get(zenodo_url + seq_fname, timeout=30)
seq_file.write(response.content)
seq_file.flush()
seq_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.seq')
response = requests.get(zenodo_url + seq_fname, timeout=30)
seq_file.write(response.content)
seq_file.flush()

# %%
# Read raw data and calculate trajectory using KTrajectoryPulseq
8 changes: 4 additions & 4 deletions examples/regularized_iterative_sense_reconstruction.ipynb
@@ -37,10 +37,10 @@
"\n",
"import requests\n",
"\n",
"with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:\n",
" response = requests.get(zenodo_url + fname, timeout=30)\n",
" data_file.write(response.content)\n",
" data_file.flush()"
"data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')\n",
"response = requests.get(zenodo_url + fname, timeout=30)\n",
"data_file.write(response.content)\n",
"data_file.flush()"
]
},
{
8 changes: 4 additions & 4 deletions examples/regularized_iterative_sense_reconstruction.py
@@ -11,10 +11,10 @@

import requests

with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5') as data_file:
response = requests.get(zenodo_url + fname, timeout=30)
data_file.write(response.content)
data_file.flush()
data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')
response = requests.get(zenodo_url + fname, timeout=30)
data_file.write(response.content)
data_file.flush()

# %% [markdown]
# ### Image reconstruction
1 change: 1 addition & 0 deletions examples/ruff.toml
@@ -5,4 +5,5 @@ lint.extend-ignore = [
"T20", #print
"E402", #module-import-not-at-top-of-file
"S101", #assert
"SIM115", #context manager for opening files
]
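The example scripts and notebooks above all replace a `with tempfile.NamedTemporaryFile(...)` block with a plain assignment, so the handle stays open and `data_file.name` remains usable in later cells; the new `SIM115` entry ignores ruff's "use a context manager for opening files" rule for exactly this pattern. A minimal sketch of the resulting idiom, using the Zenodo record from the examples; the explicit cleanup at the end is an illustrative assumption and is not part of this commit:

import os
import tempfile

import requests

# download the raw data once and keep the file around (delete=False) for the rest of the example
zenodo_url = 'https://zenodo.org/records/10854057/files/'
fname = 'pulseq_radial_2D_402spokes_golden_angle_with_traj.h5'
data_file = tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.h5')
response = requests.get(zenodo_url + fname, timeout=30)
data_file.write(response.content)
data_file.flush()

# ... later cells read from data_file.name ...

# illustrative cleanup (assumption, not shown in the diff): remove the file when done
data_file.close()
os.remove(data_file.name)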
2 changes: 1 addition & 1 deletion src/mrpro/VERSION
@@ -1 +1 @@
0.241029
0.241112
55 changes: 24 additions & 31 deletions src/mrpro/data/AcqInfo.py
@@ -1,28 +1,29 @@
"""Acquisition information dataclass."""

from collections.abc import Callable, Sequence
from collections.abc import Sequence
from dataclasses import dataclass

import ismrmrd
import numpy as np
import torch
from typing_extensions import Self, TypeVar
from einops import rearrange
from typing_extensions import Self

from mrpro.data.MoveDataMixin import MoveDataMixin
from mrpro.data.Rotation import Rotation
from mrpro.data.SpatialDimension import SpatialDimension
from mrpro.utils.unit_conversion import mm_to_m

# Conversion functions for units
T = TypeVar('T', float, torch.Tensor)

def rearrange_acq_info_fields(field: object, pattern: str, **axes_lengths: dict[str, int]) -> object:
"""Change the shape of the fields in AcqInfo."""
if isinstance(field, Rotation):
return Rotation.from_matrix(rearrange(field.as_matrix(), pattern, **axes_lengths))

def ms_to_s(ms: T) -> T:
"""Convert ms to s."""
return ms / 1000
if isinstance(field, torch.Tensor):
return rearrange(field, pattern, **axes_lengths)


def mm_to_m(m: T) -> T:
"""Convert mm to m."""
return m / 1000
return field


@dataclass(slots=True)
@@ -121,30 +122,24 @@ class AcqInfo(MoveDataMixin):
number_of_samples: torch.Tensor
"""Number of sample points per readout (readouts may have different number of sample points)."""

orientation: Rotation
"""Rotation describing the orientation of the readout, phase and slice encoding direction."""

patient_table_position: SpatialDimension[torch.Tensor]
"""Offset position of the patient table, in LPS coordinates [m]."""

phase_dir: SpatialDimension[torch.Tensor]
"""Directional cosine of phase encoding (2D)."""

physiology_time_stamp: torch.Tensor
"""Time stamps relative to physiological triggering, e.g. ECG. Not in s but in vendor-specific time units"""

position: SpatialDimension[torch.Tensor]
"""Center of the excited volume, in LPS coordinates relative to isocenter [m]."""

read_dir: SpatialDimension[torch.Tensor]
"""Directional cosine of readout/frequency encoding."""

sample_time_us: torch.Tensor
"""Readout bandwidth, as time between samples [us]."""

scan_counter: torch.Tensor
"""Zero-indexed incrementing counter for readouts."""

slice_dir: SpatialDimension[torch.Tensor]
"""Directional cosine of slice normal, i.e. cross-product of read_dir and phase_dir."""

trajectory_dimensions: torch.Tensor # =3. We only support 3D Trajectories: kz always exists.
"""Dimensionality of the k-space trajectory vector."""

@@ -206,17 +201,13 @@ def tensor_2d(data: np.ndarray) -> torch.Tensor:
data_tensor = data_tensor[None, None]
return data_tensor

def spatialdimension_2d(
data: np.ndarray, conversion: Callable[[torch.Tensor], torch.Tensor] | None = None
) -> SpatialDimension[torch.Tensor]:
def spatialdimension_2d(data: np.ndarray) -> SpatialDimension[torch.Tensor]:
# Ensure spatial dimension is (k1*k2*other, 1, 3)
if data.ndim != 2:
raise ValueError('Spatial dimension is expected to be of shape (N,3)')
data = data[:, None, :]
# all spatial dimensions are float32
return (
SpatialDimension[torch.Tensor].from_array_xyz(torch.tensor(data.astype(np.float32))).apply_(conversion)
)
return SpatialDimension[torch.Tensor].from_array_xyz(torch.tensor(data.astype(np.float32)))

acq_idx = AcqIdx(
k1=tensor(idx['kspace_encode_step_1']),
@@ -251,14 +242,16 @@ def spatialdimension_2d(
flags=tensor_2d(headers['flags']),
measurement_uid=tensor_2d(headers['measurement_uid']),
number_of_samples=tensor_2d(headers['number_of_samples']),
patient_table_position=spatialdimension_2d(headers['patient_table_position'], mm_to_m),
phase_dir=spatialdimension_2d(headers['phase_dir']),
orientation=Rotation.from_directions(
spatialdimension_2d(headers['slice_dir']),
spatialdimension_2d(headers['phase_dir']),
spatialdimension_2d(headers['read_dir']),
),
patient_table_position=spatialdimension_2d(headers['patient_table_position']).apply_(mm_to_m),
physiology_time_stamp=tensor_2d(headers['physiology_time_stamp']),
position=spatialdimension_2d(headers['position'], mm_to_m),
read_dir=spatialdimension_2d(headers['read_dir']),
position=spatialdimension_2d(headers['position']).apply_(mm_to_m),
sample_time_us=tensor_2d(headers['sample_time_us']),
scan_counter=tensor_2d(headers['scan_counter']),
slice_dir=spatialdimension_2d(headers['slice_dir']),
trajectory_dimensions=tensor_2d(headers['trajectory_dimensions']).fill_(3), # see above
user_float=tensor_2d(headers['user_float']),
user_int=tensor_2d(headers['user_int']),
Expand Down
