From de79063464f9608d01b4c414cc0f2da6d0a78560 Mon Sep 17 00:00:00 2001
From: Felix F Zimmermann
Date: Thu, 21 Nov 2024 12:56:17 +0100
Subject: [PATCH 1/4] Add apply (out of place) (#547)

---
 src/mrpro/data/MoveDataMixin.py      | 18 +++++++++++++++++
 src/mrpro/data/SpatialDimension.py   | 13 ++++++++++++
 tests/data/test_movedatamixin.py     | 23 ++++++++++++++++++++-
 tests/data/test_spatial_dimension.py | 30 +++++++++++++++++++++++++++-
 4 files changed, 82 insertions(+), 2 deletions(-)

diff --git a/src/mrpro/data/MoveDataMixin.py b/src/mrpro/data/MoveDataMixin.py
index 8d977d0a6..2ac3c1a58 100644
--- a/src/mrpro/data/MoveDataMixin.py
+++ b/src/mrpro/data/MoveDataMixin.py
@@ -239,6 +239,24 @@ def _convert(data: T) -> T:
         new.apply_(_convert, memo=memo, recurse=False)
         return new
 
+    def apply(
+        self: Self,
+        function: Callable[[Any], Any] | None = None,
+        *,
+        recurse: bool = True,
+    ) -> Self:
+        """Apply a function to all children. Returns a new object.
+
+        Parameters
+        ----------
+        function
+            The function to apply to all fields. None is interpreted as a no-op.
+        recurse
+            If True, the function will be applied to all children that are MoveDataMixin instances.
+        """
+        new = self.clone().apply_(function, recurse=recurse)
+        return new
+
     def apply_(
         self: Self,
         function: Callable[[Any], Any] | None = None,
diff --git a/src/mrpro/data/SpatialDimension.py b/src/mrpro/data/SpatialDimension.py
index 46b1db89a..12f94e8a6 100644
--- a/src/mrpro/data/SpatialDimension.py
+++ b/src/mrpro/data/SpatialDimension.py
@@ -18,6 +18,7 @@
 VectorTypes = torch.Tensor
 ScalarTypes = int | float
 T = TypeVar('T', torch.Tensor, int, float)
+
 # Covariant types, as SpatialDimension is a Container
 # and we want, for example, SpatialDimension[int] to also be a SpatialDimension[float]
 T_co = TypeVar('T_co', torch.Tensor, int, float, covariant=True)
@@ -108,6 +109,7 @@ def from_array_zyx(
 
         return SpatialDimension(z, y, x)
 
+    # This function is mainly for type hinting and docstring
     def apply_(self, function: Callable[[T], T] | None = None, **_) -> Self:
         """Apply a function to each z, y, x (in-place).
 
@@ -118,6 +120,17 @@ def apply_(self, function: Callable[[T], T] | None = None, **_) -> Self:
             function to apply
         """
         return super(SpatialDimension, self).apply_(function)
 
+    # This function is mainly for type hinting and docstring
+    def apply(self, function: Callable[[T], T] | None = None, **_) -> Self:
+        """Apply a function to each z, y, x (returning a new object).
+
+        Parameters
+        ----------
+        function
+            function to apply
+        """
+        return super(SpatialDimension, self).apply(function)
+
     @property
     def zyx(self) -> tuple[T_co, T_co, T_co]:
         """Return a z,y,x tuple."""
diff --git a/tests/data/test_movedatamixin.py b/tests/data/test_movedatamixin.py
index 3feb091de..c92e12b2a 100644
--- a/tests/data/test_movedatamixin.py
+++ b/tests/data/test_movedatamixin.py
@@ -207,7 +207,7 @@ def testchild(attribute, expected_dtype):
     assert new.module.module1.weight is new.module.module1.weight, 'shared module parameters should remain shared'
 
 
-def test_movedatamixin_apply():
+def test_movedatamixin_apply_():
     """Tests apply_ method of MoveDataMixin."""
     data = B()
     # make one of the parameters shared to test memo behavior
@@ -223,3 +223,24 @@ def multiply_by_2(obj):
     torch.testing.assert_close(data.floattensor, original.floattensor * 2)
     torch.testing.assert_close(data.child.floattensor2, original.child.floattensor2 * 2)
     assert data.child.floattensor is data.child.floattensor2, 'shared module parameters should remain shared'
+
+
+def test_movedatamixin_apply():
+    """Tests apply method of MoveDataMixin."""
+    data = B()
+    # make one of the parameters shared to test memo behavior
+    data.child.floattensor2 = data.child.floattensor
+    original = data.clone()
+
+    def multiply_by_2(obj):
+        if isinstance(obj, torch.Tensor):
+            return obj * 2
+        return obj
+
+    new = data.apply(multiply_by_2)
+    torch.testing.assert_close(data.floattensor, original.floattensor)
+    torch.testing.assert_close(data.child.floattensor2, original.child.floattensor2)
+    torch.testing.assert_close(new.floattensor, original.floattensor * 2)
+    torch.testing.assert_close(new.child.floattensor2, original.child.floattensor2 * 2)
+    assert data.child.floattensor is data.child.floattensor2, 'shared module parameters should remain shared'
+    assert new is not data, 'new object should be different from the original'
diff --git a/tests/data/test_spatial_dimension.py b/tests/data/test_spatial_dimension.py
index afafece04..61fd127df 100644
--- a/tests/data/test_spatial_dimension.py
+++ b/tests/data/test_spatial_dimension.py
@@ -93,7 +93,7 @@ def test_spatial_dimension_broadcasting():
 
 
 def test_spatial_dimension_apply_():
-    """Test apply_ (inplace)"""
+    """Test apply_ (in place)"""
 
     def conversion(x: torch.Tensor) -> torch.Tensor:
         assert isinstance(x, torch.Tensor), 'The argument to the conversion function should be a tensor'
@@ -115,6 +115,34 @@ def conversion(x: torch.Tensor) -> torch.Tensor:
     assert torch.equal(spatial_dimension_inplace.z, z)
 
 
+def test_spatial_dimension_apply():
+    """Test apply (out of place)"""
+
+    def conversion(x: torch.Tensor) -> torch.Tensor:
+        assert isinstance(x, torch.Tensor), 'The argument to the conversion function should be a tensor'
+        return x.swapaxes(0, 1).square()
+
+    xyz = RandomGenerator(0).float32_tensor((1, 2, 3))
+    spatial_dimension = SpatialDimension.from_array_xyz(xyz.numpy())
+    spatial_dimension_outofplace = spatial_dimension.apply(conversion)
+
+    assert spatial_dimension_outofplace is not spatial_dimension
+
+    assert isinstance(spatial_dimension_outofplace.x, torch.Tensor)
+    assert isinstance(spatial_dimension_outofplace.y, torch.Tensor)
+    assert isinstance(spatial_dimension_outofplace.z, torch.Tensor)
+
+    x, y, z = conversion(xyz).unbind(-1)
+    assert torch.equal(spatial_dimension_outofplace.x, x)
+    assert torch.equal(spatial_dimension_outofplace.y, y)
+    assert torch.equal(spatial_dimension_outofplace.z, z)
+
+    x, y, z = xyz.unbind(-1)  # original should be unmodified
+    assert torch.equal(spatial_dimension.x, x)
+    assert torch.equal(spatial_dimension.y, y)
+    assert torch.equal(spatial_dimension.z, z)
+
+
 def test_spatial_dimension_zyx():
     """Test the zyx tuple property"""
     z, y, x = (2, 3, 4)

From bf2859bf537833232015fe165cd4fc6fc52c8dc3 Mon Sep 17 00:00:00 2001
From: Felix F Zimmermann
Date: Tue, 26 Nov 2024 16:39:37 +0100
Subject: [PATCH 2/4] Fix DCF and FourierOp edge cases (#553)

Our FourierOp did not work for image_size < nufft_numpoints.
Our DCF voronoi calculation assumed that the k dimensions requiring voronoi
did not contain singleton dimensions.
---
 src/mrpro/data/DcfData.py        |  2 +-
 src/mrpro/operators/FourierOp.py |  6 +++---
 tests/data/test_dcf_data.py      | 16 +++++++++++++++-
 3 files changed, 19 insertions(+), 5 deletions(-)

diff --git a/src/mrpro/data/DcfData.py b/src/mrpro/data/DcfData.py
index 62726744d..3db7f6772 100644
--- a/src/mrpro/data/DcfData.py
+++ b/src/mrpro/data/DcfData.py
@@ -56,7 +56,7 @@ def from_traj_voronoi(cls, traj: KTrajectory) -> Self:
 
         if ks_needing_voronoi:
             # Handle full dimensions needing voronoi
-            dcfs.append(smap(dcf_2d3d_voronoi, torch.stack(list(ks_needing_voronoi), -4), 4))
+            dcfs.append(smap(dcf_2d3d_voronoi, torch.stack(torch.broadcast_tensors(*ks_needing_voronoi), -4), 4))
 
         if dcfs:
             # Multiply all dcfs together
diff --git a/src/mrpro/operators/FourierOp.py b/src/mrpro/operators/FourierOp.py
index a3e81aba7..2e2087f78 100644
--- a/src/mrpro/operators/FourierOp.py
+++ b/src/mrpro/operators/FourierOp.py
@@ -108,17 +108,17 @@ def get_traj(traj: KTrajectory, dims: Sequence[int]):
             # Broadcast shapes not always needed but also does not hurt
             omega = [k.expand(*np.broadcast_shapes(*[k.shape for k in omega])) for k in omega]
             self.register_buffer('_omega', torch.stack(omega, dim=-4))  # use the 'coil' dim for the direction
-
+            numpoints = [min(img_size, nufft_numpoints) for img_size in self._nufft_im_size]
             self._fwd_nufft_op: KbNufftAdjoint | None = KbNufft(
                 im_size=self._nufft_im_size,
                 grid_size=grid_size,
-                numpoints=nufft_numpoints,
+                numpoints=numpoints,
                 kbwidth=nufft_kbwidth,
             )
             self._adj_nufft_op: KbNufftAdjoint | None = KbNufftAdjoint(
                 im_size=self._nufft_im_size,
                 grid_size=grid_size,
-                numpoints=nufft_numpoints,
+                numpoints=numpoints,
                 kbwidth=nufft_kbwidth,
             )
         else:
diff --git a/tests/data/test_dcf_data.py b/tests/data/test_dcf_data.py
index 72761739c..314ad2ba2 100644
--- a/tests/data/test_dcf_data.py
+++ b/tests/data/test_dcf_data.py
@@ -5,6 +5,8 @@
 from einops import repeat
 from mrpro.data import DcfData, KTrajectory
 
+from tests import RandomGenerator
+
 
 def example_traj_rpe(n_kr, n_ka, n_k0, broadcast=True):
     """Create RPE trajectory with uniform angular gap."""
@@ -17,7 +19,7 @@ def example_traj_rpe(n_kr, n_ka, n_k0, broadcast=True):
     return trajectory
 
 
-def example_traj_spiral_2d(n_kr, n_ki, n_ka, broadcast=True) -> KTrajectory:
+def example_traj_spiral_2d(n_kr: int, n_ki: int, n_ka: int, broadcast: bool = True) -> KTrajectory:
     """Create 2D spiral trajectory with n_kr points along each spiral arm, n_ki turns per spiral arm and n_ka
     spiral arms."""
     ang = repeat(torch.linspace(0, 2 * torch.pi * n_ki, n_kr), 'k0 -> other k2 k1 k0', other=1, k2=1, k1=1)
@@ -82,3 +84,15 @@ def test_dcf_rpe_traj_voronoi_cuda(n_kr, n_ka, n_k0):
     trajectory = example_traj_rpe(n_kr, n_ka, n_k0)
     dcf = DcfData.from_traj_voronoi(trajectory.cuda())
     assert dcf.data.is_cuda
+
+
+def test_dcf_broadcast():
+    """Test broadcasting within voronoi dcf calculation."""
+    rng = RandomGenerator(0)
+    # kx and ky force voronoi calculation and need to be broadcasted
+    kx = rng.float32_tensor((1, 1, 4, 4))
+    ky = rng.float32_tensor((1, 4, 1, 4))
+    kz = torch.zeros(1, 1, 1, 1)
+    trajectory = KTrajectory(kz, ky, kx)
+    dcf = DcfData.from_traj_voronoi(trajectory)
+    assert dcf.data.shape == trajectory.broadcasted_shape

From 41111461575f60b39c6b97fe92af24dbef782ed2 Mon Sep 17 00:00:00 2001
From: Felix F Zimmermann
Date: Tue, 26 Nov 2024 16:43:30 +0100
Subject: [PATCH 3/4] Fix gram and adjoint of AdjointLinearOperator (#541)

---
 src/mrpro/operators/LinearOperator.py | 7 +------
 tests/operators/test_operators.py     | 6 ++++++
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/src/mrpro/operators/LinearOperator.py b/src/mrpro/operators/LinearOperator.py
index 029089f5d..74d3bafdd 100644
--- a/src/mrpro/operators/LinearOperator.py
+++ b/src/mrpro/operators/LinearOperator.py
@@ -415,9 +415,4 @@ def adjoint(self, x: torch.Tensor) -> tuple[torch.Tensor,]:
     @property
     def H(self) -> LinearOperator:  # noqa: N802
         """Adjoint of adjoint operator, i.e. original LinearOperator."""
-        return self.operator
-
-    @property
-    def gram(self) -> LinearOperator:
-        """Gram operator."""
-        return self._operator.gram.H
+        return self._operator
diff --git a/tests/operators/test_operators.py b/tests/operators/test_operators.py
index 378060b74..7c1712cdd 100644
--- a/tests/operators/test_operators.py
+++ b/tests/operators/test_operators.py
@@ -337,6 +337,12 @@ def test_sum_operator_multiple_adjoint():
     dotproduct_adjointness_test(linear_op_sum, u, v)
 
 
+def test_adjoint_of_adjoint():
+    """Test that the adjoint of the adjoint is the original operator"""
+    a = DummyLinearOperator(RandomGenerator(7).complex64_tensor((3, 10)))
+    assert a.H.H is a
+
+
 def test_gram_shortcuts():
     """Test that .gram for composition and scalar multiplication results in shortcuts."""
 

From 82801e6528352e6ef9af6a4db3b04b740e6485da Mon Sep 17 00:00:00 2001
From: Felix F Zimmermann
Date: Tue, 26 Nov 2024 17:02:23 +0100
Subject: [PATCH 4/4] Release v0.241126 (#561)

---
 src/mrpro/VERSION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/mrpro/VERSION b/src/mrpro/VERSION
index c60039027..1e8c1c10e 100644
--- a/src/mrpro/VERSION
+++ b/src/mrpro/VERSION
@@ -1 +1 @@
-0.241112
+0.241126