diff --git a/src/mrpro/operators/WaveletOp.py b/src/mrpro/operators/WaveletOp.py
index df98d78b6..411ffdbc0 100644
--- a/src/mrpro/operators/WaveletOp.py
+++ b/src/mrpro/operators/WaveletOp.py
@@ -3,7 +3,6 @@
 from collections.abc import Sequence
 from typing import Literal
 
-import numpy as np
 import torch
 from ptwt.conv_transform import wavedec, waverec
 from ptwt.conv_transform_2 import wavedec2, waverec2
@@ -366,7 +365,7 @@ def _stacked_tensor_to_coeff(self, coefficients_stack: torch.Tensor) -> list[tor
         3D: [aaa, aad_n, ada_n, add_n, ..., ..., aad_1, ada_1, add_1, ...]
         """
         coefficients = torch.split(
-            coefficients_stack, [int(np.prod(shape)) for shape in self.coefficients_shape], dim=-1
+            coefficients_stack, [int(torch.prod(torch.as_tensor(shape))) for shape in self.coefficients_shape], dim=-1
         )
         return [
             torch.reshape(coeff, (*coeff.shape[:-1], *shape))
diff --git a/tests/__init__.py b/tests/__init__.py
index 675fd5e26..c29efd34a 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -1,2 +1,10 @@
 from ._RandomGenerator import RandomGenerator
-from .helper import relative_image_difference, dotproduct_adjointness_test, operator_isometry_test, linear_operator_unitary_test, autodiff_test
+from .helper import (
+    relative_image_difference,
+    dotproduct_adjointness_test,
+    operator_isometry_test,
+    linear_operator_unitary_test,
+    autodiff_test,
+    gradient_of_linear_operator_test,
+    forward_mode_autodiff_of_linear_operator_test
+)
diff --git a/tests/helper.py b/tests/helper.py
index 7e11826a7..852c608fe 100644
--- a/tests/helper.py
+++ b/tests/helper.py
@@ -170,3 +170,84 @@ def autodiff_test(
     with torch.autograd.detect_anomaly():
         (_, vjpfunc) = torch.func.vjp(operator.forward, *u)
         vjpfunc(v_range)
+
+
+def gradient_of_linear_operator_test(
+    operator: LinearOperator,
+    u: torch.Tensor,
+    v: torch.Tensor,
+    relative_tolerance: float = 1e-3,
+    absolute_tolerance: float = 1e-5,
+):
+    """Test that the gradient of a linear operator is the adjoint.
+    Note: This property should hold for all u and v.
+    Commonly, this function is called with two random vectors u and v.
+    Parameters
+    ----------
+    operator
+        linear operator
+    u
+        element of the domain of the operator
+    v
+        element of the range of the operator
+    relative_tolerance
+        default is PyTorch's default for float16
+    absolute_tolerance
+        default is PyTorch's default for float16
+    Raises
+    ------
+    AssertionError
+        if the gradient is not the adjoint
+    """
+    # Gradient of the forward via vjp
+    (_, vjpfunc) = torch.func.vjp(operator.forward, u)
+    assert torch.allclose(vjpfunc((v,))[0], operator.adjoint(v)[0], rtol=relative_tolerance, atol=absolute_tolerance)
+
+    # Gradient of the adjoint via vjp
+    (_, vjpfunc) = torch.func.vjp(operator.adjoint, v)
+    assert torch.allclose(vjpfunc((u,))[0], operator.forward(u)[0], rtol=relative_tolerance, atol=absolute_tolerance)
+
+
+def forward_mode_autodiff_of_linear_operator_test(
+    operator: LinearOperator,
+    u: torch.Tensor,
+    v: torch.Tensor,
+    relative_tolerance: float = 1e-3,
+    absolute_tolerance: float = 1e-5,
+):
+    """Test the forward-mode autodiff calculation.
+    Verifies that the Jacobian-vector product (jvp) is equivalent to applying the operator.
+    Note: This property should hold for all u and v.
+    Commonly, this function is called with two random vectors u and v.
+    Parameters
+    ----------
+    operator
+        linear operator
+    u
+        element of the domain of the operator
+    v
+        element of the range of the operator
+    relative_tolerance
+        default is PyTorch's default for float16
+    absolute_tolerance
+        default is PyTorch's default for float16
+    Raises
+    ------
+    AssertionError
+        if the jvp yields different results than applying the operator
+    """
+    # jvp of the forward; the tangent output (index 1) must equal the operator applied to the tangent
+    assert torch.allclose(
+        torch.func.jvp(operator.forward, (u,), (u,))[1][0],
+        operator.forward(u)[0],
+        rtol=relative_tolerance,
+        atol=absolute_tolerance,
+    )
+
+    # jvp of the adjoint
+    assert torch.allclose(
+        torch.func.jvp(operator.adjoint, (v,), (v,))[1][0],
+        operator.adjoint(v)[0],
+        rtol=relative_tolerance,
+        atol=absolute_tolerance,
+    )
diff --git a/tests/operators/models/test_inversion_recovery.py b/tests/operators/models/test_inversion_recovery.py
index b3d32a211..92b6341e7 100644
--- a/tests/operators/models/test_inversion_recovery.py
+++ b/tests/operators/models/test_inversion_recovery.py
@@ -47,3 +47,26 @@ def test_autodiff_inversion_recovery():
     model = InversionRecovery(ti=10)
     m0, t1 = create_parameter_tensor_tuples(parameter_shape=(2, 5, 10, 10, 10), number_of_tensors=2)
     autodiff_test(model, m0, t1)
+
+
+@pytest.mark.cuda
+def test_inversion_recovery_cuda():
+    """Test the inversion recovery model works on cuda devices."""
+    m0, t1 = create_parameter_tensor_tuples(parameter_shape=(2, 5, 10, 10, 10), number_of_tensors=2)
+
+    # Create on CPU, transfer to GPU and run on GPU
+    model = InversionRecovery(ti=10)
+    model.cuda()
+    (signal,) = model(m0.cuda(), t1.cuda())
+    assert signal.is_cuda
+
+    # Create on GPU and run on GPU
+    model = InversionRecovery(ti=torch.as_tensor(10).cuda())
+    (signal,) = model(m0.cuda(), t1.cuda())
+    assert signal.is_cuda
+
+    # Create on GPU, transfer to CPU and run on CPU
+    model = InversionRecovery(ti=torch.as_tensor(10).cuda())
+    model.cpu()
+    (signal,) = model(m0, t1)
+    assert signal.is_cpu
diff --git a/tests/operators/models/test_molli.py b/tests/operators/models/test_molli.py
index 82b5f6c04..b0cc6cff8 100644
--- a/tests/operators/models/test_molli.py
+++ b/tests/operators/models/test_molli.py
@@ -52,3 +52,26 @@ def test_autodiff_molli():
     model = MOLLI(ti=10)
     a, b, t1 = create_parameter_tensor_tuples((2, 5, 10, 10, 10), number_of_tensors=3)
     autodiff_test(model, a, b, t1)
+
+
+@pytest.mark.cuda
+def test_molli_cuda():
+    """Test the MOLLI model works on cuda devices."""
+    a, b, t1 = create_parameter_tensor_tuples((2, 5, 10, 10, 10), number_of_tensors=3)
+
+    # Create on CPU, transfer to GPU and run on GPU
+    model = MOLLI(ti=10)
+    model.cuda()
+    (signal,) = model(a.cuda(), b.cuda(), t1.cuda())
+    assert signal.is_cuda
+
+    # Create on GPU and run on GPU
+    model = MOLLI(ti=torch.as_tensor(10).cuda())
+    (signal,) = model(a.cuda(), b.cuda(), t1.cuda())
+    assert signal.is_cuda
+
+    # Create on GPU, transfer to CPU and run on CPU
+    model = MOLLI(ti=torch.as_tensor(10).cuda())
+    model.cpu()
+    (signal,) = model(a, b, t1)
+    assert signal.is_cpu
diff --git a/tests/operators/models/test_mono_exponential_decay.py b/tests/operators/models/test_mono_exponential_decay.py
index 1aba27891..fa74538b1 100644
--- a/tests/operators/models/test_mono_exponential_decay.py
+++ b/tests/operators/models/test_mono_exponential_decay.py
@@ -44,8 +44,31 @@ def test_mono_exponential_decay_shape(parameter_shape, contrast_dim_shape, signa
     assert signal.shape == signal_shape
 
 
-def test_autodiff_exponential_decay():
+def test_autodiff_mono_exponential_decay():
     """Test autodiff works for mono-exponential decay model."""
     model = MonoExponentialDecay(decay_time=20)
     m0, decay_constant = create_parameter_tensor_tuples(parameter_shape=(2, 5, 10, 10, 10), number_of_tensors=2)
     autodiff_test(model, m0, decay_constant)
+
+
+@pytest.mark.cuda
+def test_mono_exponential_decay_cuda():
+    """Test the mono-exponential decay model works on cuda devices."""
+    m0, decay_constant = create_parameter_tensor_tuples(parameter_shape=(2, 5, 10, 10, 10), number_of_tensors=2)
+
+    # Create on CPU, transfer to GPU and run on GPU
+    model = MonoExponentialDecay(decay_time=10)
+    model.cuda()
+    (signal,) = model(m0.cuda(), decay_constant.cuda())
+    assert signal.is_cuda
+
+    # Create on GPU and run on GPU
+    model = MonoExponentialDecay(decay_time=torch.as_tensor(10).cuda())
+    (signal,) = model(m0.cuda(), decay_constant.cuda())
+    assert signal.is_cuda
+
+    # Create on GPU, transfer to CPU and run on CPU
+    model = MonoExponentialDecay(decay_time=torch.as_tensor(10).cuda())
+    model.cpu()
+    (signal,) = model(m0, decay_constant)
+    assert signal.is_cpu
diff --git a/tests/operators/models/test_saturation_recovery.py b/tests/operators/models/test_saturation_recovery.py
index 0b2406ee6..7549b72b3 100644
--- a/tests/operators/models/test_saturation_recovery.py
+++ b/tests/operators/models/test_saturation_recovery.py
@@ -49,3 +49,26 @@ def test_autodiff_aturation_recovery():
     model = SaturationRecovery(ti=10)
     m0, t1 = create_parameter_tensor_tuples((2, 5, 10, 10, 10), number_of_tensors=2)
     autodiff_test(model, m0, t1)
+
+
+@pytest.mark.cuda
+def test_saturation_recovery_cuda():
+    """Test the saturation recovery model works on cuda devices."""
+    m0, t1 = create_parameter_tensor_tuples(parameter_shape=(2, 5, 10, 10, 10), number_of_tensors=2)
+
+    # Create on CPU, transfer to GPU and run on GPU
+    model = SaturationRecovery(ti=10)
+    model.cuda()
+    (signal,) = model(m0.cuda(), t1.cuda())
+    assert signal.is_cuda
+
+    # Create on GPU and run on GPU
+    model = SaturationRecovery(ti=torch.as_tensor(10).cuda())
+    (signal,) = model(m0.cuda(), t1.cuda())
+    assert signal.is_cuda
+
+    # Create on GPU, transfer to CPU and run on CPU
+    model = SaturationRecovery(ti=torch.as_tensor(10).cuda())
+    model.cpu()
+    (signal,) = model(m0, t1)
+    assert signal.is_cpu
diff --git a/tests/operators/models/test_transient_steady_state_with_preparation.py b/tests/operators/models/test_transient_steady_state_with_preparation.py
index 5d43f8eb8..6895db359 100644
--- a/tests/operators/models/test_transient_steady_state_with_preparation.py
+++ b/tests/operators/models/test_transient_steady_state_with_preparation.py
@@ -94,3 +94,36 @@ def test_autodiff_transient_steady_state():
     )
     m0, t1, flip_angle = create_parameter_tensor_tuples(parameter_shape=(2, 5, 10, 10, 10), number_of_tensors=3)
     autodiff_test(model, m0, t1, flip_angle)
+
+
+@pytest.mark.cuda
+def test_transient_steady_state_cuda():
+    """Test the transient steady state model works on cuda devices."""
+    m0, t1, flip_angle = create_parameter_tensor_tuples(parameter_shape=(2, 5, 10, 10, 10), number_of_tensors=3)
+    contrast_dim_shape = (6,)
+    (sampling_time,) = create_parameter_tensor_tuples(contrast_dim_shape, number_of_tensors=1)
+    repetition_time, m0_scaling_preparation, delay_after_preparation = create_parameter_tensor_tuples(
+        contrast_dim_shape[1:], number_of_tensors=3
+    )
+    # Create on CPU, transfer to GPU and run on GPU
+    model = TransientSteadyStateWithPreparation(
+        sampling_time, repetition_time, m0_scaling_preparation, delay_after_preparation
+    )
+    model.cuda()
+    (signal,) = model(m0.cuda(),
t1.cuda(), flip_angle.cuda()) + assert signal.is_cuda + + # Create on GPU and run on GPU + model = TransientSteadyStateWithPreparation( + sampling_time.cuda(), repetition_time, m0_scaling_preparation, delay_after_preparation + ) + (signal,) = model(m0.cuda(), t1.cuda(), flip_angle.cuda()) + assert signal.is_cuda + + # Create on GPU, transfer to CPU and run on CPU + model = TransientSteadyStateWithPreparation( + sampling_time.cuda(), repetition_time, m0_scaling_preparation, delay_after_preparation + ) + model.cpu() + (signal,) = model(m0, t1, flip_angle) + assert signal.is_cpu diff --git a/tests/operators/models/test_wasabi.py b/tests/operators/models/test_wasabi.py index 3e58e0a05..0f5b13aa2 100644 --- a/tests/operators/models/test_wasabi.py +++ b/tests/operators/models/test_wasabi.py @@ -1,5 +1,6 @@ """Tests for the WASABI signal model.""" +import pytest import torch from mrpro.operators.models import WASABI from tests import autodiff_test @@ -53,3 +54,26 @@ def test_autodiff_WASABI(): offset, b0_shift, rb1, c, d = create_data(offset_max=300, n_offsets=2) wasabi_model = WASABI(offsets=offset) autodiff_test(wasabi_model, b0_shift, rb1, c, d) + + +@pytest.mark.cuda +def test_wasabi_cuda(): + """Test the WASABI model works on cuda devices.""" + offset, b0_shift, rb1, c, d = create_data(offset_max=300, n_offsets=2) + + # Create on CPU, transfer to GPU and run on GPU + model = WASABI(offset) + model.cuda() + (signal,) = model(b0_shift.cuda(), rb1.cuda(), c.cuda(), d.cuda()) + assert signal.is_cuda + + # Create on GPU and run on GPU + model = WASABI(offset.cuda()) + (signal,) = model(b0_shift.cuda(), rb1.cuda(), c.cuda(), d.cuda()) + assert signal.is_cuda + + # Create on GPU, transfer to CPU and run on CPU + model = WASABI(offset.cuda()) + model.cpu() + (signal,) = model(b0_shift, rb1, c, d) + assert signal.is_cpu diff --git a/tests/operators/models/test_wasabiti.py b/tests/operators/models/test_wasabiti.py index 637f9ff9e..442e2092a 100644 --- a/tests/operators/models/test_wasabiti.py +++ b/tests/operators/models/test_wasabiti.py @@ -85,3 +85,27 @@ def test_autodiff_WASABITI(): trec = torch.ones_like(offset) * t1 wasabiti_model = WASABITI(offsets=offset, trec=trec) autodiff_test(wasabiti_model, b0_shift, rb1, t1) + + +@pytest.mark.cuda +def test_wasabiti_cuda(): + """Test the WASABITI model works on cuda devices.""" + offset, b0_shift, rb1, t1 = create_data(offset_max=300, n_offsets=2) + trec = torch.ones_like(offset) * t1 + + # Create on CPU, transfer to GPU and run on GPU + model = WASABITI(offset, trec) + model.cuda() + (signal,) = model(b0_shift.cuda(), rb1.cuda(), t1.cuda()) + assert signal.is_cuda + + # Create on GPU and run on GPU + model = WASABITI(offset.cuda(), trec) + (signal,) = model(b0_shift.cuda(), rb1.cuda(), t1.cuda()) + assert signal.is_cuda + + # Create on GPU, transfer to CPU and run on CPU + model = WASABITI(offset.cuda(), trec) + model.cpu() + (signal,) = model(b0_shift, rb1, t1) + assert signal.is_cpu diff --git a/tests/operators/test_cartesian_sampling_op.py b/tests/operators/test_cartesian_sampling_op.py index 0fd320212..8e880bae0 100644 --- a/tests/operators/test_cartesian_sampling_op.py +++ b/tests/operators/test_cartesian_sampling_op.py @@ -7,7 +7,12 @@ from mrpro.operators import CartesianSamplingOp from typing_extensions import Unpack -from tests import RandomGenerator, dotproduct_adjointness_test +from tests import ( + RandomGenerator, + dotproduct_adjointness_test, + forward_mode_autodiff_of_linear_operator_test, + gradient_of_linear_operator_test, +) from 
tests.conftest import create_traj @@ -90,23 +95,7 @@ def subsample_traj( return trajectory -@pytest.mark.parametrize( - 'sampling', - [ - 'random', - 'partial_echo', - 'partial_fourier', - 'regular_undersampling', - 'random_undersampling', - 'different_random_undersampling', - 'cartesian_and_non_cartesian', - 'kx_ky_along_k0', - 'kx_ky_along_k0_undersampling', - ], -) -def test_cart_sampling_op_fwd_adj(sampling): - """Test adjoint property of Cartesian sampling operator.""" - +def create_cart_sampling_op_and_range_domain(sampling): # Create 3D uniform trajectory k_shape = (2, 5, 20, 40, 60) nkx = (2, 1, 1, 60) @@ -125,7 +114,41 @@ def test_cart_sampling_op_fwd_adj(sampling): random_generator = RandomGenerator(seed=0) u = random_generator.complex64_tensor(size=k_shape) v = random_generator.complex64_tensor(size=k_shape[:2] + trajectory.as_tensor().shape[2:]) - dotproduct_adjointness_test(sampling_op, u, v) + return sampling_op, u, v + + +SAMPLING_PARAMETERS = pytest.mark.parametrize( + 'sampling', + [ + 'random', + 'partial_echo', + 'partial_fourier', + 'regular_undersampling', + 'random_undersampling', + 'different_random_undersampling', + 'cartesian_and_non_cartesian', + 'kx_ky_along_k0', + 'kx_ky_along_k0_undersampling', + ], +) + + +@SAMPLING_PARAMETERS +def test_cart_sampling_op_fwd_adj(sampling): + """Test adjoint property of the Cartesian sampling operator.""" + dotproduct_adjointness_test(*create_cart_sampling_op_and_range_domain(sampling)) + + +@SAMPLING_PARAMETERS +def test_cart_sampling_op_grad(sampling): + """Test the gradient of the Cartesian sampling operator.""" + gradient_of_linear_operator_test(*create_cart_sampling_op_and_range_domain(sampling)) + + +@SAMPLING_PARAMETERS +def test_cart_sampling_op_forward_mode_autodiff(sampling): + """Test forward-mode autodiff of the Cartesian sampling operator.""" + forward_mode_autodiff_of_linear_operator_test(*create_cart_sampling_op_and_range_domain(sampling)) @pytest.mark.parametrize( @@ -144,21 +167,7 @@ def test_cart_sampling_op_fwd_adj(sampling): ) def test_cart_sampling_op_gram(sampling): """Test adjoint gram of Cartesian sampling operator.""" - - # Create 3D uniform trajectory - k_shape = (2, 5, 20, 40, 60) - nkx = (2, 1, 1, 60) - nky = (2, 1, 40, 1) - nkz = (2, 20, 1, 1) - type_kx = 'uniform' - type_ky = 'non-uniform' if sampling == 'cartesian_and_non_cartesian' else 'uniform' - type_kz = 'non-uniform' if sampling == 'cartesian_and_non_cartesian' else 'uniform' - trajectory = create_traj(k_shape, nkx, nky, nkz, type_kx, type_ky, type_kz) - trajectory = subsample_traj(trajectory, sampling, k_shape) - - encoding_matrix = SpatialDimension(k_shape[-3], k_shape[-2], k_shape[-1]) - sampling_op = CartesianSamplingOp(encoding_matrix=encoding_matrix, traj=trajectory) - u = RandomGenerator(seed=0).complex64_tensor(size=k_shape) + sampling_op, u, _ = create_cart_sampling_op_and_range_domain(sampling) (expected,) = (sampling_op.H @ sampling_op)(u) (actual,) = sampling_op.gram(u) torch.testing.assert_close(actual, expected, rtol=1e-3, atol=1e-3) diff --git a/tests/operators/test_density_compensation_op.py b/tests/operators/test_density_compensation_op.py index 96b59547e..bb8d57bf7 100644 --- a/tests/operators/test_density_compensation_op.py +++ b/tests/operators/test_density_compensation_op.py @@ -4,26 +4,44 @@ from mrpro.data import DcfData from mrpro.operators import DensityCompensationOp -from tests import RandomGenerator, dotproduct_adjointness_test +from tests import ( + RandomGenerator, + dotproduct_adjointness_test, + 
forward_mode_autodiff_of_linear_operator_test, + gradient_of_linear_operator_test, +) -def test_density_compensation_op_adjointness(): - """Test density operator adjoint property.""" +def create_density_compensation_op_and_range_domain(): + """Create a density compensation operator and an element from domain and range.""" random_generator = RandomGenerator(seed=0) n_zyx = (2, 3, 4) n_other = (5, 6, 7) n_coils = 8 - # Generate random dcf and operator random_tensor = random_generator.complex64_tensor(size=(*n_other, *n_zyx)) random_dcf = DcfData(data=random_tensor) dcf_op = DensityCompensationOp(random_dcf) - # Check adjoint property u = random_generator.complex64_tensor(size=(*n_other, n_coils, *n_zyx)) v = random_generator.complex64_tensor(size=(*n_other, n_coils, *n_zyx)) - dotproduct_adjointness_test(dcf_op, u, v) + return dcf_op, u, v + + +def test_density_compensation_op_adjointness(): + """Test density operator adjoint property.""" + dotproduct_adjointness_test(*create_density_compensation_op_and_range_domain()) + + +def test_density_compensation_op_grad(): + """Test the gradient of the density compensation operator.""" + gradient_of_linear_operator_test(*create_density_compensation_op_and_range_domain()) + + +def test_density_compensation_op_forward_mode_autodiff(): + """Test forward-mode autodiff of the density compensation operator.""" + forward_mode_autodiff_of_linear_operator_test(*create_density_compensation_op_and_range_domain()) def test_density_compensation_op_dcfdata_tensor(): diff --git a/tests/operators/test_einsum_op.py b/tests/operators/test_einsum_op.py index 8e098ab32..0fd288db4 100644 --- a/tests/operators/test_einsum_op.py +++ b/tests/operators/test_einsum_op.py @@ -4,11 +4,26 @@ import torch from mrpro.operators.EinsumOp import EinsumOp -from tests import RandomGenerator, dotproduct_adjointness_test +from tests import ( + RandomGenerator, + dotproduct_adjointness_test, + forward_mode_autodiff_of_linear_operator_test, + gradient_of_linear_operator_test, +) -@pytest.mark.parametrize('dtype', ['float32', 'complex128']) -@pytest.mark.parametrize( +def create_einsum_op_and_range_domain(tensor_shape, input_shape, rule, output_shape, dtype): + """Create an Einsum operator and an element from range and domain.""" + generator = RandomGenerator(seed=0) + generate_tensor = getattr(generator, f'{dtype}_tensor') + tensor = generate_tensor(size=tensor_shape) + u = generate_tensor(size=input_shape) + v = generate_tensor(size=output_shape) + einsum_op = EinsumOp(tensor, rule) + return einsum_op, u, v + + +EINSUM_PARAMETERS = pytest.mark.parametrize( ('tensor_shape', 'input_shape', 'rule', 'output_shape'), [ ((3, 3), (3), 'i j,j->i', (3)), # matrix vector product @@ -18,15 +33,33 @@ ((3, 5, 4, 2), (3, 2, 5), 'l ... 
i j, l j k -> k i l', (5, 4, 3)),  # general tensor contraction
     ],
 )
+
+
+@pytest.mark.parametrize('dtype', ['float32', 'complex128'])
+@EINSUM_PARAMETERS
 def test_einsum_op(tensor_shape, input_shape, rule, output_shape, dtype):
     """Test adjointness and shape."""
-    generator = RandomGenerator(seed=0)
-    generate_tensor = getattr(generator, f'{dtype}_tensor')
-    tensor = generate_tensor(size=tensor_shape)
-    u = generate_tensor(size=input_shape)
-    v = generate_tensor(size=output_shape)
-    operator = EinsumOp(tensor, rule)
-    dotproduct_adjointness_test(operator, u, v)
+    dotproduct_adjointness_test(
+        *create_einsum_op_and_range_domain(tensor_shape, input_shape, rule, output_shape, dtype)
+    )
+
+
+@pytest.mark.parametrize('dtype', ['float32', 'complex128'])
+@EINSUM_PARAMETERS
+def test_einsum_op_grad(tensor_shape, input_shape, rule, output_shape, dtype):
+    """Test the gradient of the einsum operator."""
+    gradient_of_linear_operator_test(
+        *create_einsum_op_and_range_domain(tensor_shape, input_shape, rule, output_shape, dtype)
+    )
+
+
+@pytest.mark.parametrize('dtype', ['float32', 'complex128'])
+@EINSUM_PARAMETERS
+def test_einsum_op_forward_mode_autodiff(tensor_shape, input_shape, rule, output_shape, dtype):
+    """Test forward-mode autodiff of the einsum operator."""
+    forward_mode_autodiff_of_linear_operator_test(
+        *create_einsum_op_and_range_domain(tensor_shape, input_shape, rule, output_shape, dtype)
+    )
 
 
 @pytest.mark.parametrize(
diff --git a/tests/operators/test_fast_fourier_op.py b/tests/operators/test_fast_fourier_op.py
index b7fd94576..ed327a6fc 100644
--- a/tests/operators/test_fast_fourier_op.py
+++ b/tests/operators/test_fast_fourier_op.py
@@ -6,7 +6,24 @@
 from mrpro.data import SpatialDimension
 from mrpro.operators import FastFourierOp
 
-from tests import RandomGenerator, dotproduct_adjointness_test
+from tests import (
+    RandomGenerator,
+    dotproduct_adjointness_test,
+    forward_mode_autodiff_of_linear_operator_test,
+    gradient_of_linear_operator_test,
+)
+
+
+def create_fast_fourier_op_and_range_domain(recon_matrix, encoding_matrix):
+    """Create a fast Fourier operator and an element from domain and range."""
+    # Create test data
+    generator = RandomGenerator(seed=0)
+    u = generator.complex64_tensor(recon_matrix)
+    v = generator.complex64_tensor(encoding_matrix)
+
+    # Create operator and apply
+    ff_op = FastFourierOp(recon_matrix=recon_matrix, encoding_matrix=encoding_matrix)
+    return ff_op, u, v
 
 
 @pytest.mark.parametrize(('npoints', 'a'), [(100, 20), (300, 20)])
@@ -34,7 +51,7 @@ def test_fast_fourier_op_forward(npoints, a):
     torch.testing.assert_close(igauss_fwd, kgauss)
 
 
-@pytest.mark.parametrize(
+MATRIX_PARAMETERS = pytest.mark.parametrize(
     ('encoding_matrix', 'recon_matrix'),
     [
         ((101, 201, 50), (13, 221, 64)),
@@ -43,17 +60,26 @@ def test_fast_fourier_op_forward(npoints, a):
         ((100, 200, 50), (14, 220, 64)),
         ((101, 201, 50), (14, 220, 64)),
        ((100, 200, 50), (13, 221, 64)),
     ],
 )
+
+
+@MATRIX_PARAMETERS
 def test_fast_fourier_op_adjoint(encoding_matrix, recon_matrix):
     """Test adjointness of Fast Fourier Op."""
+    dotproduct_adjointness_test(*create_fast_fourier_op_and_range_domain(recon_matrix, encoding_matrix))
 
-    # Create test data
-    generator = RandomGenerator(seed=0)
-    u = generator.complex64_tensor(recon_matrix)
-    v = generator.complex64_tensor(encoding_matrix)
 
-    # Create operator and apply
-    ff_op = FastFourierOp(recon_matrix=recon_matrix, encoding_matrix=encoding_matrix)
-    dotproduct_adjointness_test(ff_op, u, v)
+
+@MATRIX_PARAMETERS
+def test_fast_fourier_op_grad(encoding_matrix, recon_matrix):
+    """Test the gradient of the fast Fourier operator."""
operator.""" + gradient_of_linear_operator_test(*create_fast_fourier_op_and_range_domain(recon_matrix, encoding_matrix)) + + +@MATRIX_PARAMETERS +def test_density_compensation_op_forward_mode_autodiff(encoding_matrix, recon_matrix): + """Test forward-mode autodiff of the fast Fourier operator.""" + forward_mode_autodiff_of_linear_operator_test( + *create_fast_fourier_op_and_range_domain(recon_matrix, encoding_matrix) + ) def test_fast_fourier_op_spatial_dim(): diff --git a/tests/operators/test_finite_difference_op.py b/tests/operators/test_finite_difference_op.py index ea21ae919..5d66cf986 100644 --- a/tests/operators/test_finite_difference_op.py +++ b/tests/operators/test_finite_difference_op.py @@ -5,7 +5,25 @@ from einops import repeat from mrpro.operators import FiniteDifferenceOp -from tests import RandomGenerator, dotproduct_adjointness_test +from tests import ( + RandomGenerator, + dotproduct_adjointness_test, + forward_mode_autodiff_of_linear_operator_test, + gradient_of_linear_operator_test, +) + + +def create_finite_difference_op_and_range_domain(dim, mode, pad_mode): + """Create a finite difference operator and an element from domain and range.""" + random_generator = RandomGenerator(seed=0) + im_shape = (5, 6, 4, 10, 20, 16) + + # Generate finite difference operator + finite_difference_op = FiniteDifferenceOp(dim, mode, pad_mode) + + u = random_generator.complex64_tensor(size=im_shape) + v = random_generator.complex64_tensor(size=(len(dim), *im_shape)) + return finite_difference_op, u, v @pytest.mark.parametrize('mode', ['central', 'forward', 'backward']) @@ -31,14 +49,20 @@ def test_finite_difference_op_forward(mode): @pytest.mark.parametrize('dim', [(-1,), (-2, -1), (-3, -2, -1), (-4,), (1, 3)]) def test_finite_difference_op_adjointness(dim, mode, pad_mode): """Test finite difference operator adjoint property.""" + dotproduct_adjointness_test(*create_finite_difference_op_and_range_domain(dim, mode, pad_mode)) - random_generator = RandomGenerator(seed=0) - im_shape = (5, 6, 4, 10, 20, 16) - # Generate finite difference operator - finite_difference_op = FiniteDifferenceOp(dim, mode, pad_mode) +@pytest.mark.parametrize('pad_mode', ['zeros', 'circular']) +@pytest.mark.parametrize('mode', ['central', 'forward', 'backward']) +@pytest.mark.parametrize('dim', [(-1,), (-2, -1), (-3, -2, -1), (-4,), (1, 3)]) +def test_finite_difference_op_grad(dim, mode, pad_mode): + """Test the gradient of finite difference operator.""" + gradient_of_linear_operator_test(*create_finite_difference_op_and_range_domain(dim, mode, pad_mode)) - # Check adjoint property - u = random_generator.complex64_tensor(size=im_shape) - v = random_generator.complex64_tensor(size=(len(dim), *im_shape)) - dotproduct_adjointness_test(finite_difference_op, u, v) + +@pytest.mark.parametrize('pad_mode', ['zeros', 'circular']) +@pytest.mark.parametrize('mode', ['central', 'forward', 'backward']) +@pytest.mark.parametrize('dim', [(-1,), (-2, -1), (-3, -2, -1), (-4,), (1, 3)]) +def test_finite_difference_op_forward_mode_autodiff(dim, mode, pad_mode): + """Test the forward-mode autodiff of the finite difference operator.""" + forward_mode_autodiff_of_linear_operator_test(*create_finite_difference_op_and_range_domain(dim, mode, pad_mode)) diff --git a/tests/operators/test_fourier_op.py b/tests/operators/test_fourier_op.py index c7c58c266..c5c096b6a 100644 --- a/tests/operators/test_fourier_op.py +++ b/tests/operators/test_fourier_op.py @@ -6,7 +6,12 @@ from mrpro.data.traj_calculators import KTrajectoryCartesian from 
 from mrpro.operators import FourierOp
 
-from tests import RandomGenerator, dotproduct_adjointness_test
+from tests import (
+    RandomGenerator,
+    dotproduct_adjointness_test,
+    forward_mode_autodiff_of_linear_operator_test,
+    gradient_of_linear_operator_test,
+)
 from tests.conftest import COMMON_MR_TRAJECTORIES, create_traj
 
 
@@ -20,12 +25,8 @@ def create_data(im_shape, k_shape, nkx, nky, nkz, type_kx, type_ky, type_kz):
     return img, trajectory
 
 
-@COMMON_MR_TRAJECTORIES
-def test_fourier_op_fwd_adj_property(
-    im_shape, k_shape, nkx, nky, nkz, type_kx, type_ky, type_kz, type_k0, type_k1, type_k2
-):
-    """Test adjoint property of Fourier operator."""
-
+def create_fourier_op_and_range_domain(im_shape, k_shape, nkx, nky, nkz, type_kx, type_ky, type_kz):
+    """Create a Fourier operator and an element from domain and range."""
     # generate random images and k-space trajectories
     img, trajectory = create_data(im_shape, k_shape, nkx, nky, nkz, type_kx, type_ky, type_kz)
 
@@ -37,15 +38,41 @@ def test_fourier_op_fwd_adj_property(
         int(trajectory.kx.max() - trajectory.kx.min() + 1),
     )
     fourier_op = FourierOp(recon_matrix=recon_matrix, encoding_matrix=encoding_matrix, traj=trajectory)
-    # apply forward operator
     (kdata,) = fourier_op(img)
-    # test adjoint property; i.e. <F u, v> == <u, F^H v> for all u,v
     random_generator = RandomGenerator(seed=0)
     u = random_generator.complex64_tensor(size=img.shape)
     v = random_generator.complex64_tensor(size=kdata.shape)
-    dotproduct_adjointness_test(fourier_op, u, v)
+    return fourier_op, u, v
+
+
+@COMMON_MR_TRAJECTORIES
+def test_fourier_op_fwd_adj_property(
+    im_shape, k_shape, nkx, nky, nkz, type_kx, type_ky, type_kz, type_k0, type_k1, type_k2
+):
+    """Test adjoint property of Fourier operator."""
+    dotproduct_adjointness_test(
+        *create_fourier_op_and_range_domain(im_shape, k_shape, nkx, nky, nkz, type_kx, type_ky, type_kz)
+    )
+
+
+@COMMON_MR_TRAJECTORIES
+def test_fourier_op_grad(im_shape, k_shape, nkx, nky, nkz, type_kx, type_ky, type_kz, type_k0, type_k1, type_k2):
+    """Test gradient of Fourier operator."""
+    gradient_of_linear_operator_test(
+        *create_fourier_op_and_range_domain(im_shape, k_shape, nkx, nky, nkz, type_kx, type_ky, type_kz)
+    )
+
+
+@COMMON_MR_TRAJECTORIES
+def test_fourier_op_forward_mode_autodiff(
+    im_shape, k_shape, nkx, nky, nkz, type_kx, type_ky, type_kz, type_k0, type_k1, type_k2
+):
+    """Test forward-mode autodiff of Fourier operator."""
+    forward_mode_autodiff_of_linear_operator_test(
+        *create_fourier_op_and_range_domain(im_shape, k_shape, nkx, nky, nkz, type_kx, type_ky, type_kz)
+    )
 
 
 @COMMON_MR_TRAJECTORIES
diff --git a/tests/operators/test_pca_compression_op.py b/tests/operators/test_pca_compression_op.py
index e73bf3951..eff7e2cdd 100644
--- a/tests/operators/test_pca_compression_op.py
+++ b/tests/operators/test_pca_compression_op.py
@@ -3,10 +3,29 @@
 import pytest
 from mrpro.operators import PCACompressionOp
 
-from tests import RandomGenerator, dotproduct_adjointness_test
+from tests import (
+    RandomGenerator,
+    dotproduct_adjointness_test,
+    forward_mode_autodiff_of_linear_operator_test,
+    gradient_of_linear_operator_test,
+)
+
+
+def create_pca_compression_op_and_domain_range(init_data_shape, input_shape, n_components):
+    """Create a PCA compression operator and an element from domain and range."""
+    # Create test data
+    generator = RandomGenerator(seed=0)
+    data_to_calculate_compression_matrix_from = generator.complex64_tensor(init_data_shape)
+    u = generator.complex64_tensor(input_shape)
+    output_shape = (*input_shape[:-1], n_components)
+    v = generator.complex64_tensor(output_shape)
+
+    # Create operator and apply
+    pca_comp_op = PCACompressionOp(data=data_to_calculate_compression_matrix_from, n_components=n_components)
+    return pca_comp_op, u, v
 
 
-@pytest.mark.parametrize(
+SHAPE_PARAMETERS = pytest.mark.parametrize(
     ('init_data_shape', 'input_shape', 'n_components'),
     [
         ((40, 10), (100, 10), 6),
@@ -15,19 +34,28 @@
         ((40, 10), (3, 4, 5, 100, 10), 3),
         ((3, 4, 40, 10), (7, 3, 4, 100, 10), 3),
     ],
 )
+
+
+@SHAPE_PARAMETERS
 def test_pca_compression_op_adjoint(init_data_shape, input_shape, n_components):
     """Test adjointness of PCA Compression Op."""
+    dotproduct_adjointness_test(*create_pca_compression_op_and_domain_range(init_data_shape, input_shape, n_components))
 
-    # Create test data
-    generator = RandomGenerator(seed=0)
-    data_to_calculate_compression_matrix_from = generator.complex64_tensor(init_data_shape)
-    u = generator.complex64_tensor(input_shape)
-    output_shape = (*input_shape[:-1], n_components)
-    v = generator.complex64_tensor(output_shape)
 
-    # Create operator and apply
-    pca_comp_op = PCACompressionOp(data=data_to_calculate_compression_matrix_from, n_components=n_components)
-    dotproduct_adjointness_test(pca_comp_op, u, v)
+@SHAPE_PARAMETERS
+def test_pca_compression_op_grad(init_data_shape, input_shape, n_components):
+    """Test gradient of PCA Compression Op."""
+    gradient_of_linear_operator_test(
+        *create_pca_compression_op_and_domain_range(init_data_shape, input_shape, n_components)
+    )
+
+
+@SHAPE_PARAMETERS
+def test_pca_compression_op_forward_mode_autodiff(init_data_shape, input_shape, n_components):
+    """Test forward-mode autodiff of PCA Compression Op."""
+    forward_mode_autodiff_of_linear_operator_test(
+        *create_pca_compression_op_and_domain_range(init_data_shape, input_shape, n_components)
+    )
 
 
 def test_pca_compression_op_wrong_shapes():
diff --git a/tests/operators/test_rearrangeop.py b/tests/operators/test_rearrangeop.py
index ecacafb42..4624c920a 100644
--- a/tests/operators/test_rearrangeop.py
+++ b/tests/operators/test_rearrangeop.py
@@ -3,11 +3,14 @@
 import pytest
 from mrpro.operators.RearrangeOp import RearrangeOp
 
-from tests import RandomGenerator, dotproduct_adjointness_test
-
+from tests import (
+    RandomGenerator,
+    dotproduct_adjointness_test,
+    forward_mode_autodiff_of_linear_operator_test,
+    gradient_of_linear_operator_test,
+)
 
-@pytest.mark.parametrize('dtype', ['float32', 'complex128'])
-@pytest.mark.parametrize(
+SHAPE_PARAMETERS = pytest.mark.parametrize(
     ('input_shape', 'rule', 'output_shape', 'additional_info'),
     [
         ((1, 2, 3), 'a b c-> b a c', (2, 1, 3), None),  # swap axes
@@ -16,8 +19,12 @@
         ((1, 2, 3), '... a b->... (b a)', (1, 6), None),  # flatten
         ((1, 6), '... (b a) -> ... b a', (1, 3, 2), {'b': 3}),  # unflatten
     ],
     ids=['swap_axes', 'flatten', 'unflatten'],
 )
-def test_einsum_op(input_shape, rule, output_shape, additional_info, dtype):
-    """Test adjointness and shape."""
+
+
+@pytest.mark.parametrize('dtype', ['float32', 'complex128'])
+@SHAPE_PARAMETERS
+def test_rearrange_op_adjointness(input_shape, rule, output_shape, additional_info, dtype):
+    """Test adjointness and shape of Rearrange Op."""
     generator = RandomGenerator(seed=0)
     generate_tensor = getattr(generator, f'{dtype}_tensor')
     u = generate_tensor(size=input_shape)
@@ -26,6 +33,30 @@
     dotproduct_adjointness_test(operator, u, v)
 
 
+@pytest.mark.parametrize('dtype', ['float32', 'complex128'])
+@SHAPE_PARAMETERS
+def test_rearrange_op_grad(input_shape, rule, output_shape, additional_info, dtype):
+    """Test gradient of Rearrange Op."""
+    generator = RandomGenerator(seed=0)
+    generate_tensor = getattr(generator, f'{dtype}_tensor')
+    u = generate_tensor(size=input_shape)
+    v = generate_tensor(size=output_shape)
+    operator = RearrangeOp(rule, additional_info)
+    gradient_of_linear_operator_test(operator, u, v)
+
+
+@pytest.mark.parametrize('dtype', ['float32', 'complex128'])
+@SHAPE_PARAMETERS
+def test_rearrange_op_forward_mode_autodiff(input_shape, rule, output_shape, additional_info, dtype):
+    """Test forward-mode autodiff of Rearrange Op."""
+    generator = RandomGenerator(seed=0)
+    generate_tensor = getattr(generator, f'{dtype}_tensor')
+    u = generate_tensor(size=input_shape)
+    v = generate_tensor(size=output_shape)
+    operator = RearrangeOp(rule, additional_info)
+    forward_mode_autodiff_of_linear_operator_test(operator, u, v)
+
+
 def test_einsum_op_invalid():
     """Test with invalid rule."""
     with pytest.raises(ValueError, match='pattern should match'):
diff --git a/tests/operators/test_sensitivity_op.py b/tests/operators/test_sensitivity_op.py
index 5576a9892..e7dea6fe0 100644
--- a/tests/operators/test_sensitivity_op.py
+++ b/tests/operators/test_sensitivity_op.py
@@ -5,27 +5,44 @@
 from mrpro.data import CsmData, QHeader, SpatialDimension
 from mrpro.operators import SensitivityOp
 
-from tests import RandomGenerator, dotproduct_adjointness_test
+from tests import (
+    RandomGenerator,
+    dotproduct_adjointness_test,
+    forward_mode_autodiff_of_linear_operator_test,
+    gradient_of_linear_operator_test,
+)
 
 
-def test_sensitivity_op_adjointness():
-    """Test Sensitivity operator adjoint property."""
-
+def create_sensitivity_op_and_domain_range():
+    """Create a sensitivity operator and an element from domain and range."""
     random_generator = RandomGenerator(seed=0)
     n_zyx = (2, 3, 4)
     n_other = (5, 6, 7)
     n_coils = 4
 
-    # Generate sensitivity operator
     random_tensor = random_generator.complex64_tensor(size=(*n_other, n_coils, *n_zyx))
     random_csmdata = CsmData(data=random_tensor, header=QHeader(fov=SpatialDimension(1.0, 1.0, 1.0)))
     sensitivity_op = SensitivityOp(random_csmdata)
 
-    # Check adjoint property
     u = random_generator.complex64_tensor(size=(*n_other, 1, *n_zyx))
     v = random_generator.complex64_tensor(size=(*n_other, n_coils, *n_zyx))
-    dotproduct_adjointness_test(sensitivity_op, u, v)
+    return sensitivity_op, u, v
+
+
+def test_sensitivity_op_adjointness():
+    """Test Sensitivity operator adjoint property."""
+    dotproduct_adjointness_test(*create_sensitivity_op_and_domain_range())
+
+
+def test_sensitivity_op_grad():
+    """Test gradient of sensitivity operator."""
+    gradient_of_linear_operator_test(*create_sensitivity_op_and_domain_range())
+
+
+def test_sensitivity_op_forward_mode_autodiff():
+    """Test forward-mode autodiff of sensitivity operator."""
+    forward_mode_autodiff_of_linear_operator_test(*create_sensitivity_op_and_domain_range())
 
 
 def test_sensitivity_op_csmdata_tensor():
diff --git a/tests/operators/test_slice_projection_op.py b/tests/operators/test_slice_projection_op.py
index 53209e98d..23f91800c 100644
--- a/tests/operators/test_slice_projection_op.py
+++ b/tests/operators/test_slice_projection_op.py
@@ -9,7 +9,33 @@
 from mrpro.operators import SliceProjectionOp
 from mrpro.utils.slice_profiles import SliceGaussian, SliceInterpolate, SliceSmoothedRectangular
 
-from tests import RandomGenerator, dotproduct_adjointness_test
+from tests import (
+    RandomGenerator,
+    dotproduct_adjointness_test,
+    forward_mode_autodiff_of_linear_operator_test,
+    gradient_of_linear_operator_test,
+)
+
+
+def create_slice_projection_op_and_domain_range(optimize_for, dtype):
+    """Create a slice projection operator and an element from domain and
range.""" + rng = getattr(RandomGenerator(314), f'{dtype}_tensor') + operator_dtype = getattr(torch, dtype).to_real() + input_shape = SpatialDimension(10, 20, 30) + slice_rotation = None + slice_shift = 0.0 + slice_profile = 1.0 + operator = SliceProjectionOp( + input_shape=input_shape, + slice_rotation=slice_rotation, + slice_shift=slice_shift, + slice_profile=slice_profile, + optimize_for=optimize_for, + ) + operator = operator.to(operator_dtype) + u = rng((1, *input_shape.zyx)) + v = rng((1, 1, 1, max(input_shape.zyx), max(input_shape.zyx))) + return operator, u, v def test_slice_projection_op_cube_basic(): @@ -98,23 +124,7 @@ def test_slice_projection_width_error(): @pytest.mark.parametrize('dtype', ['complex64', 'float64', 'float32']) @pytest.mark.parametrize('optimize_for', ['forward', 'adjoint', 'both']) def test_slice_projection_op_basic_adjointness(optimize_for, dtype): - rng = getattr(RandomGenerator(314), f'{dtype}_tensor') - operator_dtype = getattr(torch, dtype).to_real() - input_shape = SpatialDimension(10, 20, 30) - slice_rotation = None - slice_shift = 0.0 - slice_profile = 1.0 - operator = SliceProjectionOp( - input_shape=input_shape, - slice_rotation=slice_rotation, - slice_shift=slice_shift, - slice_profile=slice_profile, - optimize_for=optimize_for, - ) - operator = operator.to(operator_dtype) - u = rng((1, *input_shape.zyx)) - v = rng((1, 1, 1, max(input_shape.zyx), max(input_shape.zyx))) - dotproduct_adjointness_test(operator, u, v) + dotproduct_adjointness_test(*create_slice_projection_op_and_domain_range(optimize_for, dtype)) def test_slice_projection_op_slice_batching(): @@ -189,3 +199,20 @@ def test_slice_projection_op_backward_is_adjoint(optimize_for, dtype, direction) dotproduct_range = torch.vdot(forward_u.flatten(), v.flatten()) dotproduct_domain = torch.vdot(u.flatten().flatten(), adjoint_v.flatten()) torch.testing.assert_close(dotproduct_range, dotproduct_domain) + + +# Sparse tensors do currently not work with torch.func. 
See https://github.com/pytorch/pytorch/issues/136357 +@pytest.mark.parametrize('dtype', ['complex64', 'float64', 'float32']) +@pytest.mark.parametrize('optimize_for', ['forward', 'adjoint', 'both']) +def test_slice_projection_op_grad(optimize_for, dtype): + """Test gradient of slice projection operator.""" + with pytest.raises(RuntimeError, match='Sparse CSR tensors'): + gradient_of_linear_operator_test(*create_slice_projection_op_and_domain_range(optimize_for, dtype)) + + +@pytest.mark.parametrize('dtype', ['complex64', 'float64', 'float32']) +@pytest.mark.parametrize('optimize_for', ['forward', 'adjoint', 'both']) +def test_slice_projection_op_forward_mode_autodiff(optimize_for, dtype): + """Test forward-mode autodiff of slice projection operator.""" + with pytest.raises(RuntimeError, match='Sparse CSR tensors'): + forward_mode_autodiff_of_linear_operator_test(*create_slice_projection_op_and_domain_range(optimize_for, dtype)) diff --git a/tests/operators/test_wavelet_op.py b/tests/operators/test_wavelet_op.py index 92de01286..2f8efca7b 100644 --- a/tests/operators/test_wavelet_op.py +++ b/tests/operators/test_wavelet_op.py @@ -8,7 +8,34 @@ from ptwt.conv_transform_2 import wavedec2 from ptwt.conv_transform_3 import wavedec3 -from tests import RandomGenerator, dotproduct_adjointness_test, linear_operator_unitary_test, operator_isometry_test +from tests import ( + RandomGenerator, + dotproduct_adjointness_test, + forward_mode_autodiff_of_linear_operator_test, + gradient_of_linear_operator_test, + linear_operator_unitary_test, + operator_isometry_test, +) + + +def create_wavelet_op_and_domain_range(im_shape, domain_shape, dim, wavelet_name): + """Create a wavelet operator and an element from domain and range.""" + random_generator = RandomGenerator(seed=0) + + wavelet_op = WaveletOp(domain_shape=domain_shape, dim=dim, wavelet_name=wavelet_name) + + # calculate 1D length of wavelet coefficients + wavelet_stack_length = torch.sum(torch.as_tensor([(np.prod(shape)) for shape in wavelet_op.coefficients_shape])) + + # sorted and normed dimensions needed to correctly calculate range + dim_sorted = sorted([d % len(im_shape) for d in dim], reverse=True) + range_shape = list(im_shape) + range_shape[dim_sorted[-1]] = int(wavelet_stack_length) + [range_shape.pop(d) for d in dim_sorted[:-1]] + + u = random_generator.complex64_tensor(size=im_shape) + v = random_generator.complex64_tensor(size=range_shape) + return wavelet_op, u, v @pytest.mark.parametrize( @@ -88,8 +115,7 @@ def test_wavelet_op_complex_real_shape(): assert wavelet_op.adjoint(coeff_complex)[0].shape == wavelet_op.adjoint(coeff_real)[0].shape -@pytest.mark.parametrize('wavelet_name', ['haar', 'db4']) -@pytest.mark.parametrize( +SHAPE_PARAMETERS = pytest.mark.parametrize( ('im_shape', 'domain_shape', 'dim'), [ ((1, 5, 20, 30), (30,), (-1,)), @@ -101,9 +127,12 @@ def test_wavelet_op_complex_real_shape(): ((6, 10, 20, 30), (6, 20, 30), (-4, -2, -1)), ((6, 10, 20, 30), (20, 30, 6), (-2, -1, -4)), ((6, 10, 20, 30), (20, 30, 6), (2, 3, 0)), - ((5, 10, 20, 30), None, (-3, -2, -1)), ], ) + + +@pytest.mark.parametrize('wavelet_name', ['haar', 'db4']) +@SHAPE_PARAMETERS def test_wavelet_op_isometry(im_shape, domain_shape, dim, wavelet_name): """Test that the wavelet operator is a linear isometry.""" random_generator = RandomGenerator(seed=0) @@ -113,58 +142,33 @@ def test_wavelet_op_isometry(im_shape, domain_shape, dim, wavelet_name): @pytest.mark.parametrize('wavelet_name', ['haar', 'db4']) -@pytest.mark.parametrize( - ('im_shape', 'domain_shape', 
'dim'), - [ - ((1, 5, 20, 30), (30,), (-1,)), - ((5, 1, 10, 20, 30), (10,), (-3,)), - ((1, 5, 20, 30), (20, 30), (-2, -1)), - ((4, 10, 20, 30), (20, 30), (-2, -1)), - ((4, 10, 20, 30), (10, 30), (-3, -1)), - ((5, 10, 20, 30), (10, 20, 30), (-3, -2, -1)), - ((6, 10, 20, 30), (6, 20, 30), (-4, -2, -1)), - ((6, 10, 20, 30), (20, 30, 6), (-2, -1, -4)), - ((6, 10, 20, 30), (20, 30, 6), (2, 3, 0)), - ], -) +@SHAPE_PARAMETERS def test_wavelet_op_adjointness(im_shape, domain_shape, dim, wavelet_name): """Test adjoint property; i.e. == for all u,v.""" - random_generator = RandomGenerator(seed=0) - - wavelet_op = WaveletOp(domain_shape=domain_shape, dim=dim, wavelet_name=wavelet_name) - - # calculate 1D length of wavelet coefficients - wavelet_stack_length = torch.sum(torch.as_tensor([(np.prod(shape)) for shape in wavelet_op.coefficients_shape])) - - # sorted and normed dimensions needed to correctly calculate range - dim_sorted = sorted([d % len(im_shape) for d in dim], reverse=True) - range_shape = list(im_shape) - range_shape[dim_sorted[-1]] = int(wavelet_stack_length) - [range_shape.pop(d) for d in dim_sorted[:-1]] - - u = random_generator.complex64_tensor(size=im_shape) - v = random_generator.complex64_tensor(size=range_shape) - dotproduct_adjointness_test(wavelet_op, u, v) + dotproduct_adjointness_test(*create_wavelet_op_and_domain_range(im_shape, domain_shape, dim, wavelet_name)) @pytest.mark.parametrize('wavelet_name', ['haar', 'db4']) -@pytest.mark.parametrize( - ('im_shape', 'domain_shape', 'dim'), - [ - ((1, 5, 20, 30), (30,), (-1,)), - ((5, 1, 10, 20, 30), (10,), (-3,)), - ((1, 5, 20, 30), (20, 30), (-2, -1)), - ((4, 10, 20, 30), (20, 30), (-2, -1)), - ((4, 10, 20, 30), (10, 30), (-3, -1)), - ((5, 10, 20, 30), (10, 20, 30), (-3, -2, -1)), - ((6, 10, 20, 30), (6, 20, 30), (-4, -2, -1)), - ((6, 10, 20, 30), (20, 30, 6), (-2, -1, -4)), - ((6, 10, 20, 30), (20, 30, 6), (2, 3, 0)), - ], -) +@SHAPE_PARAMETERS def test_wavelet_op_unitary(im_shape, domain_shape, dim, wavelet_name): """Test if wavelet operator is unitary.""" random_generator = RandomGenerator(seed=0) img = random_generator.complex64_tensor(size=im_shape) wavelet_op = WaveletOp(domain_shape=domain_shape, dim=dim, wavelet_name=wavelet_name) linear_operator_unitary_test(wavelet_op, img) + + +@pytest.mark.parametrize('wavelet_name', ['haar', 'db4']) +@SHAPE_PARAMETERS +def test_wavelet_op_grad(im_shape, domain_shape, dim, wavelet_name): + """Test gradient of wavelet operator.""" + gradient_of_linear_operator_test(*create_wavelet_op_and_domain_range(im_shape, domain_shape, dim, wavelet_name)) + + +@pytest.mark.parametrize('wavelet_name', ['haar', 'db4']) +@SHAPE_PARAMETERS +def test_wavelet_op_forward_mode_autodiff(im_shape, domain_shape, dim, wavelet_name): + """Test forward-mode autodiff of wavelet operator.""" + forward_mode_autodiff_of_linear_operator_test( + *create_wavelet_op_and_domain_range(im_shape, domain_shape, dim, wavelet_name) + ) diff --git a/tests/operators/test_zero_op.py b/tests/operators/test_zero_op.py index 1e7f47017..218f62c17 100644 --- a/tests/operators/test_zero_op.py +++ b/tests/operators/test_zero_op.py @@ -3,7 +3,12 @@ from mrpro.operators.LinearOperator import LinearOperatorSum from typing_extensions import assert_type -from tests import RandomGenerator, dotproduct_adjointness_test +from tests import ( + RandomGenerator, + dotproduct_adjointness_test, + forward_mode_autodiff_of_linear_operator_test, + gradient_of_linear_operator_test, +) def test_zero_op_keepshape(): @@ -65,6 +70,24 @@ def 
test_zero_op_adjoint_keepshape(): dotproduct_adjointness_test(operator, u, v) +def test_zero_op_grad_keepshape(): + """Test gradient of zero operator.""" + generator = RandomGenerator(seed=0) + u = generator.complex64_tensor(2, 3, 4) + v = generator.complex64_tensor(2, 3, 4) + operator = ZeroOp(keep_shape=True) + gradient_of_linear_operator_test(operator, u, v) + + +def test_zero_op_forward_mode_autodiff_keepshape(): + """Test forward-mode autodiff of zero operator.""" + generator = RandomGenerator(seed=0) + u = generator.complex64_tensor(2, 3, 4) + v = generator.complex64_tensor(2, 3, 4) + operator = ZeroOp(keep_shape=True) + forward_mode_autodiff_of_linear_operator_test(operator, u, v) + + def test_zero_op_adjoint_scalar(): """Test that the adjoint of the zero operator is the zero operator.""" generator = RandomGenerator(seed=0) @@ -77,3 +100,21 @@ def test_zero_op_adjoint_scalar(): # as ZeroOp is the neutral element of the addition. operator = LinearOperatorSum(ZeroOp(keep_shape=False), IdentityOp()) dotproduct_adjointness_test(operator, u, v) + + +def test_zero_op_grad_scalar(): + """Test gradient of zero operator.""" + generator = RandomGenerator(seed=0) + u = generator.complex64_tensor(2, 3, 4) + v = generator.complex64_tensor(2, 3, 4) + operator = LinearOperatorSum(ZeroOp(keep_shape=False), IdentityOp()) + gradient_of_linear_operator_test(operator, u, v) + + +def test_zero_op_forward_mode_autodiff_scalar(): + """Test forward-mode autodiff of zero operator.""" + generator = RandomGenerator(seed=0) + u = generator.complex64_tensor(2, 3, 4) + v = generator.complex64_tensor(2, 3, 4) + operator = LinearOperatorSum(ZeroOp(keep_shape=False), IdentityOp()) + forward_mode_autodiff_of_linear_operator_test(operator, u, v) diff --git a/tests/operators/test_zero_pad_op.py b/tests/operators/test_zero_pad_op.py index 5d1f02135..e0d0fac0a 100644 --- a/tests/operators/test_zero_pad_op.py +++ b/tests/operators/test_zero_pad_op.py @@ -4,7 +4,21 @@ import torch from mrpro.operators import ZeroPadOp -from tests import RandomGenerator, dotproduct_adjointness_test +from tests import ( + RandomGenerator, + dotproduct_adjointness_test, + forward_mode_autodiff_of_linear_operator_test, + gradient_of_linear_operator_test, +) + + +def create_zero_pad_op_and_domain_range(u_shape, v_shape): + """Create a zero padding operator and an element from domain and range.""" + generator = RandomGenerator(seed=0) + u = generator.complex64_tensor(u_shape) + v = generator.complex64_tensor(v_shape) + zero_padding_op = ZeroPadOp(dim=(-3, -2, -1), original_shape=u_shape, padded_shape=v_shape) + return zero_padding_op, u, v def test_zero_pad_op_content(): @@ -25,7 +39,7 @@ def test_zero_pad_op_content(): torch.testing.assert_close(original_data[:, 10:90, :, 50:150, :, :], padded_data[:, :, :, :, 95:145, :]) -@pytest.mark.parametrize( +SHAPE_PARAMETERS = pytest.mark.parametrize( ('u_shape', 'v_shape'), [ ((101, 201, 50), (13, 221, 64)), @@ -34,10 +48,21 @@ def test_zero_pad_op_content(): ((100, 200, 50), (13, 221, 64)), ], ) + + +@SHAPE_PARAMETERS def test_zero_pad_op_adjoint(u_shape, v_shape): """Test adjointness of pad operator.""" - generator = RandomGenerator(seed=0) - u = generator.complex64_tensor(u_shape) - v = generator.complex64_tensor(v_shape) - zero_padding_op = ZeroPadOp(dim=(-3, -2, -1), original_shape=u_shape, padded_shape=v_shape) - dotproduct_adjointness_test(zero_padding_op, u, v) + dotproduct_adjointness_test(*create_zero_pad_op_and_domain_range(u_shape, v_shape)) + + +@SHAPE_PARAMETERS +def 
test_zero_pad_op_grad(u_shape, v_shape): + """Test gradient of zero padding operator.""" + gradient_of_linear_operator_test(*create_zero_pad_op_and_domain_range(u_shape, v_shape)) + + +@SHAPE_PARAMETERS +def test_zero_pad_op_forward_mode_autodiff(u_shape, v_shape): + """Test forward-mode autodiff of zero padding operator.""" + forward_mode_autodiff_of_linear_operator_test(*create_zero_pad_op_and_domain_range(u_shape, v_shape))
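
Note (not part of the patch above): a minimal, self-contained sketch of the two identities the new helpers in tests/helper.py assert. `MatrixOp` here is a hypothetical stand-in for mrpro's `LinearOperator` convention of returning tuples from `forward` and `adjoint`; in the test suite the helpers are of course called with real mrpro operators, as in the tests above.

import torch


class MatrixOp:
    """Toy linear operator y = A @ x, tuple-valued like mrpro operators (hypothetical)."""

    def __init__(self, matrix: torch.Tensor) -> None:
        self.matrix = matrix

    def forward(self, x: torch.Tensor) -> tuple[torch.Tensor]:
        return (self.matrix @ x,)

    def adjoint(self, y: torch.Tensor) -> tuple[torch.Tensor]:
        # A^H, the conjugate transpose
        return (self.matrix.mH @ y,)


torch.manual_seed(0)
op = MatrixOp(torch.randn(3, 4, dtype=torch.complex64))
u = torch.randn(4, dtype=torch.complex64)  # element of the domain
v = torch.randn(3, dtype=torch.complex64)  # element of the range

# Backward mode (gradient_of_linear_operator_test): the vector-Jacobian product
# of a linear operator, evaluated at cotangent v, is the adjoint applied to v.
_, vjpfunc = torch.func.vjp(op.forward, u)
assert torch.allclose(vjpfunc((v,))[0], op.adjoint(v)[0])

# Forward mode (forward_mode_autodiff_of_linear_operator_test): the
# Jacobian-vector product at tangent t is simply the operator applied to t.
t = torch.randn(4, dtype=torch.complex64)
_, jvp_out = torch.func.jvp(op.forward, (u,), (t,))
assert torch.allclose(jvp_out[0], op.forward(t)[0])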