diff --git a/tests/test_coreg.py b/tests/test_coreg.py
index 7add132c..1293456a 100644
--- a/tests/test_coreg.py
+++ b/tests/test_coreg.py
@@ -186,6 +186,21 @@ def test_error_method(self) -> None:
         dem3 = dem1.copy() + np.random.random(size=dem1.size).reshape(dem1.shape)
         assert abs(biascorr.error(dem1, dem3, transform=affine, error_type="std") - np.std(dem3)) < 1e-6
 
+    def test_coreg_example(self) -> None:
+        """
+        Test the co-registration outputs performed on the example are always the same. This overlaps with the test in
+        test_examples.py, but helps identify from where differences arise.
+        """
+
+        # Run co-registration
+        nuth_kaab = xdem.coreg.NuthKaab()
+        nuth_kaab.fit(self.ref, self.tba, inlier_mask=self.inlier_mask)
+
+        # Check the output metadata is always the same
+        assert nuth_kaab._meta["offset_east_px"] == pytest.approx(-0.46255704521968716)
+        assert nuth_kaab._meta["offset_north_px"] == pytest.approx(-0.13618536563846081)
+        assert nuth_kaab._meta["bias"] == pytest.approx(-1.9815309753424906)
+
     def test_nuth_kaab(self) -> None:
         warnings.simplefilter("error")
 
@@ -209,12 +224,6 @@ def test_nuth_kaab(self) -> None:
         assert nuth_kaab._meta["offset_north_px"] == pytest.approx(0, abs=0.03)
         assert nuth_kaab._meta["bias"] == pytest.approx(-bias, 0.03)
 
-        # Check that the random states forces always the same results
-        # Note: in practice, the values are not exactly equal for different OS/conda config
-        assert nuth_kaab._meta["offset_east_px"] == pytest.approx(2.00019, abs=1e-5)
-        assert nuth_kaab._meta["offset_north_px"] == pytest.approx(-0.00012, abs=1e-5)
-        assert nuth_kaab._meta["bias"] == -5.0
-
         # Apply the estimated shift to "revert the DEM" to its original state.
         unshifted_dem = nuth_kaab.apply(shifted_dem, transform=self.ref.transform)
         # Measure the difference (should be more or less zero)
diff --git a/tests/test_examples.py b/tests/test_examples.py
index 9cd56e88..00b6901b 100644
--- a/tests/test_examples.py
+++ b/tests/test_examples.py
@@ -1,8 +1,6 @@
 """Functions to test the example data."""
 from __future__ import annotations
 
-import platform
-
 import geoutils as gu
 import numpy as np
 import pytest
@@ -36,11 +34,11 @@ class TestExamples:
             ddem,
             np.array(
                 [
-                    -2.423095703125000000e-02,
-                    -7.189941406250000000e-01,
-                    1.425781250000000000e-01,
-                    1.101867675781250000e00,
-                    -5.920959472656250000e00,
+                    -4.669189453125000000e-02,
+                    -7.413940429687500000e-01,
+                    1.499481201171875000e-01,
+                    1.095550537109375000e00,
+                    -5.904846191406250000e00,
                 ],
                 dtype=np.float32,
             ),
@@ -50,14 +48,12 @@ class TestExamples:
     def test_array_content(self, rst_and_truevals: tuple[Raster, NDArrayf]) -> None:
         """Let's ensure the data arrays in the examples are always the same by checking randomly some values"""
 
-        # TODO: this currently fails on Mac while exactly the same on Linux and Windows... why?
-        if platform.system() in ["Linux", "Windows"]:
-            rst = rst_and_truevals[0]
-            truevals = rst_and_truevals[1]
-            np.random.seed(42)
-            values = np.random.choice(rst.data.data.flatten(), size=5, replace=False)
+        rst = rst_and_truevals[0]
+        truevals = rst_and_truevals[1]
+        np.random.seed(42)
+        values = np.random.choice(rst.data.data.flatten(), size=5, replace=False)
 
-            assert values == pytest.approx(truevals)
+        assert values == pytest.approx(truevals)
 
     @pytest.mark.parametrize("rst_and_truenodata", [(ref_dem, 0), (tba_dem, 0), (ddem, 2316)])  # type: ignore
     def test_array_nodata(self, rst_and_truenodata: tuple[Raster, int]) -> None:
diff --git a/tests/test_spatialstats.py b/tests/test_spatialstats.py
index 43c289f8..3e3d7719 100644
--- a/tests/test_spatialstats.py
+++ b/tests/test_spatialstats.py
@@ -390,7 +390,7 @@ def test_sample_multirange_variogram_default(self) -> None:
 
         # Check the variogram output is consistent for a random state
         df = xdem.spatialstats.sample_empirical_variogram(values=self.diff, subsample=10, random_state=42)
-        assert df["exp"][15] == pytest.approx(23.574527740478516, abs=1e-3)
+        assert df["exp"][15] == pytest.approx(23.517837524414062, abs=1e-3)
         assert df["lags"][15] == pytest.approx(5120)
         assert df["count"][15] == 2
         # With a single run, no error can be estimated
@@ -1180,7 +1180,7 @@ def test_patches_method_loop_quadrant(self) -> None:
         assert all(df.columns == ["nmad", "nb_indep_patches", "exact_areas", "areas"])
 
         # Check the sampling is fixed for a random state
-        assert df["nmad"][0] == pytest.approx(1.8697986129910111, abs=1e-3)
+        assert df["nmad"][0] == pytest.approx(1.8663623135417342, abs=1e-3)
         assert df["nb_indep_patches"][0] == 100
         assert df["exact_areas"][0] == pytest.approx(df["areas"][0], rel=0.2)
 
@@ -1189,7 +1189,7 @@ def test_patches_method_loop_quadrant(self) -> None:
 
         # Check the sampling is always fixed for a random state
         assert df_full["tile"].values[0] == "8_16"
-        assert df_full["nanmean"].values[0] == pytest.approx(0.24107581448842244, abs=1e-3)
+        assert df_full["nanmean"].values[0] == pytest.approx(0.24885657130475025, abs=1e-3)
 
         # Check that all counts respect the default minimum percentage of 80% valid pixels
         assert all(df_full["count"].values > 0.8 * np.max(df_full["count"].values))
diff --git a/xdem/coreg.py b/xdem/coreg.py
index 4af8f296..7576590b 100644
--- a/xdem/coreg.py
+++ b/xdem/coreg.py
@@ -188,13 +188,13 @@ def residuals(parameters: tuple[float, float, float], y_values: NDArrayf, x_valu
 
     # Estimate the a, b, and c parameters with least square minimisation
     results = scipy.optimize.least_squares(
-        fun=residuals, x0=initial_guess, args=(y_medians, slice_bounds), xtol=1e-08, gtol=None, ftol=None
+        fun=residuals, x0=initial_guess, args=(y_medians, slice_bounds), xtol=1e-8, gtol=None, ftol=None
     )
 
     # Round results above the tolerance to get fixed results on different OS
     a_parameter, b_parameter, c_parameter = results.x
-    a_parameter = np.round(a_parameter, 5)
-    b_parameter = np.round(b_parameter, 5)
+    a_parameter = np.round(a_parameter, 2)
+    b_parameter = np.round(b_parameter, 2)
 
     # Calculate the easting and northing offsets from the above parameters
     east_offset = a_parameter * np.sin(b_parameter)
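
For anyone wanting to reproduce the values pinned by the new test_coreg_example outside pytest, the snippet below is a minimal sketch of the same workflow. It assumes a reference DEM, a DEM to be aligned, and a stable-terrain inlier mask have already been loaded; the names ref, tba and inlier_mask are placeholders, not part of this patch, while the NuthKaab calls and _meta keys mirror the ones added in tests/test_coreg.py.

    import xdem

    # Placeholder inputs: substitute your own reference DEM, DEM to be aligned,
    # and inlier mask here (e.g. rasters opened with xdem/geoutils).
    # ref, tba, inlier_mask = ...

    # Run the Nuth & Kaab co-registration, as in the new test
    nuth_kaab = xdem.coreg.NuthKaab()
    nuth_kaab.fit(ref, tba, inlier_mask=inlier_mask)

    # These are the metadata entries the test pins to fixed values
    print(nuth_kaab._meta["offset_east_px"])
    print(nuth_kaab._meta["offset_north_px"])
    print(nuth_kaab._meta["bias"])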