Get consistent scipy.optimize outputs out of NuthKaab (#322)
* Try higher round up to fix test on Mac

* Add back Mac to test floating values

* Print results to see before rounding

* Update fixed tested values with new rounding

* Lower tolerance

* Update example

* Add test on example co-registration to check where OS difference come from

* Linting

* Try to lower tolerance even more

* Reup tolerance

* Try rounding more

* Try with new values

* Fix comma separator

* Fix values in patches tests

* Linting

* Remove unused changes
rhugonnet authored Oct 25, 2022
1 parent 792a486 commit ecc7a9c
Showing 4 changed files with 31 additions and 26 deletions.
21 changes: 15 additions & 6 deletions tests/test_coreg.py
@@ -186,6 +186,21 @@ def test_error_method(self) -> None:
dem3 = dem1.copy() + np.random.random(size=dem1.size).reshape(dem1.shape)
assert abs(biascorr.error(dem1, dem3, transform=affine, error_type="std") - np.std(dem3)) < 1e-6

def test_coreg_example(self) -> None:
"""
Test the co-registration outputs performed on the example are always the same. This overlaps with the test in
test_examples.py, but helps identify from where differences arise.
"""

# Run co-registration
nuth_kaab = xdem.coreg.NuthKaab()
nuth_kaab.fit(self.ref, self.tba, inlier_mask=self.inlier_mask)

# Check the output metadata is always the same
assert nuth_kaab._meta["offset_east_px"] == pytest.approx(-0.46255704521968716)
assert nuth_kaab._meta["offset_north_px"] == pytest.approx(-0.13618536563846081)
assert nuth_kaab._meta["bias"] == pytest.approx(-1.9815309753424906)

def test_nuth_kaab(self) -> None:
warnings.simplefilter("error")

@@ -209,12 +224,6 @@ def test_nuth_kaab(self) -> None:
assert nuth_kaab._meta["offset_north_px"] == pytest.approx(0, abs=0.03)
assert nuth_kaab._meta["bias"] == pytest.approx(-bias, 0.03)

# Check that the random states forces always the same results
# Note: in practice, the values are not exactly equal for different OS/conda config
assert nuth_kaab._meta["offset_east_px"] == pytest.approx(2.00019, abs=1e-5)
assert nuth_kaab._meta["offset_north_px"] == pytest.approx(-0.00012, abs=1e-5)
assert nuth_kaab._meta["bias"] == -5.0

# Apply the estimated shift to "revert the DEM" to its original state.
unshifted_dem = nuth_kaab.apply(shifted_dem, transform=self.ref.transform)
# Measure the difference (should be more or less zero)
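The new test above pins the NuthKaab metadata with pytest.approx at its default relative tolerance of 1e-6: sub-1e-6 floating-point drift between platforms still passes, while a genuine change in the co-registration result fails. A standalone sketch of that tolerance behaviour, not part of the commit and using the east offset value only as an illustration:

import pytest

offset_east = -0.46255704521968716

# pytest.approx defaults to rel=1e-6 (abs=1e-12), so a drift of ~1e-10
# is accepted while a shift of 1e-3 is rejected.
assert offset_east + 1e-10 == pytest.approx(-0.46255704521968716)
assert not (offset_east + 1e-3 == pytest.approx(-0.46255704521968716))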
24 changes: 10 additions & 14 deletions tests/test_examples.py
@@ -1,8 +1,6 @@
"""Functions to test the example data."""
from __future__ import annotations

import platform

import geoutils as gu
import numpy as np
import pytest
@@ -36,11 +34,11 @@ class TestExamples:
ddem,
np.array(
[
-2.423095703125000000e-02,
-7.189941406250000000e-01,
1.425781250000000000e-01,
1.101867675781250000e00,
-5.920959472656250000e00,
-4.669189453125000000e-02,
-7.413940429687500000e-01,
1.499481201171875000e-01,
1.095550537109375000e00,
-5.904846191406250000e00,
],
dtype=np.float32,
),
@@ -50,14 +48,12 @@ class TestExamples:
def test_array_content(self, rst_and_truevals: tuple[Raster, NDArrayf]) -> None:
"""Let's ensure the data arrays in the examples are always the same by checking randomly some values"""

# TODO: this currently fails on Mac while exactly the same on Linux and Windows... why?
if platform.system() in ["Linux", "Windows"]:
rst = rst_and_truevals[0]
truevals = rst_and_truevals[1]
np.random.seed(42)
values = np.random.choice(rst.data.data.flatten(), size=5, replace=False)
rst = rst_and_truevals[0]
truevals = rst_and_truevals[1]
np.random.seed(42)
values = np.random.choice(rst.data.data.flatten(), size=5, replace=False)

assert values == pytest.approx(truevals)
assert values == pytest.approx(truevals)

@pytest.mark.parametrize("rst_and_truenodata", [(ref_dem, 0), (tba_dem, 0), (ddem, 2316)]) # type: ignore
def test_array_nodata(self, rst_and_truenodata: tuple[Raster, int]) -> None:
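With the Mac-specific skip removed, the seeded sampling check above now runs on every platform; the reference values changed, presumably because the example dDEM is regenerated with the rounding added in xdem/coreg.py below. A standalone sketch of the seeded-sampling pattern on a plain array (hypothetical data, not the example DEM):

import numpy as np
import pytest

# Stand-in for rst.data.data in the test: any fixed float32 array works.
data = np.linspace(-6.0, 6.0, 10_000, dtype=np.float32)

np.random.seed(42)
values = np.random.choice(data.flatten(), size=5, replace=False)

# Re-seeding draws the exact same five elements, so they can be compared
# against hard-coded reference values in a regression test.
np.random.seed(42)
values_again = np.random.choice(data.flatten(), size=5, replace=False)
assert values == pytest.approx(values_again)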
6 changes: 3 additions & 3 deletions tests/test_spatialstats.py
@@ -390,7 +390,7 @@ def test_sample_multirange_variogram_default(self) -> None:

# Check the variogram output is consistent for a random state
df = xdem.spatialstats.sample_empirical_variogram(values=self.diff, subsample=10, random_state=42)
assert df["exp"][15] == pytest.approx(23.574527740478516, abs=1e-3)
assert df["exp"][15] == pytest.approx(23.517837524414062, abs=1e-3)
assert df["lags"][15] == pytest.approx(5120)
assert df["count"][15] == 2
# With a single run, no error can be estimated
@@ -1180,7 +1180,7 @@ def test_patches_method_loop_quadrant(self) -> None:
assert all(df.columns == ["nmad", "nb_indep_patches", "exact_areas", "areas"])

# Check the sampling is fixed for a random state
assert df["nmad"][0] == pytest.approx(1.8697986129910111, abs=1e-3)
assert df["nmad"][0] == pytest.approx(1.8663623135417342, abs=1e-3)
assert df["nb_indep_patches"][0] == 100
assert df["exact_areas"][0] == pytest.approx(df["areas"][0], rel=0.2)

@@ -1189,7 +1189,7 @@

# Check the sampling is always fixed for a random state
assert df_full["tile"].values[0] == "8_16"
assert df_full["nanmean"].values[0] == pytest.approx(0.24107581448842244, abs=1e-3)
assert df_full["nanmean"].values[0] == pytest.approx(0.24885657130475025, abs=1e-3)

# Check that all counts respect the default minimum percentage of 80% valid pixels
assert all(df_full["count"].values > 0.8 * np.max(df_full["count"].values))
6 changes: 3 additions & 3 deletions xdem/coreg.py
@@ -188,13 +188,13 @@ def residuals(parameters: tuple[float, float, float], y_values: NDArrayf, x_valu

# Estimate the a, b, and c parameters with least square minimisation
results = scipy.optimize.least_squares(
fun=residuals, x0=initial_guess, args=(y_medians, slice_bounds), xtol=1e-08, gtol=None, ftol=None
fun=residuals, x0=initial_guess, args=(y_medians, slice_bounds), xtol=1e-8, gtol=None, ftol=None
)

# Round results above the tolerance to get fixed results on different OS
a_parameter, b_parameter, c_parameter = results.x
a_parameter = np.round(a_parameter, 5)
b_parameter = np.round(b_parameter, 5)
a_parameter = np.round(a_parameter, 2)
b_parameter = np.round(b_parameter, 2)

# Calculate the easting and northing offsets from the above parameters
east_offset = a_parameter * np.sin(b_parameter)
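The hunk above is the actual fix: the a and b parameters returned by scipy.optimize.least_squares differ by tiny amounts between OS/conda configurations (as noted in the comment removed from test_nuth_kaab), and that drift propagates into the easting/northing offsets. Rounding to two decimals, well above the solver's xtol, snaps the parameters to values that no longer depend on those small solver differences. A self-contained sketch of the pattern on synthetic data; the cosine model and variable names only mirror the hunk, and the real model in xdem.coreg may differ in detail:

import numpy as np
import scipy.optimize


def residuals(parameters, y_values, x_values):
    # Residuals of a cosine model a * cos(b - x) + c.
    a, b, c = parameters
    return y_values - (a * np.cos(b - x_values) + c)


# Synthetic binned medians with a known solution (a=2, b=0.5, c=1) plus noise.
rng = np.random.default_rng(0)
x = np.linspace(0, 2 * np.pi, 50)
y = 2.0 * np.cos(0.5 - x) + 1.0 + rng.normal(0, 0.01, x.size)

results = scipy.optimize.least_squares(
    fun=residuals, x0=[1.0, 0.0, 0.0], args=(y, x), xtol=1e-8, gtol=None, ftol=None
)

a_parameter, b_parameter, _ = results.x
# Round above the solver tolerance so the derived offsets are stable across OS.
a_parameter = np.round(a_parameter, 2)
b_parameter = np.round(b_parameter, 2)

east_offset = a_parameter * np.sin(b_parameter)
north_offset = a_parameter * np.cos(b_parameter)
print(east_offset, north_offset)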
