From 64f08b0fb3883d2d9e104e9b65d9a6bf2eca8986 Mon Sep 17 00:00:00 2001
From: Erik Schytt Mannerfelt <33550973+erikmannerfelt@users.noreply.github.com>
Date: Thu, 22 Apr 2021 11:57:03 +0200
Subject: [PATCH] Improve documentation, add documentation testing, and make
 all examples into scripts. (#101)

* Moved all example code to actual scripts. This allows testing that they work.

* Added new required dev-requirements and tests to check that the docs are working.

* Fixed docstring to work with the documentation better.

* Added opencv to dev-requirements.txt

* PATH fixes

* Removed dynamic doc versioning for now (didn't work with CI)

* Set cv2 full installation as requirement instead of lightweight one.

* Fixed environment variables to hopefully work better with CI

* Changed copy_tree function to work with python=3.7
---
 dev-requirements.txt                          |   3 +
 docs/source/code/comparison.py                |  92 +++++++
 ...on_plot_local_hypsometric_interpolation.py |  58 ++++
 ...plot_regional_hypsometric_interpolation.py |  58 ++++
 .../comparison_plot_spatial_interpolation.py  |  45 ++++
 .../code/comparison_print_cumulative_dh.py    |  11 +
 docs/source/code/coregistration.py            |  89 ++++++
 .../code/coregistration_plot_nuth_kaab.py     |  41 +++
 docs/source/comparison.rst                    | 253 ++----------------
 docs/source/conf.py                           |  32 ++-
 docs/source/coregistration.rst                | 128 ++-------
 tests/test_docs.py                            |  63 +++++
 xdem/examples.py                              |   3 +-
 xdem/spstats.py                               |   4 +-
 14 files changed, 526 insertions(+), 354 deletions(-)
 create mode 100644 docs/source/code/comparison.py
 create mode 100644 docs/source/code/comparison_plot_local_hypsometric_interpolation.py
 create mode 100644 docs/source/code/comparison_plot_regional_hypsometric_interpolation.py
 create mode 100644 docs/source/code/comparison_plot_spatial_interpolation.py
 create mode 100644 docs/source/code/comparison_print_cumulative_dh.py
 create mode 100644 docs/source/code/coregistration.py
 create mode 100644 docs/source/code/coregistration_plot_nuth_kaab.py
 create mode 100644 tests/test_docs.py

diff --git a/dev-requirements.txt b/dev-requirements.txt
index d6d212d1..c7772f90 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -1,6 +1,8 @@
 sphinx
 numpydoc
 sphinx_rtd_theme
+sphinx_autodoc_typehints
+sphinxcontrib-programoutput
 numpy
 scipy
 rasterio
@@ -10,3 +12,4 @@ tqdm
 git+https://github.com/GlacioHack/GeoUtils.git
 scikit-gstat
 flake8
+opencv-contrib-python
diff --git a/docs/source/code/comparison.py b/docs/source/code/comparison.py
new file mode 100644
index 00000000..b8b16dae
--- /dev/null
+++ b/docs/source/code/comparison.py
@@ -0,0 +1,92 @@
+"""Example code for the DEM comparison chapter (it's made like this to test the syntax)."""
+#######################
+# SECTION: Example data
+#######################
+from datetime import datetime
+
+import geoutils as gu
+import numpy as np
+
+import xdem
+
+# Download the necessary data. This may take a few minutes.
+xdem.examples.download_longyearbyen_examples(overwrite=False)
+
+# Load a reference DEM from 2009
+dem_2009 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"], datetime=datetime(2009, 8, 1))
+# Load a DEM from 1990
+dem_1990 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"], datetime=datetime(1990, 8, 1))
+# Load glacier outlines from 1990.
+glaciers_1990 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
+glaciers_2010 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines_2010"])
+
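+# A quick sanity check (a minimal sketch: the example DEMs are expected to share
+# a CRS, but asserting it protects anyone swapping in their own data):
+assert dem_2009.crs == dem_1990.crs
+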
+# Make a dictionary of glacier outlines where the key represents the associated date.
+outlines = {
+    datetime(1990, 8, 1): glaciers_1990,
+    datetime(2009, 8, 1): glaciers_2010,
+}
+
+# Fake a future DEM to have a time-series of three DEMs
+dem_2060 = dem_2009.copy()
+# Assume that all glacier values will be 30 m lower than in 2009
+dem_2060.data[glaciers_2010.create_mask(dem_2060)] -= 30
+dem_2060.datetime = datetime(2060, 8, 1)
+
+##############################
+# SECTION: Subtracting rasters
+##############################
+
+dem1, dem2 = dem_2009, dem_1990
+
+ddem_data = dem1.data - dem2.data
+# If we want to inherit the georeferencing information:
+ddem_raster = xdem.DEM.from_array(ddem_data, dem1.transform, dem1.crs)
+
+# TEXT HERE
+
+ddem_raster = xdem.spatial_tools.subtract_rasters(dem1, dem2)
+
+#############################
+# SECTION: dDEM interpolation
+#############################
+
+ddem = xdem.dDEM(
+    raster=xdem.spatial_tools.subtract_rasters(dem_2009, dem_1990),
+    start_time=dem_1990.datetime,
+    end_time=dem_2009.datetime
+)
+
+# The example DEMs are void-free, so let's make some random voids.
+ddem.data.mask = np.zeros_like(ddem.data, dtype=bool)  # Reset the mask
+# Introduce 50000 nans randomly throughout the dDEM.
+ddem.data.mask.ravel()[np.random.choice(ddem.data.size, 50000, replace=False)] = True
+
+# SUBSECTION: Linear spatial interpolation
+
+ddem.interpolate(method="linear")
+
+# SUBSECTION: Local hypsometric interpolation
+
+ddem.interpolate(method="local_hypsometric", reference_elevation=dem_2009, mask=glaciers_1990)
+
+# SUBSECTION: Regional hypsometric interpolation
+
+ddem.interpolate(method="regional_hypsometric", reference_elevation=dem_2009, mask=glaciers_1990)
+
+###################################
+# SECTION: The DEMCollection object
+###################################
+
+dems = xdem.DEMCollection(
+    [dem_1990, dem_2009, dem_2060],
+    outlines=outlines,
+    reference_dem=dem_2009
+)
+
+# TEXT HERE
+
+dems.subtract_dems()
+dems.get_cumulative_series(kind="dh", outlines_filter="NAME == 'Scott Turnerbreen'")
+
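+# The same call is assumed to support volume change via kind="dv" (a hedged
+# sketch; only kind="dh" is exercised in these docs):
+# dems.get_cumulative_series(kind="dv", outlines_filter="NAME == 'Scott Turnerbreen'")
+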
+# Create an object that can be printed in the documentation.
+scott_series = dems.get_cumulative_series(kind="dh", outlines_filter="NAME == 'Scott Turnerbreen'")
diff --git a/docs/source/code/comparison_plot_local_hypsometric_interpolation.py b/docs/source/code/comparison_plot_local_hypsometric_interpolation.py
new file mode 100644
index 00000000..604f55a1
--- /dev/null
+++ b/docs/source/code/comparison_plot_local_hypsometric_interpolation.py
@@ -0,0 +1,58 @@
+"""Plot an example of local hypsometric interpolation at Scott Turnerbreen, Svalbard."""
+import geoutils as gu
+import matplotlib.pyplot as plt
+import numpy as np
+
+import xdem
+
+xdem.examples.download_longyearbyen_examples(overwrite=False)
+
+dem_2009 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"])
+dem_1990 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"])
+outlines_1990 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
+
+ddem = xdem.dDEM(
+    xdem.spatial_tools.subtract_rasters(dem_2009, dem_1990, resampling_method="nearest"),
+    start_time=np.datetime64("1990-08-01"),
+    end_time=np.datetime64("2009-08-01")
+)
+
+ddem.data /= (2009 - 1990)
+
+scott_1990 = outlines_1990.query("NAME == 'Scott Turnerbreen'")
+mask = scott_1990.create_mask(ddem)
+
+ddem_bins = xdem.volume.hypsometric_binning(ddem.data[mask], dem_2009.data[mask])
+stds = xdem.volume.hypsometric_binning(ddem.data[mask], dem_2009.data[mask], aggregation_function=np.std)
+
+plt.figure(figsize=(8, 8))
+plt.grid(zorder=0)
+plt.plot(ddem_bins["value"], ddem_bins.index.mid, linestyle="--", zorder=1)
+
+plt.barh(
+    y=ddem_bins.index.mid,
+    width=stds["value"],
+    left=ddem_bins["value"] - stds["value"] / 2,
+    height=(ddem_bins.index.left - ddem_bins.index.right) * 1,
+    zorder=2,
+    edgecolor="black",
+)
+for bin in ddem_bins.index:
+    plt.vlines(ddem_bins.loc[bin, "value"], bin.left, bin.right, color="black", zorder=3)
+
+plt.xlabel("Elevation change (m / a)")
+plt.twiny()
+plt.barh(
+    y=ddem_bins.index.mid,
+    width=ddem_bins["count"] / ddem_bins["count"].sum(),
+    left=0,
+    height=(ddem_bins.index.left - ddem_bins.index.right) * 1,
+    zorder=2,
+    alpha=0.2,
+)
+plt.xlabel("Normalized area distribution (hypsometry)")
+
+plt.ylabel("Elevation (m a.s.l.)")
+
+plt.tight_layout()
+plt.show()
diff --git a/docs/source/code/comparison_plot_regional_hypsometric_interpolation.py b/docs/source/code/comparison_plot_regional_hypsometric_interpolation.py
new file mode 100644
index 00000000..03fc3ad6
--- /dev/null
+++ b/docs/source/code/comparison_plot_regional_hypsometric_interpolation.py
@@ -0,0 +1,58 @@
+"""Plot an example of regional hypsometric interpolation in central Svalbard."""
+import geoutils as gu
+import matplotlib.pyplot as plt
+import numpy as np
+
+import xdem
+
+xdem.examples.download_longyearbyen_examples(overwrite=False)
+
+dem_2009 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"])
+dem_1990 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"])
+outlines_1990 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
+
+ddem = xdem.dDEM(
+    xdem.spatial_tools.subtract_rasters(dem_2009, dem_1990, resampling_method="nearest"),
+    start_time=np.datetime64("1990-08-01"),
+    end_time=np.datetime64("2009-08-01")
+)
+
+ddem.data /= (2009 - 1990)
+
+mask = outlines_1990.create_mask(ddem)
+
+ddem_bins = xdem.volume.hypsometric_binning(ddem.data[mask], dem_2009.data[mask])
+stds = xdem.volume.hypsometric_binning(ddem.data[mask], dem_2009.data[mask], aggregation_function=np.std)
+
+plt.figure(figsize=(8, 8))
+plt.grid(zorder=0)
+
+
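+# hypsometric_binning returns one row per elevation bin; its IntervalIndex
+# exposes .mid/.left/.right, used below for the bar positions (uncomment to inspect):
+# print(ddem_bins.head())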
+plt.plot(ddem_bins["value"], ddem_bins.index.mid, linestyle="--", zorder=1)
+
+plt.barh(
+    y=ddem_bins.index.mid,
+    width=stds["value"],
+    left=ddem_bins["value"] - stds["value"] / 2,
+    height=(ddem_bins.index.left - ddem_bins.index.right) * 1,
+    zorder=2,
+    edgecolor="black",
+)
+for bin in ddem_bins.index:
+    plt.vlines(ddem_bins.loc[bin, "value"], bin.left, bin.right, color="black", zorder=3)
+
+plt.xlabel("Elevation change (m / a)")
+plt.twiny()
+plt.barh(
+    y=ddem_bins.index.mid,
+    width=ddem_bins["count"] / ddem_bins["count"].sum(),
+    left=0,
+    height=(ddem_bins.index.left - ddem_bins.index.right) * 1,
+    zorder=2,
+    alpha=0.2,
+)
+plt.xlabel("Normalized area distribution (hypsometry)")
+plt.ylabel("Elevation (m a.s.l.)")
+
+plt.tight_layout()
+plt.show()
diff --git a/docs/source/code/comparison_plot_spatial_interpolation.py b/docs/source/code/comparison_plot_spatial_interpolation.py
new file mode 100644
index 00000000..efbade02
--- /dev/null
+++ b/docs/source/code/comparison_plot_spatial_interpolation.py
@@ -0,0 +1,45 @@
+"""Plot an example of spatial interpolation of randomly generated errors."""
+import geoutils as gu
+import matplotlib.pyplot as plt
+import numpy as np
+
+import xdem
+
+xdem.examples.download_longyearbyen_examples(overwrite=False)
+
+dem_2009 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"])
+dem_1990 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"])
+outlines_1990 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
+
+ddem = xdem.dDEM(
+    xdem.spatial_tools.subtract_rasters(dem_2009, dem_1990, resampling_method="nearest"),
+    start_time=np.datetime64("1990-08-01"),
+    end_time=np.datetime64("2009-08-01")
+)
+# The example DEMs are void-free, so let's make some random voids.
+ddem.data.mask = np.zeros_like(ddem.data, dtype=bool)  # Reset the mask
+# Introduce 50000 nans randomly throughout the dDEM.
+ddem.data.mask.ravel()[np.random.choice(ddem.data.size, 50000, replace=False)] = True
+
+ddem.interpolate(method="linear")
+
+ylim = (300, 100)
+xlim = (800, 1050)
+
+plt.figure(figsize=(8, 5))
+plt.subplot(121)
+plt.imshow(ddem.data.squeeze(), cmap="coolwarm_r", vmin=-50, vmax=50)
+plt.ylim(ylim)
+plt.xlim(xlim)
+plt.axis("off")
+plt.title("dDEM with random voids")
+plt.subplot(122)
+plt.imshow(ddem.filled_data.squeeze(), cmap="coolwarm_r", vmin=-50, vmax=50)
+plt.ylim(ylim)
+plt.xlim(xlim)
+plt.axis("off")
+plt.title("Linearly interpolated dDEM")
+
+
+plt.tight_layout()
+plt.show()
diff --git a/docs/source/code/comparison_print_cumulative_dh.py b/docs/source/code/comparison_print_cumulative_dh.py
new file mode 100644
index 00000000..7f29f1f4
--- /dev/null
+++ b/docs/source/code/comparison_print_cumulative_dh.py
@@ -0,0 +1,11 @@
+"""Print the Scott Turnerbreen dH series."""
+import contextlib
+import io
+import sys
+
+sys.path.insert(0, "code/")  # The base directory is source/, so to find comparison.py, it has to be source/code/
+
+with contextlib.redirect_stdout(io.StringIO()):  # Import the script without printing anything.
+    import comparison
+
+print(comparison.scott_series)
diff --git a/docs/source/code/coregistration.py b/docs/source/code/coregistration.py
new file mode 100644
index 00000000..763df13d
--- /dev/null
+++ b/docs/source/code/coregistration.py
@@ -0,0 +1,89 @@
+"""Example code for the DEM coregistration chapter (it's made like this to test the syntax)."""
+#######################
+# SECTION: Example data
+#######################
+import geoutils as gu
+import numpy as np
+
+import xdem
+from xdem import coreg
+
+# Download the necessary data. This may take a few minutes.
+xdem.examples.download_longyearbyen_examples(overwrite=False)
+
+# Load the data using xdem and geoutils (could be with rasterio and geopandas instead)
+# Load a reference DEM from 2009
+reference_dem = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"])
+# Load a moderately well aligned DEM from 1990
+dem_to_be_aligned = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"]).reproject(reference_dem, silent=True)
+# Load glacier outlines from 1990. This will act as the unstable ground.
+glacier_outlines = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
+
+# Prepare the inputs for coregistration.
+ref_data = reference_dem.data.squeeze()  # This is a numpy 2D array/masked_array
+tba_data = dem_to_be_aligned.data.squeeze()  # This is a numpy 2D array/masked_array
+# This is a boolean numpy 2D array. Note the bitwise not (~) symbol
+inlier_mask = ~glacier_outlines.create_mask(reference_dem)
+transform = reference_dem.transform  # This is a rio.transform.Affine object.
+
+########################
+# SECTION: Nuth and Kääb
+########################
+
+nuth_kaab = coreg.NuthKaab()
+# Fit the data to a suitable x/y/z offset.
+nuth_kaab.fit(ref_data, tba_data, transform=transform, inlier_mask=inlier_mask)
+
+# Apply the transformation to the data (or any other data)
+aligned_dem = nuth_kaab.apply(tba_data, transform=transform)
+
+####################
+# SECTION: Deramping
+####################
+
+# Instantiate a 1st order deramping object.
+deramp = coreg.Deramp(degree=1)
+# Fit the data to a suitable polynomial solution.
+deramp.fit(ref_data, tba_data, transform=transform, inlier_mask=inlier_mask)
+
+# Apply the transformation to the data (or any other data)
+deramped_dem = deramp.apply(dem_to_be_aligned.data, transform=dem_to_be_aligned.transform)
+
+##########################
+# SECTION: Bias correction
+##########################
+
+bias_corr = coreg.BiasCorr()
+# Note that the transform argument is not needed, since it is a simple vertical correction.
+bias_corr.fit(ref_data, tba_data, inlier_mask=inlier_mask)
+
+# Apply the bias to a DEM
+corrected_dem = bias_corr.apply(tba_data, transform=None)  # The transform does not need to be given for bias
+
+# Use median bias instead
+bias_median = coreg.BiasCorr(bias_func=np.median)
+
+# bias_median.fit(...  # etc.
+
+##############
+# SECTION: ICP
+##############
+
+# Instantiate the object with default parameters
+icp = coreg.ICP()
+# Fit the data to a suitable transformation.
+icp.fit(ref_data, tba_data, transform=transform, inlier_mask=inlier_mask)
+
+# Apply the transformation matrix to the data (or any other data)
+aligned_dem = icp.apply(tba_data, transform=transform)
+
+###################
+# SECTION: Pipeline
+###################
+
+pipeline = coreg.CoregPipeline([coreg.BiasCorr(), coreg.ICP()])
+
+# pipeline.fit(...  # etc.
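+
+# Once fitted, a pipeline is assumed to apply like any single coregistration step;
+# a sketch (commented out, since the pipeline is not fitted in this script):
+# aligned_dem_pipeline = pipeline.apply(tba_data, transform=transform)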
+
+# This works identically to the syntax above
+pipeline2 = coreg.BiasCorr() + coreg.ICP()
diff --git a/docs/source/code/coregistration_plot_nuth_kaab.py b/docs/source/code/coregistration_plot_nuth_kaab.py
new file mode 100644
index 00000000..fcaa9045
--- /dev/null
+++ b/docs/source/code/coregistration_plot_nuth_kaab.py
@@ -0,0 +1,41 @@
+"""Plot the comparison between a dDEM before and after Nuth and Kääb (2011) coregistration."""
+import geoutils as gu
+import matplotlib.pyplot as plt
+import numpy as np
+
+import xdem
+
+xdem.examples.download_longyearbyen_examples(overwrite=False)
+
+dem_2009 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"])
+dem_1990 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"])
+outlines_1990 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
+inlier_mask = ~outlines_1990.create_mask(dem_2009)
+
+nuth_kaab = xdem.coreg.NuthKaab()
+nuth_kaab.fit(dem_2009.data, dem_1990.data, transform=dem_2009.transform, inlier_mask=inlier_mask)
+dem_coreg = nuth_kaab.apply(dem_1990.data, transform=dem_1990.transform)
+
+ddem_pre = (dem_2009.data - dem_1990.data).filled(np.nan).squeeze()
+ddem_post = (dem_2009.data - dem_coreg).filled(np.nan).squeeze()
+
+nmad_pre = xdem.spatial_tools.nmad(ddem_pre[inlier_mask.squeeze()])
+nmad_post = xdem.spatial_tools.nmad(ddem_post[inlier_mask.squeeze()])
+
+vlim = 20
+plt.figure(figsize=(8, 5))
+plt.subplot2grid((1, 15), (0, 0), colspan=7)
+plt.title(f"Before coregistration. NMAD={nmad_pre:.1f} m")
+plt.imshow(ddem_pre, cmap="coolwarm_r", vmin=-vlim, vmax=vlim)
+plt.axis("off")
+plt.subplot2grid((1, 15), (0, 7), colspan=7)
+plt.title(f"After coregistration. NMAD={nmad_post:.1f} m")
+img = plt.imshow(ddem_post, cmap="coolwarm_r", vmin=-vlim, vmax=vlim)
+plt.axis("off")
+plt.subplot2grid((1, 15), (0, 14), colspan=1)
+cbar = plt.colorbar(img, fraction=0.4)
+cbar.set_label("Elevation change (m)")
+plt.axis("off")
+
+plt.tight_layout()
+plt.show()
diff --git a/docs/source/comparison.rst b/docs/source/comparison.rst
index 74703b33..a712a6db 100644
--- a/docs/source/comparison.rst
+++ b/docs/source/comparison.rst
@@ -9,35 +9,9 @@ DEM subtraction and volume change
 
 Example data in this chapter are loaded as follows:
 
-.. code-block:: python
+.. literalinclude:: code/comparison.py
+    :lines: 5-33
 
-    from datetime import datetime
-
-    import geoutils as gu
-    import xdem
-
-    # Download the necessary data. This may take a few minutes.
-    xdem.examples.download_longyearbyen_examples(overwrite=False)
-
-    # Load a reference DEM from 2009
-    dem_2009 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"], datetime=datetime(2009, 8, 1))
-    # Load a DEM from 1990
-    dem_1990 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"], datetime=datetime(1990, 8, 1))
-    # Load glacier outlines from 1990.
-    glaciers_1990 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
-    glaciers_2010 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines_2010"])
-
-    # Make a dictionary of glacier outlines where the key represents the associated date.
-    outlines = {
-        datetime(1990, 8, 1): glaciers_1990,
-        datetime(2009, 8, 1): glaciers_2010
-    }
-
-    # Fake a future DEM to have a time-series of three DEMs
-    dem_2060 = dem_2009.copy()
-    # Assume that all glacier values will be 30 m lower than in 2009
-    dem_2060.data[glaciers_2010.create_mask(dem_2060) == 255] -= 30
-    dem_2060.datetime.year = 2060
 
 Subtracting rasters
 ^^^^^^^^^^^^^^^^^^^
 In ``xdem``, the aim is to minimize or completely remove potential pitfalls in the comparison of DEMs.
 Let's assume we have two perfectly aligned DEMs, with the same shape, extent, resolution and coordinate reference system (CRS) as each other.
 Calculating a dDEM would then be as simple as:
 
-.. code-block:: python
-
-    ddem_data = dem1.data - dem2.data
-    # If we want to inherit the georeferencing information:
-    ddem_raster = xdem.DEM.from_array(ddem_data, dem1.transform, dem1.crs)
+.. literalinclude:: code/comparison.py
+    :lines: 39-43
 
 But in practice, our two DEMs are most often not perfectly aligned, which is why we might need a helper function for this:
 :func:`xdem.spatial_tools.subtract_rasters`
 
-.. code-block:: python
+.. literalinclude:: code/comparison.py
+    :lines: 47
 
-    ddem_raster = xdem.spatial_tools.subtract_rasters(dem1, dem2)
 
 So what does this magical function do?
 First, the non-reference, ``dem2``, will be reprojected to fit the shape, extent, resolution and CRS of ``dem1``.
 
 So far, ``xdem`` has three types of interpolation:
 
 Let's first create a :class:`xdem.ddem.dDEM` object to experiment on:
 
-.. code-block:: python
-
-    ddem = xdem.dDEM(
-        raster=xdem.spatial_tools.subtract_rasters(dem_2009, dem_1990),
-        start_time=dem_1990.datetime,
-        end_time=dem_2009.datetime
-    )
-
-    # The example DEMs are void-free, so let's make some random voids.
-    ddem.data.mask = np.zeros_like(ddem.data, dtype=bool)  # Reset the mask
-    # Introduce 50000 nans randomly throughout the dDEM.
-    ddem.data.mask.ravel()[np.random.choice(ddem.data.size, 50000, replace=False)] = True
-
+.. literalinclude:: code/comparison.py
+    :lines: 53-62
 
 Linear spatial interpolation
 ****************************
 Linear spatial interpolation (also often called bilinear interpolation) of dDEMs is arguably the simplest approach: voids are filled by an average of the surrounding pixel values, weighted by their distance to the void pixel.
 
-.. code-block:: python
-    ddem.interpolate(method="linear")
-
-
-.. plot::
-
-    import xdem
-    import numpy as np
-    import geoutils as gu
-
-    xdem.examples.download_longyearbyen_examples(overwrite=False)
-
-    dem_2009 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"])
-    dem_1990 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"])
-    outlines_1990 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
-
-    ddem = xdem.dDEM(
-        xdem.spatial_tools.subtract_rasters(dem_2009, dem_1990, resampling_method="nearest"),
-        start_time=np.datetime64("1990-08-01"),
-        end_time=np.datetime64("2009-08-01")
-    )
-    # The example DEMs are void-free, so let's make some random voids.
-    ddem.data.mask = np.zeros_like(ddem.data, dtype=bool)  # Reset the mask
-    # Introduce 50000 nans randomly throughout the dDEM.
-    ddem.data.mask.ravel()[np.random.choice(ddem.data.size, 50000, replace=False)] = True
-
-    ddem.interpolate(method="linear")
-
-    ylim = (300, 100)
-    xlim = (800, 1050)
-
-    plt.figure(figsize=(8, 5))
-    plt.subplot(121)
-    plt.imshow(ddem.data.squeeze(), cmap="coolwarm_r", vmin=-50, vmax=50)
-    plt.ylim(ylim)
-    plt.xlim(xlim)
-    plt.axis("off")
-    plt.title("dDEM with random voids")
-    plt.subplot(122)
-    plt.imshow(ddem.filled_data.squeeze(), cmap="coolwarm_r", vmin=-50, vmax=50)
-    plt.ylim(ylim)
-    plt.xlim(xlim)
-    plt.axis("off")
-    plt.title("Linearly interpolated dDEM")
+.. literalinclude:: code/comparison.py
+    :lines: 66
 
+.. plot:: code/comparison_plot_spatial_interpolation.py
 
-    plt.tight_layout()
-    plt.show()
 
 Local hypsometric interpolation
 *******************************
 
 With the local (glacier specific) hypsometric approach, elevation change gradients are estimated for each glacier separately.
 This is simply a linear or polynomial model estimated with the dDEM and a reference DEM.
 Then, voids are interpolated by replacing them with what "should be there" at that elevation, according to the model.
 
-.. code-block:: python
-
-    ddem.interpolate(method="local_hypsometric", reference_elevation=dem_2009, mask=outlines_1990)
+.. literalinclude:: code/comparison.py
+    :lines: 70
 
-.. plot::
-
-    import xdem
-    import geoutils as gu
-    import numpy as np
-    import matplotlib.pyplot as plt
-
-    xdem.examples.download_longyearbyen_examples(overwrite=False)
-
-    dem_2009 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"])
-    dem_1990 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"])
-    outlines_1990 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
-
-    ddem = xdem.dDEM(
-        xdem.spatial_tools.subtract_rasters(dem_2009, dem_1990, resampling_method="nearest"),
-        start_time=np.datetime64("1990-08-01"),
-        end_time=np.datetime64("2009-08-01")
-    )
-
-    ddem.data /= (2009 - 1990)
-
-    scott_1990 = outlines_1990.query("NAME == 'Scott Turnerbreen'")
-    mask = (scott_1990.create_mask(ddem) == 255).reshape(ddem.data.shape)
-
-    ddem_bins = xdem.volume.hypsometric_binning(ddem.data[mask], dem_2009.data[mask])
-    stds = xdem.volume.hypsometric_binning(ddem.data[mask], dem_2009.data[mask], aggregation_function=np.std)
-
-    plt.figure(figsize=(8, 8))
-    plt.grid(zorder=0)
-    plt.plot(ddem_bins["value"], ddem_bins.index.mid, linestyle="--", zorder=1)
-
-    plt.barh(
-        y=ddem_bins.index.mid,
-        width=stds["value"],
-        left=ddem_bins["value"] - stds["value"] / 2,
-        height=(ddem_bins.index.left - ddem_bins.index.right) * 1,
-        zorder=2,
-        edgecolor="black",
-    )
-    for bin in ddem_bins.index:
-        plt.vlines(ddem_bins.loc[bin, "value"], bin.left, bin.right, color="black", zorder=3)
-
-    plt.xlabel("Elevation change (m / a)")
-    plt.twiny()
-    plt.barh(
-        y=ddem_bins.index.mid,
-        width=ddem_bins["count"] / ddem_bins["count"].sum(),
-        left=0,
-        height=(ddem_bins.index.left - ddem_bins.index.right) * 1,
-        zorder=2,
-        alpha=0.2,
-    )
-    plt.xlabel("Normalized area distribution (hypsometry)")
+.. plot:: code/comparison_plot_local_hypsometric_interpolation.py
 
-    plt.ylabel("Elevation (m a.s.l.)")
-
-    plt.tight_layout()
-    plt.show()
 
 *Caption: The elevation dependent elevation change of Scott Turnerbreen on Svalbard from 1990--2009.
 The width of the bars indicates the standard deviation of the bin.
 The light blue background bars show the area distribution with elevation.*
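 
+Under the hood, this interpolation relies on :func:`xdem.volume.hypsometric_binning`.
+A minimal sketch of calling it directly, assuming the ``ddem``, ``dem_2009`` and
+glacier ``mask`` objects from the plot script above:
+
+.. code-block:: python
+
+    ddem_bins = xdem.volume.hypsometric_binning(ddem.data[mask], dem_2009.data[mask])
+    print(ddem_bins["value"])  # Mean elevation change per elevation bin
+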
 
 Regional hypsometric interpolation
 **********************************
 
 With the regional approach (often also called "global"), elevation change gradients are estimated for all glaciers in an entire region, instead of estimating one by one.
 This is advantageous with respect to areas where voids are frequent, as not even a single dDEM value has to exist on a glacier in order to reconstruct it.
 Of course, the accuracy of such an averaging is much lower than if the local hypsometric approach is used (assuming it is possible).
 
-.. code-block:: python
-
-    ddem.interpolate(method="regional_hypsometric", reference_elevation=dem_2009, mask=outlines_1990)
-
-.. plot::
-
-    import xdem
-    import geoutils as gu
-    import numpy as np
-    import matplotlib.pyplot as plt
-
-    xdem.examples.download_longyearbyen_examples(overwrite=False)
-
-    dem_2009 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"])
-    dem_1990 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"])
-    outlines_1990 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
-
-    ddem = xdem.dDEM(
-        xdem.spatial_tools.subtract_rasters(dem_2009, dem_1990, resampling_method="nearest"),
-        start_time=np.datetime64("1990-08-01"),
-        end_time=np.datetime64("2009-08-01")
-    )
-
-    ddem.data /= (2009 - 1990)
-
-    mask = (outlines_1990.create_mask(ddem) == 255).reshape(ddem.data.shape)
-
-    ddem_bins = xdem.volume.hypsometric_binning(ddem.data[mask], dem_2009.data[mask])
-    stds = xdem.volume.hypsometric_binning(ddem.data[mask], dem_2009.data[mask], aggregation_function=np.std)
-
-    plt.figure(figsize=(8, 8))
-    plt.grid(zorder=0)
+.. literalinclude:: code/comparison.py
+    :lines: 74
 
+.. plot:: code/comparison_plot_regional_hypsometric_interpolation.py
 
-    plt.plot(ddem_bins["value"], ddem_bins.index.mid, linestyle="--", zorder=1)
-
-    plt.barh(
-        y=ddem_bins.index.mid,
-        width=stds["value"],
-        left=ddem_bins["value"] - stds["value"] / 2,
-        height=(ddem_bins.index.left - ddem_bins.index.right) * 1,
-        zorder=2,
-        edgecolor="black",
-    )
-    for bin in ddem_bins.index:
-        plt.vlines(ddem_bins.loc[bin, "value"], bin.left, bin.right, color="black", zorder=3)
-
-    plt.xlabel("Elevation change (m / a)")
-    plt.twiny()
-    plt.barh(
-        y=ddem_bins.index.mid,
-        width=ddem_bins["count"] / ddem_bins["count"].sum(),
-        left=0,
-        height=(ddem_bins.index.left - ddem_bins.index.right) * 1,
-        zorder=2,
-        alpha=0.2,
-    )
-    plt.xlabel("Normalized area distribution (hypsometry)")
-    plt.ylabel("Elevation (m a.s.l.)")
-
-    plt.tight_layout()
-    plt.show()
-
 *Caption: The regional elevation dependent elevation change in central Svalbard from 1990--2009.
 The width of the bars indicates the standard deviation of the bin.
 The light blue background bars show the area distribution with elevation.*
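 
+A sketch of applying the regional approach and checking the result, assuming the
+``ddem``, ``dem_2009`` and ``glaciers_1990`` objects from the example script
+above (``np`` is NumPy; ``filled_data`` holds the void-filled array):
+
+.. code-block:: python
+
+    ddem.interpolate(method="regional_hypsometric", reference_elevation=dem_2009, mask=glaciers_1990)
+    print(np.nanmean(ddem.filled_data))  # Mean elevation change, now void-free
+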
 
 The DEMCollection object
 ^^^^^^^^^^^^^^^^^^^^^^^^
 
 Multiple outlines are provided as a dictionary in the shape of ``{datetime: outline}``.
 In the examples, we have three DEMs and glacier outlines with known dates, so we can create a collection from them:
 
-.. code-block:: python
-    dems = xdem.DEMCollection(
-        [dem_1990, dem_2009, dem_2060],
-        outlines=outlines,
-        reference_dem=dem_2009
-    )
+.. literalinclude:: code/comparison.py
+    :lines: 80-84
 
 Now, we can easily calculate the elevation or volume change between the DEMs, for example on the glacier Scott Turnerbreen:
 
-.. code-block:: python
-
-    dems.get_cumulative_series(kind="dh", outline_filter="NAME == 'Scott Turnerbreen'")
+.. literalinclude:: code/comparison.py
+    :lines: 88-89
 
 which will return a Pandas Series:
 
-.. code-block:: python
-
-    1990-08-01     0.000000
-    2009-08-01   -13.379259
-    2060-08-01   -43.379259
-    dtype: float64
+.. program-output:: $PYTHON code/comparison_print_cumulative_dh.py
+    :shell:
 
 `See here for the outline filtering syntax <https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.query.html>`_.
diff --git a/docs/source/conf.py b/docs/source/conf.py
index cae4a0aa..d42fc0ff 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -13,6 +13,8 @@
 import os
 import sys
 
+#import xdem.version
+
 # Allow conf.py to find the xdem module
 sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../../"))
 
 # -- Project information -----------------------------------------------------
@@ -23,7 +25,10 @@
 author = 'xdem contributors'
 
 # The full version, including alpha/beta/rc tags
-release = '0.0.1'
+release = "0.0.1"
+
+
+os.environ["PYTHON"] = sys.executable
 
 
 # -- General configuration ---------------------------------------------------
@@ -32,13 +37,13 @@
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
-    "sphinx.ext.autodoc",
-    "sphinx.ext.viewcode",
-    'matplotlib.sphinxext.plot_directive',
-    "numpydoc",
-    "sphinx.ext.autosummary",
-    "sphinx.ext.doctest",
-    "sphinx.ext.inheritance_diagram"
+    "sphinx.ext.autodoc",  # Create the API documentation automatically
+    "sphinx.ext.viewcode",  # Create the "[source]" button in the API to show the source code.
+    'matplotlib.sphinxext.plot_directive',  # Render matplotlib figures from code.
+    "sphinx.ext.autosummary",  # Create API doc summary texts from the docstrings.
+    "sphinx.ext.inheritance_diagram",  # For class inheritance diagrams (see coregistration.rst).
+    "sphinx_autodoc_typehints",  # Include type hints in the API documentation.
+    "sphinxcontrib.programoutput"
 ]
 
 # Add any paths that contain templates here, relative to this directory.
@@ -47,7 +52,9 @@
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = []
+exclude_patterns = [
+    "api/modules.rst"  # This is not really needed, but is created automatically by autodoc
+]
 
 # -- Options for HTML output -------------------------------------------------
@@ -59,10 +66,15 @@
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+# html_static_path = ['_static']  # Commented out as we have no custom static data
 
 
 def run_apidoc(_):
+    """
+    Make sure readthedocs finds the module.
+
+    Maybe this is not needed?
+    """
     from sphinx.ext.apidoc import main
     import os
     import sys
diff --git a/docs/source/coregistration.rst b/docs/source/coregistration.rst
index 3027cc62..4ca35674 100644
--- a/docs/source/coregistration.rst
+++ b/docs/source/coregistration.rst
@@ -26,30 +26,8 @@ Below is a summary of how each method works, and when it should (and should not)
 Examples are given using data close to Longyearbyen on Svalbard.
 These can be loaded as:
 
-.. code-block:: python
-
-    import geoutils as gu
-    import xdem
-
-    # Download the necessary data. This may take a few minutes.
-    xdem.examples.download_longyearbyen_examples(overwrite=False)
-
-    ### Load the data using xdem and geoutils (could be with rasterio and geopandas instead)
-    # Load a reference DEM from 2009
-    reference_dem = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"])
-    # Load a moderately well aligned DEM from 1990
-    dem_to_be_aligned = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"]).resample(reference_dem)
-    # Load glacier outlines from 1990. This will act as the unstable ground.
-    glacier_outlines = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
-
-
-    # Prepare the inputs for coregistration.
-    ref_data = reference_dem.data.squeeze()  # This is a numpy 2D array/masked_array
-    tba_data = dem_to_be_aligned.data.squeeze()  # This is a numpy 2D array/masked_array
-    inlier_mask = ~glacier_outlines.create_mask(reference_dem)  # This is a boolean numpy 2D array. Note the bitwise not (~) symbol
-    transform = reference_dem.transform  # This is a rio.transform.Affine object.
-
-
+.. literalinclude:: code/coregistration.py
+    :lines: 5-27
 
 The Coreg object
 ^^^^^^^^^^^^^^^^^^^^
 
 Each coregistration approach has the methods:
 
 First, ``.fit()`` is called to estimate the transform, and then this transform can be used or exported using the subsequent methods.
 
+.. inheritance-diagram:: xdem.coreg
+    :top-classes: xdem.coreg.Coreg
+
 Nuth and Kääb (2011)
 ^^^^^^^^^^^^^^^^^^^^
 :class:`xdem.coreg.NuthKaab`
 
 A cosine function is solved using these products to find the most probable offsets.
 This is an iterative process, and cosine functions with suggested shifts are applied in a loop, continuously refining the total offset.
 The loop is stopped either when the maximum iteration limit is reached, or when the :ref:`spatial_stats_nmad` between the two products stops improving significantly.
 
-.. plot::
-
-    import xdem
-    import geoutils as gu
-    import matplotlib.pyplot as plt
-
-    xdem.examples.download_longyearbyen_examples(overwrite=False)
-
-    dem_2009 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_ref_dem"])
-    dem_1990 = xdem.DEM(xdem.examples.FILEPATHS["longyearbyen_tba_dem"])
-    outlines_1990 = gu.Vector(xdem.examples.FILEPATHS["longyearbyen_glacier_outlines"])
-    inlier_mask = ~outlines_1990.create_mask(dem_2009)
-
-    nuth_kaab = xdem.coreg.NuthKaab()
-    nuth_kaab.fit(dem_2009.data, dem_1990.data, transform=dem_2009.transform, inlier_mask=inlier_mask)
-    dem_coreg = nuth_kaab.apply(dem_1990.data, transform=dem_1990.transform)
-
-    ddem_pre = (dem_2009.data - dem_1990.data).filled(np.nan).squeeze()
-    ddem_post = (dem_2009.data - dem_coreg).filled(np.nan).squeeze()
-
-    nmad_pre = xdem.spatial_tools.nmad(ddem_pre[inlier_mask.squeeze()])
-    nmad_post = xdem.spatial_tools.nmad(ddem_post[inlier_mask.squeeze()])
-
-    vlim = 20
-    plt.figure(figsize=(8, 5))
-    plt.subplot2grid((1, 15), (0, 0), colspan=7)
-    plt.title(f"Before coregistration. NMAD={nmad_pre:.1f} m")
-    plt.imshow(ddem_pre, cmap="coolwarm_r", vmin=-vlim, vmax=vlim)
-    plt.axis("off")
-    plt.subplot2grid((1, 15), (0, 7), colspan=7)
-    plt.title(f"After coregistration. NMAD={nmad_post:.1f} m")
-    img = plt.imshow(ddem_post, cmap="coolwarm_r", vmin=-vlim, vmax=vlim)
-    plt.axis("off")
-    plt.subplot2grid((1, 15), (0, 14), colspan=1)
-    cbar = plt.colorbar(img, fraction=0.4)
-    cbar.set_label("Elevation change (m)")
-    plt.axis("off")
-
-    plt.tight_layout()
-    plt.show()
+.. plot:: code/coregistration_plot_nuth_kaab.py
 
 *Caption: Demonstration of the Nuth and Kääb (2011) approach from Svalbard.
 Note that large improvements are seen, but nonlinear offsets still exist.
 The NMAD is calculated from the off-glacier surfaces.*
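 
+The fitted object can also export its estimated shift; a sketch, assuming
+``nuth_kaab`` was fitted as in the plot script above:
+
+.. code-block:: python
+
+    matrix = nuth_kaab.to_matrix()  # 4x4 affine matrix; the translation column holds the x/y/z offset
+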
 
 Limitations
 ***********
-The Nuth and Kääb (2011) coregistation approach does not take rotation into account.
+The Nuth and Kääb (2011) coregistration approach does not take rotation into account.
 Rotational corrections are often needed on, for example, satellite-derived DEMs, so a complementary tool is required for a perfect fit.
 1st or higher degree `Deramping`_ can be used for small rotational corrections.
 For large rotations, the Nuth and Kääb (2011) approach will not work properly, and `ICP`_ is recommended instead.
 
 Example
 *******
-.. code-block:: python
-
-    nuth_kaab = coreg.NuthKaab()
-    # Fit the data to a suitable x/y/z offset.
-    nuth_kaab.fit(ref_data, tba_data, transform=transform, inlier_mask=inlier_mask)
-    # Apply the transformation to the data (or any other data)
-    aligned_dem = nuth_kaab.apply(tba_data, transform=transform)
+.. literalinclude:: code/coregistration.py
+    :lines: 33-38
 
 Deramping
 ^^^^^^^^^
 
 For large rotational corrections, `ICP`_ is recommended.
 
 Example
 *******
-.. code-block:: python
-    # Instantiate a 1st order deramping object.
-    deramp = coreg.Deramp(degree=1)
-    # Fit the data to a suitable polynomial solution.
-    deramp.fit(ref_data, tba_data, transform=transform, inlier_mask=inlier_mask)
-
-    # Apply the transformation to the data (or any other data)
-    deramped_dem = deramp.apply(dem_to_be_aligned.data, transform=dem_to_be_aligned.transform)
+.. literalinclude:: code/coregistration.py
+    :lines: 44-50
 
 Bias correction
 ^^^^^^^^^^^^^^^
 
 Only performs vertical corrections, so it should be combined with another approach.
 
 Example
 *******
-.. code-block:: python
-
-    bias_corr = coreg.BiasCorr()
-    # Note that the transform argument is not needed, since it is a simple vertical correction.
-    bias_corr.fit(ref_data, tba_data, inlier_mask=inlier_mask)
-
-    # Apply the bias to a DEM
-    corrected_dem = bias_corr.apply(tba_data)
-
-    # Use median bias instead
-    bias_median = coreg.BiasCorr(bias_func=np.median)
-
-    bias_median.fit(...  # etc.
+.. literalinclude:: code/coregistration.py
+    :lines: 56-66
 
 ICP
 ^^^
 
 Due to the repeated nearest neighbour calculations, ICP is often the slowest coregistration approach.
 
 Example
 *******
-.. code-block:: python
-
-    # Instantiate the object with default parameters
-    icp = coreg.ICP()
-    # Fit the data to a suitable transformation.
-    icp.fit(ref_data, tba_data, transform=transform, inlier_mask=inlier_mask)
-
-    # Apply the transformation matrix to the data (or any other data)
-    aligned_dem = icp.apply(tba_data, transform=transform)
-
+.. literalinclude:: code/coregistration.py
+    :lines: 72-78
 
 The CoregPipeline object
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Often, more than one coregistration approach is necessary to obtain the best results.
 For example, ICP works poorly with large initial biases, so a ``CoregPipeline`` can be constructed to perform both sequentially:
 
-.. code-block:: python
-
-    pipeline = coreg.CoregPipeline([coreg.BiasCorr(), coreg.ICP()])
-
-    pipeline.fit(...  # etc.
-
-    # This works identically to the syntax above
-    pipeline2 = coreg.BiasCorr() + coreg.ICP()
+.. literalinclude:: code/coregistration.py
+    :lines: 84-89
 
 The ``CoregPipeline`` object exposes the same interface as the ``Coreg`` object.
 The results of a pipeline can be used in other programs by exporting the combined transformation matrix:
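 
+A sketch of that export (the 4x4 matrix combines both steps; assumes the
+pipeline was fitted first):
+
+.. code-block:: python
+
+    matrix = pipeline.to_matrix()
+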
diff --git a/tests/test_docs.py b/tests/test_docs.py
new file mode 100644
index 00000000..0b0b1d8c
--- /dev/null
+++ b/tests/test_docs.py
@@ -0,0 +1,63 @@
+import os
+import shutil
+import subprocess
+import sys
+import warnings
+
+
+class TestDocs:
+    docs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "../", "docs/")
+
+    def test_example_code(self):
+        """Try running each python script in the docs/source/code\
+ directory and check that it doesn't raise an error."""
+        current_dir = os.getcwd()
+        os.chdir(os.path.join(self.docs_dir, "source"))
+
+        # Copy the environment and unset the DISPLAY variable to hide matplotlib plots.
+        env = os.environ.copy()
+        env["DISPLAY"] = ""
+
+        for filename in os.listdir("code/"):
+            if not filename.endswith(".py"):
+                continue
+            print(f"Running {os.path.join(os.getcwd(), 'code/', filename)}")
+            subprocess.run([sys.executable, f"code/{filename}"], check=True, env=env)
+
+        os.chdir(current_dir)
+
+    def test_build(self):
+        """Try building the docs and see if it works."""
+        current_dir = os.getcwd()
+        # Change into the docs directory.
+        os.chdir(self.docs_dir)
+
+        # Remove the build directory if it exists.
+        if os.path.isdir("build/"):
+            shutil.rmtree("build/")
+
+        # Copy the environment and set the SPHINXBUILD variable to call the module.
+        # This is for it to work properly with GitHub Workflows
+        env = os.environ.copy()
+        env["SPHINXBUILD"] = f"{sys.executable} -m sphinx"
+
+        # Run the makefile
+        build_commands = ["make", "html"]
+        result = subprocess.run(
+            build_commands,
+            check=True,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            encoding="utf-8",
+            env=env
+        )
+
+        # Raise an error if the string "error" is in the stderr.
+        if "error" in str(result.stderr).lower():
+            raise RuntimeError(result.stderr)
+
+        # If "error" is not in the stderr string but it exists, show it as a warning.
+        if len(result.stderr) > 0:
+            warnings.warn(result.stderr)
+
+        os.chdir(current_dir)
diff --git a/xdem/examples.py b/xdem/examples.py
index 2d411f8a..94b377d9 100644
--- a/xdem/examples.py
+++ b/xdem/examples.py
@@ -4,6 +4,7 @@
 import tarfile
 import tempfile
 import urllib.request
+from distutils.dir_util import copy_tree
 
 EXAMPLES_DIRECTORY = os.path.abspath(os.path.join(os.path.dirname(__file__), "../", "examples/"))
 # Absolute filepaths to the example files.
@@ -60,4 +61,4 @@ def download_longyearbyen_examples(overwrite: bool = False):
     )
 
     # Copy the data to the examples directory.
-    shutil.copytree(dir_name, os.path.join(EXAMPLES_DIRECTORY, "Longyearbyen/data"), dirs_exist_ok=True)
+    copy_tree(dir_name, os.path.join(EXAMPLES_DIRECTORY, "Longyearbyen/data"))
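Background for the ``copy_tree`` swap above (a sketch with hypothetical ``src``/``dst``
paths; ``shutil.copytree`` only gained its ``dirs_exist_ok`` keyword in Python 3.8):

.. code-block:: python

    import shutil
    from distutils.dir_util import copy_tree

    # Python 3.8+: merges into an existing target directory.
    shutil.copytree("src/", "dst/", dirs_exist_ok=True)
    # Python 3.7: the keyword is unknown (TypeError), and without it the call
    # raises FileExistsError if "dst/" already exists.
    copy_tree("src/", "dst/")  # Works on 3.7+, merging into existing directories.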
diff --git a/xdem/spstats.py b/xdem/spstats.py
index 22b5ee8c..018e1945 100644
--- a/xdem/spstats.py
+++ b/xdem/spstats.py
@@ -97,12 +97,12 @@ def sample_multirange_empirical_variogram(dh: np.ndarray, gsd: float = None, coo
                                           **kwargs) -> pd.DataFrame:
     """
     Wrapper to sample multi-range empirical variograms from the data.
+    If no option is passed, a varying binning is used with adapted ranges and data subsampling.
 
     :param dh: elevation differences
    :param gsd: ground sampling distance (if array is 2D on structured grid)
-    :param coords: coordinates, to be used only with a flattened elevation differences array and passed as an array of
-    the pairs of coordinates: one dimension equal to two and the other to that of the flattened elevation differences
+    :param coords: coordinates, to be used only with a flattened elevation differences array and passed as an array of\
+    the pairs of coordinates: one dimension equal to two and the other to that of the flattened elevation differences
     :param range_list: successive ranges with even binning
     :param nsamp: number of samples to randomly draw from the elevation differences
     :param nrun: number of samplings