diff --git a/.github/workflows/build-and-test.yml b/.github/workflows/build-and-test.yml index a15058a679..86b002cf36 100644 --- a/.github/workflows/build-and-test.yml +++ b/.github/workflows/build-and-test.yml @@ -28,10 +28,10 @@ jobs: name: Check for syntax errors runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Set up python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ env.LATEST_SUPPORTED_PYTHON_VERSION }} @@ -46,13 +46,13 @@ jobs: # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide python -m flake8 src --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - check-history-rst-syntax: - name: Check HISTORY RST syntax + check-rst-syntax: + name: Check RST syntax runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 name: Set up python with: python-version: ${{ env.LATEST_SUPPORTED_PYTHON_VERSION }} @@ -63,7 +63,7 @@ jobs: - name: Lint with doc8 run: | # Skip line-too-long errors (D001) - python -m doc8 --ignore D001 HISTORY.rst + python -m doc8 --ignore D001 HISTORY.rst README_PYTHON.rst run-model-tests: name: Run model tests @@ -75,7 +75,7 @@ jobs: python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] os: [windows-latest, macos-13] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 # Fetch complete history for accurate versioning @@ -127,13 +127,13 @@ jobs: run: make test - name: Upload wheel artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Wheel for ${{ matrix.os }} ${{ matrix.python-version }} path: dist - name: Upload conda env artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 continue-on-error: true with: name: Conda Env for ${{ matrix.os }} ${{ matrix.python-version }} @@ -141,13 +141,13 @@ jobs: - name: Authenticate GCP if: github.event_name != 'pull_request' - uses: google-github-actions/auth@v0 + uses: google-github-actions/auth@v2 with: credentials_json: ${{ secrets.GOOGLE_SERVICE_ACC_KEY }} - name: Set up GCP if: github.event_name != 'pull_request' - uses: google-github-actions/setup-gcloud@v0 + uses: google-github-actions/setup-gcloud@v2 - name: Deploy artifacts to GCS if: github.event_name != 'pull_request' @@ -162,7 +162,7 @@ jobs: python-version: [3.8, 3.9, "3.10", "3.11", "3.12"] os: [windows-latest, macos-13] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 # Fetch complete history for accurate versioning @@ -170,7 +170,7 @@ jobs: with: python-version: ${{ matrix.python-version }} requirements-files: requirements.txt - requirements: ${{ env.CONDA_DEFAULT_DEPENDENCIES }} + requirements: ${{ env.CONDA_DEFAULT_DEPENDENCIES }} twine - name: Build source distribution run: | @@ -193,9 +193,12 @@ jobs: # natcap.invest from source and that it imports. python -c "from natcap.invest import *" - - uses: actions/upload-artifact@v3 + - name: Check long description with twine + run: twine check $(find dist -name "natcap[._-]invest*") + + - uses: actions/upload-artifact@v4 with: - name: Source distribution + name: Source distribution ${{ matrix.os }} ${{ matrix.python-version }} path: dist # Secrets not available in PR so don't use GCP. 
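# Note: two of the workflow changes above are connected. actions/upload-artifact@v4
# rejects a second upload to an artifact name already used in the same run,
# which is why the sdist artifact is now named per matrix entry
# ("Source distribution ${{ matrix.os }} ${{ matrix.python-version }}").
# The new "Check long description with twine" step runs `twine check`, which
# verifies that the built distributions' long description will render on PyPI.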
@@ -204,13 +207,13 @@ jobs: # different extensions) - name: Authenticate GCP if: github.event_name != 'pull_request' && matrix.os == 'macos-13' && matrix.python-version == env.LATEST_SUPPORTED_PYTHON_VERSION - uses: google-github-actions/auth@v0 + uses: google-github-actions/auth@v2 with: credentials_json: ${{ secrets.GOOGLE_SERVICE_ACC_KEY }} - name: Set up GCP if: github.event_name != 'pull_request' && matrix.os == 'macos-13' && matrix.python-version == env.LATEST_SUPPORTED_PYTHON_VERSION - uses: google-github-actions/setup-gcloud@v0 + uses: google-github-actions/setup-gcloud@v2 - name: Deploy artifacts to GCS if: github.event_name != 'pull_request' && matrix.os == 'macos-13' && matrix.python-version == env.LATEST_SUPPORTED_PYTHON_VERSION @@ -221,7 +224,7 @@ jobs: runs-on: windows-latest needs: check-syntax-errors steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 # Fetch complete history for accurate versioning @@ -252,7 +255,7 @@ jobs: steps: - name: Check out repo - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 # Fetch complete history for accurate versioning @@ -309,7 +312,7 @@ jobs: workspace-path: ${{ github.workspace }} binary-extension: exe steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 # Fetch complete history for accurate versioning @@ -383,13 +386,13 @@ jobs: - name: Authenticate GCP if: github.event_name != 'pull_request' - uses: google-github-actions/auth@v0 + uses: google-github-actions/auth@v2 with: credentials_json: ${{ secrets.GOOGLE_SERVICE_ACC_KEY }} - name: Set up GCP if: github.event_name != 'pull_request' - uses: google-github-actions/setup-gcloud@v0 + uses: google-github-actions/setup-gcloud@v2 - name: Build Workbench (PRs) if: github.event_name == 'pull_request' @@ -458,20 +461,20 @@ jobs: - name: Upload workbench binary artifact if: always() - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: Workbench-${{ runner.os }}-binary path: workbench/dist/*.${{ matrix.binary-extension }} - name: Upload user's guide artifact (Windows) if: matrix.os == 'windows-latest' - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: InVEST-user-guide path: dist/InVEST_*_userguide.zip - name: Upload workbench logging from puppeteer - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: ${{ runner.os }}_puppeteer_log.zip' @@ -490,7 +493,7 @@ jobs: - name: Upload workspace on failure if: ${{ failure() }} - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: InVEST-failed-${{ runner.os }}-workspace path: ${{ matrix.workspace-path}} @@ -500,11 +503,11 @@ jobs: needs: check-syntax-errors runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 # Fetch complete history for accurate versioning - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: ${{ env.LATEST_SUPPORTED_PYTHON_VERSION }} @@ -515,20 +518,20 @@ jobs: - run: make sampledata sampledata_single - name: Upload sample data artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: InVEST-sample-data path: dist/*.zip - name: Authenticate GCP if: github.event_name != 'pull_request' - uses: google-github-actions/auth@v0 + uses: google-github-actions/auth@v2 with: credentials_json: ${{ secrets.GOOGLE_SERVICE_ACC_KEY }} - name: Set up GCP if: github.event_name != 'pull_request' - uses: 
google-github-actions/setup-gcloud@v0 + uses: google-github-actions/setup-gcloud@v2 - name: Deploy artifacts to GCS if: github.event_name != 'pull_request' diff --git a/HISTORY.rst b/HISTORY.rst index 47406cd82c..a8bcf05148 100644 --- a/HISTORY.rst +++ b/HISTORY.rst @@ -37,8 +37,41 @@ Unreleased Changes ------------------ +* General + * InVEST has been updated to build against numpy 2. + https://github.com/natcap/invest/issues/1641 + * Updating validation to handle a change in exceptions raised by GDAL in + ``pygeoprocessing.get_raster_info`` and + ``pygeoprocessing.get_vector_info``. + https://github.com/natcap/invest/issues/1645 +* Forest Carbon Edge Effects + * Updating vector reprojection to allow partial reprojection. Related to + https://github.com/natcap/invest/issues/1645 +* Urban Nature Access + * The model now works as expected when the user provides an LULC raster + that does not have a nodata value defined. + https://github.com/natcap/invest/issues/1293 * Workbench - * Several small updates to the model input form UI to improve usability and visual consistency (https://github.com/natcap/invest/issues/912) + * Several small updates to the model input form UI to improve usability + and visual consistency (https://github.com/natcap/invest/issues/912). + * Fixed a bug that allowed read-only workspace directories on Windows + (https://github.com/natcap/invest/issues/1599). + * Fixed a bug that caused the application to crash when attempting to + open a workspace without a valid logfile + (https://github.com/natcap/invest/issues/1598). + * Fixed a bug that, in certain scenarios, caused a datastack to be saved + with relative paths when the Relative Paths checkbox was left unchecked + (https://github.com/natcap/invest/issues/1609). + * Improved error handling when a datastack cannot be saved with relative + paths across drives (https://github.com/natcap/invest/issues/1608). +* Habitat Quality + * Access raster is now generated from the reprojected access vector + (https://github.com/natcap/invest/issues/1615). + * Rarity values are now output in CSV format (as well as in raster format) + (https://github.com/natcap/invest/issues/721). +* Urban Flood Risk + * Fields present on the input AOI vector are now retained in the output. + (https://github.com/natcap/invest/issues/1600) 3.14.2 (2024-05-29) ------------------- diff --git a/Makefile b/Makefile index 234e814bbf..f225958acd 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ GIT_TEST_DATA_REPO_REV := 324abde73e1d770ad75921466ecafd1ec6297752 GIT_UG_REPO := https://github.com/natcap/invest.users-guide GIT_UG_REPO_PATH := doc/users-guide -GIT_UG_REPO_REV := 0404bc5d4d43085cdc58f50f8fc29944b10cefb1 +GIT_UG_REPO_REV := f203ec069f9f03560c9a85b268e67ebb6b994953 ENV = "./env" ifeq ($(OS),Windows_NT) diff --git a/constraints_tests.txt b/constraints_tests.txt index 92920cea3f..b0056a12e1 100644 --- a/constraints_tests.txt +++ b/constraints_tests.txt @@ -6,5 +6,7 @@ # occur with regular use of invest. https://github.com/OSGeo/gdal/issues/8497 GDAL!=3.6.*,!=3.7.* -# https://github.com/natcap/pygeoprocessing/issues/387 -GDAL<3.8.5 +# Pyinstaller 6.10 breaks our windows builds. Until we can figure out the +# root cause, let's cap the versions to those that work. 
+# https://github.com/natcap/invest/issues/1622 +#pyinstaller<6.10 diff --git a/doc/api-docs/conf.py b/doc/api-docs/conf.py index 495e68e8b8..52205cf614 100644 --- a/doc/api-docs/conf.py +++ b/doc/api-docs/conf.py @@ -78,13 +78,11 @@ # -- Options for HTML output ---------------------------------------------- import sphinx_rtd_theme + # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinx_rtd_theme' -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "_static/invest-logo.png" @@ -130,7 +128,7 @@ # dir menu entry, description, category) texinfo_documents = [ ('index', 'InVEST', 'InVEST Documentation', - 'The Natural Capital Project', 'InVEST', + 'The Natural Capital Project', 'InVEST', 'Integrated Valuation of Ecosystem Services and Tradeoffs', 'Scientific Software'), ] @@ -138,8 +136,8 @@ # -- Prepare for sphinx build --------------------------------------------- -# Use sphinx apidoc tool to generate documentation for invest. Generated rst -# files go into the api/ directory. Note that some apidoc options may not work +# Use sphinx apidoc tool to generate documentation for invest. Generated rst +# files go into the api/ directory. Note that some apidoc options may not work # the same because we aren't using their values in the custom templates apidoc.main([ '--force', # overwrite any files from previous run @@ -164,7 +162,7 @@ All InVEST models share a consistent python API: - - Every InVEST model has a corresponding module or subpackage in the + - Every InVEST model has a corresponding module or subpackage in the ``natcap.invest`` package - The model modules contain a function called ``execute`` - The ``execute`` function takes a single argument (``args``), a dictionary diff --git a/exe/hooks/rthook.py b/exe/hooks/rthook.py index 53a9af14b6..ec0b3264a8 100644 --- a/exe/hooks/rthook.py +++ b/exe/hooks/rthook.py @@ -1,9 +1,6 @@ -import sys import os -import multiprocessing import platform - -multiprocessing.freeze_support() +import sys os.environ['PROJ_LIB'] = os.path.join(sys._MEIPASS, 'proj') diff --git a/exe/invest.spec b/exe/invest.spec index ac4ec440bd..b0f3085ec7 100644 --- a/exe/invest.spec +++ b/exe/invest.spec @@ -30,7 +30,9 @@ kwargs = { 'pkg_resources.py2_warn', 'cmath', 'charset_normalizer', - 'scipy.special._cdflib' + 'scipy.special._cdflib', + 'scipy.special._special_ufuncs', + 'scipy._lib.array_api_compat.numpy.fft', ], 'datas': [proj_datas], 'cipher': block_cipher, diff --git a/pyproject.toml b/pyproject.toml index fadad29609..b16e94eead 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -46,7 +46,8 @@ invest = "natcap.invest.cli:main" # available at runtime. 
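# Note on the numpy build pins below: numpy 2.x ships wheels only for
# Python 3.9+, hence the environment markers, and extensions compiled
# against numpy 2 remain ABI-compatible with numpy 1.x at runtime. That is
# why requirements.txt (later in this diff) can drop its `<2.0` cap on
# numpy while the build-time requirement moves to `numpy>=2`.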
requires = [ 'setuptools>=61', 'wheel', 'setuptools_scm>=8.0', 'cython>=3.0.0', 'babel', - 'oldest-supported-numpy' + 'oldest-supported-numpy; python_version<="3.8"', + 'numpy>=2; python_version>="3.9"', # numpy 2 only available for 3.9+ ] build-backend = "setuptools.build_meta" diff --git a/requirements.txt b/requirements.txt index cf2d3aeb76..0ab53af935 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,11 +13,11 @@ GDAL>=3.4.2 Pyro4==4.77 # pip-only pandas>=1.2.1 -numpy>=1.11.0,!=1.16.0,<2.0 +numpy>=1.11.0,!=1.16.0 Rtree>=0.8.2,!=0.9.1 shapely>=2.0.0 scipy>=1.9.0,!=1.12.* -pygeoprocessing>=2.4.2 # pip-only +pygeoprocessing>=2.4.6 # pip-only taskgraph>=0.11.0 psutil>=5.6.6 chardet>=3.0.4 diff --git a/scripts/invest-autovalidate.py b/scripts/invest-autovalidate.py index 02ba843db9..d4a1a1a1de 100644 --- a/scripts/invest-autovalidate.py +++ b/scripts/invest-autovalidate.py @@ -1,14 +1,14 @@ #!python -import os -import tempfile -import logging import argparse -import unittest import glob import importlib -import shutil +import logging +import os import pprint +import shutil +import tempfile +import unittest from natcap.invest import datastack @@ -81,7 +81,7 @@ def main(sampledatadir): model_warnings = [] # define here in case of uncaught exception. try: - LOGGER.info('validating %s ', datastack_path) + LOGGER.info('validating %s ', os.path.abspath(datastack_path)) model_warnings = getattr( model_module, 'validate')(paramset.args) except AttributeError as err: diff --git a/src/natcap/invest/__main__.py b/src/natcap/invest/__main__.py index 1f68c51913..167e0fc170 100644 --- a/src/natcap/invest/__main__.py +++ b/src/natcap/invest/__main__.py @@ -1,5 +1,11 @@ +import multiprocessing import sys +# We want to guarantee that this is called BEFORE any other processes start, +# which could happen at import time. +if __name__ == '__main__': + multiprocessing.freeze_support() + from . import cli if __name__ == '__main__': diff --git a/src/natcap/invest/datastack.py b/src/natcap/invest/datastack.py index 461a12d60e..0fb2cabed4 100644 --- a/src/natcap/invest/datastack.py +++ b/src/natcap/invest/datastack.py @@ -535,6 +535,9 @@ def build_parameter_set(args, model_name, paramset_path, relative=False): Returns: ``None`` + + Raises: + ValueError if creating a relative path fails. """ def _recurse(args_param): if isinstance(args_param, dict): @@ -552,8 +555,16 @@ def _recurse(args_param): if (normalized_path == '.' or os.path.dirname(paramset_path) == normalized_path): return '.' - temp_rel_path = os.path.relpath( - normalized_path, os.path.dirname(paramset_path)) + try: + temp_rel_path = os.path.relpath( + normalized_path, os.path.dirname(paramset_path)) + except ValueError: + # On Windows, ValueError is raised when ``path`` and + # ``start`` are on different drives + raise ValueError( + """Error: Cannot save datastack with relative + paths across drives. Choose a different save + location, or use absolute paths.""") # Always save unix paths. 
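# Note on the except clause above: os.path.relpath raises ValueError on
# Windows when the path and the start directory are on different drives,
# e.g. os.path.relpath('D:/data.tif', 'C:/project'). No relative route
# exists between drive letters, so the only remedies are the ones the
# message suggests: a different save location or absolute paths.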
linux_style_path = temp_rel_path.replace('\\', '/') else: diff --git a/src/natcap/invest/forest_carbon_edge_effect.py b/src/natcap/invest/forest_carbon_edge_effect.py index 3b7c8c4b54..a72e7a258e 100644 --- a/src/natcap/invest/forest_carbon_edge_effect.py +++ b/src/natcap/invest/forest_carbon_edge_effect.py @@ -596,13 +596,14 @@ def _aggregate_carbon_map( for poly_feat in target_aggregate_layer: poly_fid = poly_feat.GetFID() poly_feat.SetField( - 'c_sum', serviceshed_stats[poly_fid]['sum']) + 'c_sum', float(serviceshed_stats[poly_fid]['sum'])) # calculates mean pixel value per ha in for each feature in AOI poly_geom = poly_feat.GetGeometryRef() poly_area_ha = poly_geom.GetArea() / 1e4 # converts m^2 to hectare poly_geom = None poly_feat.SetField( - 'c_ha_mean', serviceshed_stats[poly_fid]['sum']/poly_area_ha) + 'c_ha_mean', + float(serviceshed_stats[poly_fid]['sum'] / poly_area_ha)) target_aggregate_layer.SetFeature(poly_feat) target_aggregate_layer.CommitTransaction() @@ -778,9 +779,11 @@ def _build_spatial_index( local_model_dir, 'local_carbon_shape.shp') lulc_projection_wkt = pygeoprocessing.get_raster_info( base_raster_path)['projection_wkt'] - pygeoprocessing.reproject_vector( - tropical_forest_edge_carbon_model_vector_path, lulc_projection_wkt, - carbon_model_reproject_path) + + with utils._set_gdal_configuration('OGR_ENABLE_PARTIAL_REPROJECTION', 'TRUE'): + pygeoprocessing.reproject_vector( + tropical_forest_edge_carbon_model_vector_path, lulc_projection_wkt, + carbon_model_reproject_path) model_vector = gdal.OpenEx(carbon_model_reproject_path) model_layer = model_vector.GetLayer() diff --git a/src/natcap/invest/habitat_quality.py b/src/natcap/invest/habitat_quality.py index 3d5ff9889f..f2d37f005a 100644 --- a/src/natcap/invest/habitat_quality.py +++ b/src/natcap/invest/habitat_quality.py @@ -1,6 +1,7 @@ # coding=UTF-8 """InVEST Habitat Quality model.""" import collections +import csv import logging import os @@ -222,36 +223,36 @@ "output": { "type": "directory", "contents": { - "deg_sum_out_c.tif": { + "deg_sum_c.tif": { "about": ( "Relative level of habitat degradation on the current " "landscape."), "bands": {1: {"type": "ratio"}} }, - "deg_sum_out_f.tif": { + "deg_sum_f.tif": { "about": ( "Relative level of habitat degradation on the future " "landscape."), "bands": {1: {"type": "ratio"}}, "created_if": "lulc_fut_path" }, - "quality_out_c.tif": { + "quality_c.tif": { "about": ( "Relative level of habitat quality on the current " "landscape."), "bands": {1: {"type": "ratio"}} }, - "quality_out_f.tif": { + "quality_f.tif": { "about": ( "Relative level of habitat quality on the future " "landscape."), "bands": {1: {"type": "ratio"}}, "created_if": "lulc_fut_path" }, - "rarity_out_c.tif": { + "rarity_c.tif": { "about": ( "Relative habitat rarity on the current landscape " - "vis-a-vis the baseline map. The grid cell’s values " + "vis-a-vis the baseline map. The grid cell's values " "are defined between a range of 0 and 1 where 0.5 " "indicates no abundance change between the baseline " "and current or projected map. Values between 0 and 0.5 " @@ -267,10 +268,10 @@ "created_if": "lulc_bas_path", "bands": {1: {"type": "ratio"}} }, - "rarity_out_f.tif": { + "rarity_f.tif": { "about": ( "Relative habitat rarity on the future landscape " - "vis-a-vis the baseline map. The grid cell’s values " + "vis-a-vis the baseline map. The grid cell's values " "are defined between a range of 0 and 1 where 0.5 " "indicates no abundance change between the baseline " "and current or projected map. 
Values between 0 and " @@ -287,6 +288,70 @@ "created_if": "lulc_bas_path and lulc_fut_path", "bands": {1: {"type": "ratio"}} }, + "rarity_c.csv": { + "about": ("Table of rarity values by LULC code for the " + "current landscape."), + "index_col": "lulc_code", + "columns": { + "lulc_code": { + "type": "number", + "units": u.none, + "about": "LULC class", + }, + "rarity_value": { + "type": "number", + "units": u.none, + "about": ( + "Relative habitat rarity on the current landscape " + "vis-a-vis the baseline map. The rarity values " + "are defined between a range of 0 and 1 where 0.5 " + "indicates no abundance change between the baseline " + "and current or projected map. Values between 0 and 0.5 " + "indicate a habitat is more abundant and the closer " + "the value is to 0 the lesser the likelihood that the " + "preservation of that habitat type on the current or " + "future landscape is important to biodiversity conservation. " + "Values between 0.5 and 1 indicate a habitat is less " + "abundant and the closer the value is to 1 the greater " + "the likelihood that the preservation of that habitat " + "type on the current or future landscape is important " + "to biodiversity conservation."), + }, + }, + "created_if": "lulc_bas_path", + }, + "rarity_f.csv": { + "about": ("Table of rarity values by LULC code for the " + "future landscape."), + "index_col": "lulc_code", + "columns": { + "lulc_code": { + "type": "number", + "units": u.none, + "about": "LULC class", + }, + "rarity_value": { + "type": "number", + "units": u.none, + "about": ( + "Relative habitat rarity on the future landscape " + "vis-a-vis the baseline map. The rarity values " + "are defined between a range of 0 and 1 where 0.5 " + "indicates no abundance change between the baseline " + "and current or projected map. Values between 0 and 0.5 " + "indicate a habitat is more abundant and the closer " + "the value is to 0 the lesser the likelihood that the " + "preservation of that habitat type on the current or " + "future landscape is important to biodiversity conservation. 
" + "Values between 0.5 and 1 indicate a habitat is less " + "abundant and the closer the value is to 1 the greater " + "the likelihood that the preservation of that habitat " + "type on the current or future landscape is important " + "to biodiversity conservation."), + }, + }, + "created_if": "lulc_bas_path and lulc_fut_path", + }, } }, "intermediate": { @@ -564,7 +629,7 @@ def execute(args): rasterize_access_task = task_graph.add_task( func=pygeoprocessing.rasterize, - args=(args['access_vector_path'], access_raster_path), + args=(reprojected_access_path, access_raster_path), kwargs={ 'option_list': ['ATTRIBUTE=ACCESS'], 'burn_values': None @@ -752,13 +817,16 @@ def execute(args): intermediate_output_dir, f'new_cover{lulc_key}{file_suffix}.tif') - rarity_path = os.path.join( + rarity_raster_path = os.path.join( output_dir, f'rarity{lulc_key}{file_suffix}.tif') + rarity_csv_path = os.path.join( + output_dir, f'rarity{lulc_key}{file_suffix}.csv') + _ = task_graph.add_task( func=_compute_rarity_operation, args=((lulc_base_path, 1), (lulc_path, 1), (new_cover_path, 1), - rarity_path), + rarity_raster_path, rarity_csv_path), dependent_task_list=[align_task], task_name=f'rarity{lulc_time}') @@ -782,7 +850,7 @@ def _calculate_habitat_quality(deg_hab_raster_list, quality_out_path, ksq): pygeoprocessing.raster_map( op=lambda degradation, habitat: ( habitat * (1 - (degradation**_SCALING_PARAM) / - (degradation**_SCALING_PARAM + ksq))), + (degradation**_SCALING_PARAM + ksq))), rasters=deg_hab_raster_list, target_path=quality_out_path) @@ -838,8 +906,9 @@ def total_degradation(*arrays): def _compute_rarity_operation( - base_lulc_path_band, lulc_path_band, new_cover_path, rarity_path): - """Calculate habitat rarity. + base_lulc_path_band, lulc_path_band, new_cover_path, + rarity_raster_path, rarity_csv_path): + """Calculate habitat rarity and generate raster and CSV output. Output rarity values will be an index from 0 - 1 where: pixel > 0.5 - more rare @@ -855,7 +924,8 @@ def _compute_rarity_operation( new_cover_path (tuple): a 2 tuple for the path to intermediate raster file for trimming ``lulc_path_band`` to ``base_lulc_path_band`` of the form (path, band index). - rarity_path (string): path to output rarity raster. + rarity_raster_path (string): path to output rarity raster. + rarity_csv_path (string): path to output rarity CSV. Returns: None @@ -866,7 +936,6 @@ def _compute_rarity_operation( base_lulc_path_band[0]) base_pixel_size = base_raster_info['pixel_size'] base_area = float(abs(base_pixel_size[0]) * abs(base_pixel_size[1])) - base_nodata = base_raster_info['nodata'][0] lulc_code_count_b = _raster_pixel_count(base_lulc_path_band) @@ -874,7 +943,6 @@ def _compute_rarity_operation( lulc_raster_info = pygeoprocessing.get_raster_info(lulc_path_band[0]) lulc_pixel_size = lulc_raster_info['pixel_size'] lulc_area = float(abs(lulc_pixel_size[0]) * abs(lulc_pixel_size[1])) - lulc_nodata = lulc_raster_info['nodata'][0] # Trim cover_x to the mask of base. pygeoprocessing.raster_map( @@ -904,13 +972,34 @@ def _compute_rarity_operation( code_index[code] = 0.0 pygeoprocessing.reclassify_raster( - new_cover_path, code_index, rarity_path, gdal.GDT_Float32, + new_cover_path, code_index, rarity_raster_path, gdal.GDT_Float32, _OUT_NODATA) + _generate_rarity_csv(code_index, rarity_csv_path) + LOGGER.info('Finished rarity computation on' f' {os.path.basename(lulc_path_band[0])} land cover.') +def _generate_rarity_csv(rarity_dict, target_csv_path): + """Generate CSV containing rarity values by LULC code. 
+ + Args: + rarity_dict (dict): dictionary containing LULC codes (as keys) + and their associated rarity values (as values). + target_csv_path (string): path to output CSV. + + Returns: + None + """ + lulc_codes = sorted(rarity_dict) + with open(target_csv_path, 'w', newline='') as csvfile: + writer = csv.writer(csvfile, delimiter=',') + writer.writerow(['lulc_code', 'rarity_value']) + for lulc_code in lulc_codes: + writer.writerow([lulc_code, rarity_dict[lulc_code]]) + + def _raster_pixel_count(raster_path_band): """Count unique pixel values in single band raster. diff --git a/src/natcap/invest/pollination.py b/src/natcap/invest/pollination.py index cae1cbf738..131e1e46a3 100644 --- a/src/natcap/invest/pollination.py +++ b/src/natcap/invest/pollination.py @@ -1045,28 +1045,28 @@ def execute(args): # this is YT from the user's guide (y_tot) farm_feature.SetField( _TOTAL_FARM_YIELD_FIELD_ID, - 1 - nu * ( + float(1 - nu * ( 1 - total_farm_results[fid]['sum'] / - float(total_farm_results[fid]['count']))) + float(total_farm_results[fid]['count'])))) # this is PYW ('pdep_y_w') farm_feature.SetField( _POLLINATOR_PROPORTION_FARM_YIELD_FIELD_ID, - (wild_pollinator_yield_aggregate[fid]['sum'] / + float(wild_pollinator_yield_aggregate[fid]['sum'] / float(wild_pollinator_yield_aggregate[fid]['count']))) # this is YW ('y_wild') farm_feature.SetField( _WILD_POLLINATOR_FARM_YIELD_FIELD_ID, - nu * (wild_pollinator_yield_aggregate[fid]['sum'] / - float(wild_pollinator_yield_aggregate[fid]['count']))) + float(nu * (wild_pollinator_yield_aggregate[fid]['sum'] / + float(wild_pollinator_yield_aggregate[fid]['count'])))) # this is PAT ('p_abund') farm_season = farm_feature.GetField(_FARM_SEASON_FIELD) farm_feature.SetField( _POLLINATOR_ABUNDANCE_FARM_FIELD_ID, - pollinator_abundance_results[farm_season][fid]['sum'] / - float(pollinator_abundance_results[farm_season][fid]['count'])) + float(pollinator_abundance_results[farm_season][fid]['sum'] / + float(pollinator_abundance_results[farm_season][fid]['count']))) target_farm_layer.SetFeature(farm_feature) target_farm_layer.SyncToDisk() diff --git a/src/natcap/invest/recreation/recmodel_client.py b/src/natcap/invest/recreation/recmodel_client.py index bba897ab6a..d043ad265e 100644 --- a/src/natcap/invest/recreation/recmodel_client.py +++ b/src/natcap/invest/recreation/recmodel_client.py @@ -1043,10 +1043,12 @@ def _raster_sum_mean( numpy.array(fid_raster_values['sum']) / numpy.array(fid_raster_values['count'])) predictor_results = dict( - zip(fid_raster_values['fid'], mean_results)) + zip(fid_raster_values['fid'], + (i.item() for i in mean_results))) else: predictor_results = dict( - zip(fid_raster_values['fid'], fid_raster_values['sum'])) + zip(fid_raster_values['fid'], + (i.item() for i in fid_raster_values['sum']))) with open(predictor_target_path, 'w') as jsonfile: json.dump(predictor_results, jsonfile) diff --git a/src/natcap/invest/scenario_gen_proximity.py b/src/natcap/invest/scenario_gen_proximity.py index 08c6ebd09b..db97c7c23e 100644 --- a/src/natcap/invest/scenario_gen_proximity.py +++ b/src/natcap/invest/scenario_gen_proximity.py @@ -500,7 +500,7 @@ def _convert_landscape( def _mask_base_op(lulc_array): """Create a mask of valid non-base pixels only.""" - base_mask = numpy.in1d( + base_mask = numpy.isin( lulc_array.flatten(), focal_landcover_codes).reshape( lulc_array.shape) if invert_mask: @@ -545,7 +545,7 @@ def _combine_masks(base_distance_array, non_base_distance_array): # turn inside and outside masks into a single mask def 
_mask_to_convertible_codes(distance_from_base_edge, lulc): """Mask out the distance transform to a set of lucodes.""" - convertible_mask = numpy.in1d( + convertible_mask = numpy.isin( lulc.flatten(), convertible_type_list).reshape(lulc.shape) return numpy.where( convertible_mask, distance_from_base_edge, diff --git a/src/natcap/invest/sdr/sdr_core.pyx b/src/natcap/invest/sdr/sdr_core.pyx index 69fc7500e4..345ef6ab76 100644 --- a/src/natcap/invest/sdr/sdr_core.pyx +++ b/src/natcap/invest/sdr/sdr_core.pyx @@ -356,13 +356,13 @@ def calculate_sediment_deposition( This algorithm outputs both sediment deposition (t_i) and flux (f_i):: - t_i = dr_i * (sum over j ∈ J of f_j * p(i,j)) + E'_i + t_i = dt_i * (sum over j ∈ J of f_j * p(j,i)) - f_i = (1 - dr_i) * (sum over j ∈ J of f_j * p(i,j)) + E'_i + f_i = (1 - dt_i) * (sum over j ∈ J of f_j * p(j,i)) + E'_i (sum over k ∈ K of SDR_k * p(i,k)) - SDR_i - dr_i = -------------------------------------------- + dt_i = -------------------------------------------- (1 - SDR_i) where: @@ -645,21 +645,21 @@ def calculate_sediment_deposition( if sdr_i == 1: # This reflects property B in the user's guide and is # an edge case to avoid division-by-zero. - dr_i = 1 + dt_i = 1 else: - dr_i = (downslope_sdr_weighted_sum - sdr_i) / (1 - sdr_i) + dt_i = (downslope_sdr_weighted_sum - sdr_i) / (1 - sdr_i) # Lisa's modified equations - t_i = dr_i * f_j_weighted_sum # deposition, a.k.a trapped sediment - f_i = (1 - dr_i) * f_j_weighted_sum + e_prime_i # flux + t_i = dt_i * f_j_weighted_sum # deposition, a.k.a trapped sediment + f_i = (1 - dt_i) * f_j_weighted_sum + e_prime_i # flux - # On large flow paths, it's possible for dr_i, f_i and t_i + # On large flow paths, it's possible for dt_i, f_i and t_i # to have very small negative values that are numerically # equivalent to 0. These negative values were raising # questions on the forums and it's easier to clamp the # values here than to explain IEEE 754. - if dr_i < 0: - dr_i = 0 + if dt_i < 0: + dt_i = 0 if t_i < 0: t_i = 0 if f_i < 0: diff --git a/src/natcap/invest/seasonal_water_yield/seasonal_water_yield.py b/src/natcap/invest/seasonal_water_yield/seasonal_water_yield.py index 85254b2ef4..589b0aa7d0 100644 --- a/src/natcap/invest/seasonal_water_yield/seasonal_water_yield.py +++ b/src/natcap/invest/seasonal_water_yield/seasonal_water_yield.py @@ -654,6 +654,7 @@ def execute(args): # ValueError when n_workers is an empty string. # TypeError when n_workers is None. n_workers = -1 # Synchronous mode. 
+ LOGGER.debug('n_workers: %s', n_workers) task_graph = taskgraph.TaskGraph( os.path.join(args['workspace_dir'], 'taskgraph_cache'), n_workers, reporting_interval=5) @@ -664,6 +665,9 @@ def execute(args): (_INTERMEDIATE_BASE_FILES, intermediate_output_dir)], file_suffix) LOGGER.info('Checking that the AOI is not the output aggregate vector') + LOGGER.debug("aoi_path: %s", args['aoi_path']) + LOGGER.debug("aggregate_vector_path: %s", + os.path.normpath(file_registry['aggregate_vector_path'])) if (os.path.normpath(args['aoi_path']) == os.path.normpath(file_registry['aggregate_vector_path'])): raise ValueError( @@ -1256,7 +1260,7 @@ def cn_op(lulc_array, soil_group_array): # if lulc_array value not in lulc_to_soil[soil_group_id]['lulc_values'] # then numpy.digitize will not bin properly and cause an IndexError # during the reshaping call - lulc_unique = set(numpy.unique(lulc_array)) + lulc_unique = set(i.item() for i in numpy.unique(lulc_array)) if not lulc_unique.issubset(lucodes_set): # cast to list to conform with similar error messages in InVEST missing_lulc_values = sorted(lulc_unique.difference(lucodes_set)) diff --git a/src/natcap/invest/ui_server.py b/src/natcap/invest/ui_server.py index 99814902f9..b73191c086 100644 --- a/src/natcap/invest/ui_server.py +++ b/src/natcap/invest/ui_server.py @@ -188,7 +188,9 @@ def write_parameter_set_file(): relativePaths: boolean Returns: - A string. + A dictionary with the following key/value pairs: + - message (string): for logging and/or rendering in the UI. + - error (boolean): True if an error occurred, otherwise False. """ payload = request.get_json() filepath = payload['filepath'] @@ -196,9 +198,19 @@ def write_parameter_set_file(): args = json.loads(payload['args']) relative_paths = payload['relativePaths'] - datastack.build_parameter_set( - args, modulename, filepath, relative=relative_paths) - return 'parameter set saved' + try: + datastack.build_parameter_set( + args, modulename, filepath, relative=relative_paths) + except ValueError as message: + LOGGER.error(str(message)) + return { + 'message': str(message), + 'error': True + } + return { + 'message': 'Parameter set saved', + 'error': False + } @app.route(f'/{PREFIX}/save_to_python', methods=['POST']) @@ -234,15 +246,26 @@ def build_datastack_archive(): args: JSON string of InVEST model args keys and values Returns: - A string. + A dictionary with the following key/value pairs: + - message (string): for logging and/or rendering in the UI. + - error (boolean): True if an error occurred, otherwise False. 
""" payload = request.get_json() - datastack.build_datastack_archive( - json.loads(payload['args']), - payload['moduleName'], - payload['filepath']) - - return 'datastack archive created' + try: + datastack.build_datastack_archive( + json.loads(payload['args']), + payload['moduleName'], + payload['filepath']) + except ValueError as message: + LOGGER.error(str(message)) + return { + 'message': str(message), + 'error': True + } + return { + 'message': 'Datastack archive created', + 'error': False + } @app.route(f'/{PREFIX}/log_model_start', methods=['POST']) @@ -264,6 +287,7 @@ def log_model_exit(): payload['status']) return 'OK' + @app.route(f'/{PREFIX}/languages', methods=['GET']) def get_supported_languages(): """Return a mapping of supported languages to their display names.""" diff --git a/src/natcap/invest/urban_cooling_model.py b/src/natcap/invest/urban_cooling_model.py index 70cc5ef9a1..96b897b821 100644 --- a/src/natcap/invest/urban_cooling_model.py +++ b/src/natcap/invest/urban_cooling_model.py @@ -978,13 +978,13 @@ def calculate_uhi_result_vector( if cc_stats[feature_id]['count'] > 0: mean_cc = ( cc_stats[feature_id]['sum'] / cc_stats[feature_id]['count']) - feature.SetField('avg_cc', mean_cc) + feature.SetField('avg_cc', float(mean_cc)) mean_t_air = None if t_air_stats[feature_id]['count'] > 0: mean_t_air = ( t_air_stats[feature_id]['sum'] / t_air_stats[feature_id]['count']) - feature.SetField('avg_tmp_v', mean_t_air) + feature.SetField('avg_tmp_v', float(mean_t_air)) if mean_t_air: feature.SetField( @@ -994,7 +994,7 @@ def calculate_uhi_result_vector( wbgt = ( wbgt_stats[feature_id]['sum'] / wbgt_stats[feature_id]['count']) - feature.SetField('avg_wbgt_v', wbgt) + feature.SetField('avg_wbgt_v', float(wbgt)) if light_loss_stats and light_loss_stats[feature_id]['count'] > 0: light_loss = ( diff --git a/src/natcap/invest/urban_flood_risk_mitigation.py b/src/natcap/invest/urban_flood_risk_mitigation.py index e1fb31b051..95d29c79c7 100644 --- a/src/natcap/invest/urban_flood_risk_mitigation.py +++ b/src/natcap/invest/urban_flood_risk_mitigation.py @@ -183,14 +183,14 @@ "about": "Map of runoff volume.", "bands": {1: {"type": "number", "units": u.meter**3}} }, - "reprojected_aoi.gpkg": { + "reprojected_aoi.shp": { "about": ( "Copy of AOI vector reprojected to the same spatial " "reference as the LULC."), "geometries": spec_utils.POLYGONS, "fields": {} }, - "structures_reprojected.gpkg": { + "structures_reprojected.shp": { "about": ( "Copy of built infrastructure vector reprojected to " "the same spatial reference as the LULC."), @@ -418,14 +418,14 @@ def execute(args): task_name='calculate service built raster') reprojected_aoi_path = os.path.join( - intermediate_dir, 'reprojected_aoi.gpkg') + intermediate_dir, 'reprojected_aoi') reprojected_aoi_task = task_graph.add_task( func=pygeoprocessing.reproject_vector, args=( args['aoi_watersheds_path'], target_sr_wkt, reprojected_aoi_path), - kwargs={'driver_name': 'GPKG'}, + kwargs={'driver_name': 'ESRI Shapefile'}, target_path_list=[reprojected_aoi_path], task_name='reproject aoi/watersheds') @@ -445,7 +445,7 @@ def execute(args): (runoff_retention_raster_path, 1), reprojected_aoi_path), store_result=True, - dependent_task_list=[runoff_retention_task], + dependent_task_list=[runoff_retention_task, reprojected_aoi_task], task_name='zonal_statistics over runoff_retention raster') runoff_retention_volume_stats_task = task_graph.add_task( @@ -454,7 +454,7 @@ def execute(args): (runoff_retention_vol_raster_path, 1), reprojected_aoi_path), 
store_result=True, - dependent_task_list=[runoff_retention_vol_task], + dependent_task_list=[runoff_retention_vol_task, reprojected_aoi_task], task_name='zonal_statistics over runoff_retention_volume raster') damage_per_aoi_stats = None @@ -467,13 +467,13 @@ def execute(args): args['built_infrastructure_vector_path'] not in ('', None)): # Reproject the built infrastructure vector to the target SRS. reprojected_structures_path = os.path.join( - intermediate_dir, 'structures_reprojected.gpkg') + intermediate_dir, 'structures_reprojected') reproject_built_infrastructure_task = task_graph.add_task( func=pygeoprocessing.reproject_vector, args=(args['built_infrastructure_vector_path'], target_sr_wkt, reprojected_structures_path), - kwargs={'driver_name': 'GPKG'}, + kwargs={'driver_name': 'ESRI Shapefile'}, target_path_list=[reprojected_structures_path], task_name='reproject built infrastructure to target SRS') @@ -522,7 +522,7 @@ def _write_summary_vector( runoff_ret_vol_stats, flood_volume_stats, damage_per_aoi_stats=None): """Write a vector with summary statistics. - This vector will always contain two fields:: + This vector will always contain three fields:: * ``'flood_vol'``: The volume of flood (runoff), in m3, per watershed. * ``'rnf_rt_idx'``: Average of runoff retention values per watershed @@ -563,21 +563,11 @@ def _write_summary_vector( ``None`` """ source_aoi_vector = gdal.OpenEx(source_aoi_vector_path, gdal.OF_VECTOR) - source_aoi_layer = source_aoi_vector.GetLayer() - source_geom_type = source_aoi_layer.GetGeomType() - source_srs_wkt = pygeoprocessing.get_vector_info( - source_aoi_vector_path)['projection_wkt'] - source_srs = osr.SpatialReference() - source_srs.ImportFromWkt(source_srs_wkt) - esri_driver = gdal.GetDriverByName('ESRI Shapefile') - target_watershed_vector = esri_driver.Create( - target_vector_path, 0, 0, 0, gdal.GDT_Unknown) - layer_name = os.path.splitext(os.path.basename( - target_vector_path))[0] - LOGGER.debug(f"creating layer {layer_name}") - target_watershed_layer = target_watershed_vector.CreateLayer( - layer_name, source_srs, source_geom_type) + esri_driver.CreateCopy(target_vector_path, source_aoi_vector) + target_watershed_vector = gdal.OpenEx(target_vector_path, + gdal.OF_VECTOR | gdal.GA_Update) + target_watershed_layer = target_watershed_vector.GetLayer() target_fields = ['rnf_rt_idx', 'rnf_rt_m3', 'flood_vol'] if damage_per_aoi_stats is not None: @@ -589,13 +579,9 @@ def _write_summary_vector( field_def.SetPrecision(11) target_watershed_layer.CreateField(field_def) - target_layer_defn = target_watershed_layer.GetLayerDefn() - for base_feature in source_aoi_layer: - feature_id = base_feature.GetFID() - target_feature = ogr.Feature(target_layer_defn) - base_geom_ref = base_feature.GetGeometryRef() - target_feature.SetGeometry(base_geom_ref.Clone()) - base_geom_ref = None + target_watershed_layer.ResetReading() + for target_feature in target_watershed_layer: + feature_id = target_feature.GetFID() pixel_count = runoff_ret_stats[feature_id]['count'] if pixel_count > 0: @@ -615,13 +601,14 @@ def _write_summary_vector( # This is the service_built equation. 
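# Note: the float() casts added here, and in the pollination, urban
# cooling, and forest carbon hunks above, appear to belong to the numpy 2
# work: aggregate statistics can arrive as numpy scalar types, and OGR's
# SetField binding is strict about non-native numeric types, so values are
# converted to plain Python floats before being written to the vector.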
target_feature.SetField( - 'serv_blt', ( + 'serv_blt', float( damage_sum * runoff_ret_vol_stats[feature_id]['sum'])) target_feature.SetField( 'flood_vol', float(flood_volume_stats[feature_id]['sum'])) - target_watershed_layer.CreateFeature(target_feature) + target_watershed_layer.SetFeature(target_feature) + target_watershed_layer.SyncToDisk() target_watershed_layer = None target_watershed_vector = None @@ -959,7 +946,8 @@ def validate(args, limit_to=None): nan_mask = cn_df.isna() if nan_mask.any(axis=None): nan_lucodes = nan_mask[nan_mask.any(axis=1)].index - lucode_list = list(nan_lucodes.values) + # Convert numpy dtype values to native python types + lucode_list = [i.item() for i in nan_lucodes.values] validation_warnings.append(( ['curve_number_table_path'], f'Missing curve numbers for lucode(s) {lucode_list}')) diff --git a/src/natcap/invest/urban_nature_access.py b/src/natcap/invest/urban_nature_access.py index b0c9754fc8..e47a4d2794 100644 --- a/src/natcap/invest/urban_nature_access.py +++ b/src/natcap/invest/urban_nature_access.py @@ -767,13 +767,12 @@ def execute(args): squared_lulc_pixel_size = _square_off_pixels(args['lulc_raster_path']) lulc_alignment_task = graph.add_task( - pygeoprocessing.warp_raster, + _warp_lulc, kwargs={ - 'base_raster_path': args['lulc_raster_path'], - 'target_pixel_size': squared_lulc_pixel_size, - 'target_bb': target_bounding_box, - 'target_raster_path': file_registry['aligned_lulc'], - 'resample_method': 'near', + "source_lulc_path": args['lulc_raster_path'], + "target_lulc_path": file_registry['aligned_lulc'], + "target_pixel_size": squared_lulc_pixel_size, + "target_bounding_box": target_bounding_box, }, target_path_list=[file_registry['aligned_lulc']], task_name='Resample LULC to have square pixels' @@ -2540,6 +2539,40 @@ def _create_mask(*raster_arrays): _create_mask, target_mask_path, gdal.GDT_Byte, nodata_target=255) +def _warp_lulc(source_lulc_path, target_lulc_path, target_pixel_size, + target_bounding_box): + """Warp a LULC raster and set a nodata if needed. + + Args: + source_lulc_path (str): The path to a source LULC raster. + target_lulc_path (str): The path to the new LULC raster. + target_pixel_size (tuple): A 2-tuple of the target pixel size. + target_bounding_box (tuple): A 4-tuple of the target bounding box. + + Returns: + ``None``. + """ + source_raster_info = pygeoprocessing.get_raster_info(source_lulc_path) + target_nodata = source_raster_info['nodata'][0] + + pygeoprocessing.warp_raster( + source_lulc_path, target_pixel_size, target_lulc_path, + 'near', target_bb=target_bounding_box, + target_projection_wkt=source_raster_info['projection_wkt']) + + # if there is no defined nodata, set a default value + if target_nodata is None: + # Guarantee that our nodata cannot be represented by the datatype - + # select a nodata value that's out of range. + target_nodata = pygeoprocessing.choose_nodata( + source_raster_info['numpy_type']) + 1 + raster = gdal.OpenEx(target_lulc_path, gdal.GA_Update) + band = raster.GetRasterBand(1) + band.SetNoDataValue(target_nodata) + band = None + raster = None + + def _mask_raster(source_raster_path, mask_raster_path, target_raster_path): """Convert pixels to nodata given an existing mask raster. 
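A note on the nodata handling in ``_warp_lulc`` above: when the source LULC defines no nodata value, the helper registers one that real data cannot contain by taking ``pygeoprocessing.choose_nodata``'s suggestion for the raster's dtype and going one past it. A minimal standalone sketch of that pattern, assuming a single-band raster at the hypothetical path ``lulc.tif``:

    from osgeo import gdal
    import pygeoprocessing

    raster_info = pygeoprocessing.get_raster_info('lulc.tif')  # hypothetical path
    if raster_info['nodata'][0] is None:
        # Choose a value the data itself cannot represent, then register it.
        nodata = pygeoprocessing.choose_nodata(raster_info['numpy_type']) + 1
        raster = gdal.OpenEx('lulc.tif', gdal.GA_Update)
        band = raster.GetRasterBand(1)
        band.SetNoDataValue(float(nodata))  # plain float, matching casts elsewhere in this PR
        band = None  # dereference to flush and close
        raster = None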
diff --git a/src/natcap/invest/utils.py b/src/natcap/invest/utils.py index f34c25d9e5..dcae5734dd 100644 --- a/src/natcap/invest/utils.py +++ b/src/natcap/invest/utils.py @@ -121,6 +121,25 @@ def capture_gdal_logging(): gdal.PopErrorHandler() +@contextlib.contextmanager +def _set_gdal_configuration(opt, value): + """Temporarily set a GDAL configuration option. + + Args: + opt (string): The GDAL configuration option to set. + value (string): The value to set the option to. + + Returns: + ``None`` + """ + prior_value = gdal.GetConfigOption(opt) + gdal.SetConfigOption(opt, value) + try: + yield + finally: + gdal.SetConfigOption(opt, prior_value) + + def _format_time(seconds): """Render the integer number of seconds as a string. Returns a string.""" hours, remainder = divmod(seconds, 3600) diff --git a/src/natcap/invest/validation.py b/src/natcap/invest/validation.py index 9e9ae2f5f1..b000d49e21 100644 --- a/src/natcap/invest/validation.py +++ b/src/natcap/invest/validation.py @@ -166,9 +166,8 @@ def check_directory(dirpath, must_exist=True, permissions='rx', **kwargs): must_exist=True (bool): If ``True``, the directory at ``dirpath`` must already exist on the filesystem. permissions='rx' (string): A string that includes the lowercase - characters ``r``, ``w`` and/or ``x`` indicating required - permissions for this folder . See ``check_permissions`` for - details. + characters ``r``, ``w`` and/or ``x``, indicating read, write, and + execute permissions (respectively) required for this directory. Returns: A string error message if an error was found. ``None`` otherwise. @@ -193,9 +192,33 @@ def check_directory(dirpath, must_exist=True, permissions='rx', **kwargs): dirpath = parent break - permissions_warning = check_permissions(dirpath, permissions, True) - if permissions_warning: - return permissions_warning + MESSAGE_KEY = 'NEED_PERMISSION_DIRECTORY' + + if 'r' in permissions: + try: + os.scandir(dirpath).close() + except OSError: + return MESSAGES[MESSAGE_KEY].format(permission='read') + + # Check for x access before checking for w, + # since w operations to a dir are dependent on x access + if 'x' in permissions: + try: + cwd = os.getcwd() + os.chdir(dirpath) + except OSError: + return MESSAGES[MESSAGE_KEY].format(permission='execute') + finally: + os.chdir(cwd) + + if 'w' in permissions: + try: + temp_path = os.path.join(dirpath, 'temp__workspace_validation.txt') + with open(temp_path, 'w') as temp: + temp.close() + os.remove(temp_path) + except OSError: + return MESSAGES[MESSAGE_KEY].format(permission='write') def check_file(filepath, permissions='r', **kwargs): @@ -204,9 +227,8 @@ def check_file(filepath, permissions='r', **kwargs): Args: filepath (string): The filepath to validate. permissions='r' (string): A string that includes the lowercase - characters ``r``, ``w`` and/or ``x`` indicating required - permissions for this file. See ``check_permissions`` for - details. + characters ``r``, ``w`` and/or ``x``, indicating read, write, and + execute permissions (respectively) required for this file. Returns: A string error message if an error was found. ``None`` otherwise. @@ -215,36 +237,12 @@ def check_file(filepath, permissions='r', **kwargs): if not os.path.exists(filepath): return MESSAGES['FILE_NOT_FOUND'] - permissions_warning = check_permissions(filepath, permissions) - if permissions_warning: - return permissions_warning - - -def check_permissions(path, permissions, is_directory=False): - """Validate permissions on a filesystem object. 
- - This function uses ``os.access`` to determine permissions access. - - Args: - path (string): The path to examine for permissions. - permissions (string): a string including the characters ``r``, ``w`` - and/or ``x`` (lowercase), indicating read, write, and execute - permissions (respectively) that the filesystem object at ``path`` - must have. - is_directory (boolean): Indicates whether the path refers to a directory - (True) or a file (False). Defaults to False. - - Returns: - A string error message if an error was found. ``None`` otherwise. - - """ for letter, mode, descriptor in ( ('r', os.R_OK, 'read'), ('w', os.W_OK, 'write'), ('x', os.X_OK, 'execute')): - if letter in permissions and not os.access(path, mode): - message_key = 'NEED_PERMISSION_DIRECTORY' if is_directory else 'NEED_PERMISSION_FILE' - return MESSAGES[message_key].format(permission=descriptor) + if letter in permissions and not os.access(filepath, mode): + return MESSAGES['NEED_PERMISSION_FILE'].format(permission=descriptor) def _check_projection(srs, projected, projection_units): @@ -801,7 +799,11 @@ def check_spatial_overlap(spatial_filepaths_list, for filepath in spatial_filepaths_list: try: info = pygeoprocessing.get_raster_info(filepath) - except ValueError: + except (ValueError, RuntimeError): + # ValueError is raised by PyGeoprocessing < 3.4.4 when the file is + # not a raster. + # RuntimeError is raised by GDAL in PyGeoprocessing >= 3.4.4 when + # the file is not a raster. info = pygeoprocessing.get_vector_info(filepath) if info['projection_wkt'] is None: diff --git a/tests/test_datastack.py b/tests/test_datastack.py index 45d2e590bd..35d0695fa7 100644 --- a/tests/test_datastack.py +++ b/tests/test_datastack.py @@ -6,10 +6,10 @@ import pprint import shutil import sys -import tarfile import tempfile import textwrap import unittest +from unittest.mock import patch import numpy import pandas @@ -397,6 +397,25 @@ def test_archive_extraction(self): os.path.join(spatial_csv_dir, spatial_csv_dict[4]['path']), target_csv_vector_path) + def test_relative_path_failure(self): + """Datastack: raise error when relative path creation fails.""" + from natcap.invest import datastack + params = { + 'workspace_dir': os.path.join(self.workspace), + } + + archive_path = os.path.join(self.workspace, 'archive.invs.tar.gz') + + # Call build_datastack_archive and force build_parameter_set + # to raise an error + error_message = 'Error saving datastack' + with self.assertRaises(ValueError): + with patch('natcap.invest.datastack.build_parameter_set', + side_effect=ValueError(error_message)): + datastack.build_datastack_archive( + params, 'test_datastack_modules.simple_parameters', + archive_path) + class ParameterSetTest(unittest.TestCase): """Test Datastack.""" @@ -506,6 +525,26 @@ def test_relative_parameter_set(self): self.assertEqual(invest_version, __version__) self.assertEqual(callable_name, modelname) + def test_relative_path_failure(self): + """Datastack: raise error when relative path creation fails.""" + from natcap.invest import datastack + + params = { + 'data_dir': os.path.join(self.workspace, 'data_dir'), + } + modelname = 'natcap.invest.foo' + paramset_filename = os.path.join(self.workspace, 'paramset.json') + + # make the sample data so filepaths are interpreted correctly + os.makedirs(params['data_dir']) + + # Call build_parameter_set and force it into an error state + with self.assertRaises(ValueError): + with patch('natcap.invest.os.path.relpath', + side_effect=ValueError): + datastack.build_parameter_set( + params, 
modelname, paramset_filename, relative=True) + @unittest.skipUnless(sys.platform.startswith("win"), "requires Windows") def test_relative_parameter_set_windows(self): """Datastack: test relative parameter set paths saved linux style.""" diff --git a/tests/test_habitat_quality.py b/tests/test_habitat_quality.py index 0924dc76c8..2359cfeef1 100644 --- a/tests/test_habitat_quality.py +++ b/tests/test_habitat_quality.py @@ -1,4 +1,5 @@ """Module for Regression Testing the InVEST Habitat Quality model.""" +import csv import os import shutil import tempfile @@ -245,6 +246,33 @@ def test_habitat_quality_presence_absence_regression(self): # so we should exclude those new nodata pixel values. assert_array_sum(raster_path, assert_value, include_nodata=False) + # Based on the scenarios used to generate the rasters above, + # rarity values are calculated as follows: + # For LULC 1, rarity = 1.0 - (5000 / (10000 + 5000)) = 0.6667. + # For LULC 2 and 3, rarity = 0.0 because they are not in the baseline. + expected_csv_values = { + 'rarity_c_regression.csv': [ + (1, 0.6667, 4), + (2, 0.0, 0), + ], + 'rarity_f_regression.csv': [ + (1, 0.6667, 4), + (3, 0.0, 0), + ], + } + for csv_filename in expected_csv_values.keys(): + csv_path = os.path.join(args['workspace_dir'], csv_filename) + with open(csv_path, newline='') as csvfile: + reader = csv.DictReader(csvfile, delimiter=',') + self.assertEqual(reader.fieldnames, + ['lulc_code', 'rarity_value']) + for (exp_lulc, exp_rarity, + places_to_round) in expected_csv_values[csv_filename]: + row = next(reader) + self.assertEqual(int(row['lulc_code']), exp_lulc) + self.assertAlmostEqual(float(row['rarity_value']), + exp_rarity, places_to_round) + def test_habitat_quality_regression_different_projections(self): """Habitat Quality: base regression test with simplified data.""" from natcap.invest import habitat_quality @@ -1047,7 +1075,6 @@ def test_habitat_quality_case_insensitivty(self): habitat_quality.execute(args) # Reasonable to just check quality out in this case - #assert_array_sum( assert_array_sum( os.path.join(args['workspace_dir'], 'quality_c.tif'), 5852.088) diff --git a/tests/test_seasonal_water_yield_regression.py b/tests/test_seasonal_water_yield_regression.py index 899fc1446b..e5eafa0e98 100644 --- a/tests/test_seasonal_water_yield_regression.py +++ b/tests/test_seasonal_water_yield_regression.py @@ -495,7 +495,7 @@ def test_duplicate_aoi_assertion(self): args = { 'workspace_dir': self.workspace_dir, 'aoi_path': os.path.join( - self.workspace_dir, 'aggregated_results_foo.shp'), + self.workspace_dir, 'aggregated_results_swy_foo.shp'), 'results_suffix': 'foo', 'alpha_m': '1/12', 'beta_i': '1.0', diff --git a/tests/test_ucm.py b/tests/test_ucm.py index 1696cd4e58..ea6ef583b7 100644 --- a/tests/test_ucm.py +++ b/tests/test_ucm.py @@ -1,12 +1,12 @@ """InVEST Urban Heat Island Mitigation model tests.""" -import unittest -import tempfile -import shutil import os +import shutil +import tempfile +import unittest import numpy -from osgeo import gdal import pandas +from osgeo import gdal REGRESSION_DATA = os.path.join( os.path.dirname(__file__), '..', 'data', 'invest-test-data', 'ucm') @@ -79,11 +79,9 @@ def test_ucm_regression_factors(self): for key, expected_value in expected_results.items(): actual_value = float(results_feature.GetField(key)) # These accumulated values (esp. avd_eng_cn) are accumulated - # and may differ past about 4 decimal places. 
- self.assertAlmostEqual( - actual_value, expected_value, places=4, - msg='%s should be close to %f, actual: %f' % ( - key, expected_value, actual_value)) + # and may differ slightly from expected regression values. + numpy.testing.assert_allclose(actual_value, expected_value, + rtol=1e-4) finally: results_layer = None results_vector = None @@ -110,10 +108,7 @@ def test_ucm_regression_factors(self): # Expected energy savings is an accumulated value and may differ # past about 4 decimal places. - self.assertAlmostEqual( - energy_sav, expected_energy_sav, places=4, msg=( - '%f should be close to %f' % ( - energy_sav, expected_energy_sav))) + numpy.testing.assert_allclose(energy_sav, expected_energy_sav, rtol=1e-4) self.assertEqual(n_nonetype, 119) finally: buildings_layer = None @@ -151,10 +146,8 @@ def test_ucm_regression_factors(self): # These accumulated values are accumulated # and may differ past about 4 decimal places. - self.assertAlmostEqual( - energy_sav, expected_energy_sav, places=4, msg=( - '%f should be close to %f' % ( - energy_sav, expected_energy_sav))) + numpy.testing.assert_allclose(energy_sav, expected_energy_sav, + rtol=1e-4) self.assertEqual(n_nonetype, 119) finally: buildings_layer = None @@ -215,11 +208,9 @@ def test_ucm_regression_intensity(self): for key, expected_value in expected_results.items(): actual_value = float(results_feature.GetField(key)) # These accumulated values (esp. avd_eng_cn) are accumulated - # and may differ past about 4 decimal places. - self.assertAlmostEqual( - actual_value, expected_value, places=4, - msg='%s should be close to %f, actual: %f' % ( - key, expected_value, actual_value)) + # and may differ slightly. + numpy.testing.assert_allclose(actual_value, expected_value, + rtol=1e-4) finally: results_layer = None results_vector = None @@ -335,7 +326,8 @@ def test_missing_lulc_value_in_table(self): def test_bad_args(self): """UCM: test validation of bad arguments.""" - from natcap.invest import urban_cooling_model, validation + from natcap.invest import urban_cooling_model + from natcap.invest import validation args = { 'workspace_dir': self.workspace_dir, 'results_suffix': 'test_suffix', diff --git a/tests/test_ufrm.py b/tests/test_ufrm.py index 07df671573..eb2cef2f6a 100644 --- a/tests/test_ufrm.py +++ b/tests/test_ufrm.py @@ -59,6 +59,11 @@ def test_ufrm_regression(self): """UFRM: regression test.""" from natcap.invest import urban_flood_risk_mitigation args = self._make_args() + input_vector = gdal.OpenEx(args['aoi_watersheds_path'], + gdal.OF_VECTOR) + input_layer = input_vector.GetLayer() + input_fields = [field.GetName() for field in input_layer.schema] + urban_flood_risk_mitigation.execute(args) result_vector = gdal.OpenEx(os.path.join( @@ -66,13 +71,15 @@ def test_ufrm_regression(self): gdal.OF_VECTOR) result_layer = result_vector.GetLayer() - # Check that all four expected fields are there. + # Check that all expected fields are there. 
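# Note: the input AOI fields survive into the output because
# _write_summary_vector now starts from an ESRI Shapefile CreateCopy() of
# the source AOI vector (see the urban_flood_risk_mitigation.py hunks
# above) instead of creating an empty layer and copying only geometries.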
+ output_fields = ['aff_bld', 'serv_blt', 'rnf_rt_idx', + 'rnf_rt_m3', 'flood_vol'] + output_fields += input_fields self.assertEqual( - set(('aff_bld', 'serv_blt', 'rnf_rt_idx', 'rnf_rt_m3', - 'flood_vol')), + set(output_fields), set(field.GetName() for field in result_layer.schema)) - result_feature = result_layer.GetFeature(0) + result_feature = result_layer.GetNextFeature() for fieldname, expected_value in ( ('aff_bld', 187010830.32202843), ('serv_blt', 13253546667257.65), @@ -85,6 +92,11 @@ def test_ufrm_regression(self): self.assertAlmostEqual( result_val, expected_value, places=-places_to_round) + input_feature = input_layer.GetNextFeature() + for fieldname in input_fields: + self.assertEqual(result_feature.GetField(fieldname), + input_feature.GetField(fieldname)) + result_feature = None result_layer = None result_vector = None @@ -94,6 +106,11 @@ def test_ufrm_regression_no_infrastructure(self): from natcap.invest import urban_flood_risk_mitigation args = self._make_args() del args['built_infrastructure_vector_path'] + input_vector = gdal.OpenEx(args['aoi_watersheds_path'], + gdal.OF_VECTOR) + input_layer = input_vector.GetLayer() + input_fields = [field.GetName() for field in input_layer.schema] + urban_flood_risk_mitigation.execute(args) result_raster = gdal.OpenEx(os.path.join( @@ -115,9 +132,11 @@ def test_ufrm_regression_no_infrastructure(self): result_layer = result_vector.GetLayer() result_feature = result_layer.GetFeature(0) - # Check that only the two expected fields are there. + # Check that only the expected fields are there. + output_fields = ['rnf_rt_idx', 'rnf_rt_m3', 'flood_vol'] + output_fields += input_fields self.assertEqual( - set(('rnf_rt_idx', 'rnf_rt_m3', 'flood_vol')), + set(output_fields), set(field.GetName() for field in result_layer.schema)) for fieldname, expected_value in ( @@ -218,7 +237,6 @@ def test_ufrm_explicit_zeros_in_table(self): except ValueError: self.fail('unexpected ValueError when testing curve number row with all zeros') - def test_ufrm_string_damage_to_infrastructure(self): """UFRM: handle str(int) structure indices. diff --git a/tests/test_ui_server.py b/tests/test_ui_server.py index 6efc23d270..3d4a347fe2 100644 --- a/tests/test_ui_server.py +++ b/tests/test_ui_server.py @@ -97,14 +97,41 @@ def test_write_parameter_set_file(self): }), 'relativePaths': True, } - _ = test_client.post( + response = test_client.post( f'{ROUTE_PREFIX}/write_parameter_set_file', json=payload) + self.assertEqual( + response.json, + {'message': 'Parameter set saved', 'error': False}) with open(filepath, 'r') as file: actual_data = json.loads(file.read()) self.assertEqual( set(actual_data), {'args', 'invest_version', 'model_name'}) + def test_write_parameter_set_file_error_handling(self): + """UI server: write_parameter_set_file endpoint + should catch a ValueError and return an error message. 
+ """ + test_client = ui_server.app.test_client() + self.workspace_dir = tempfile.mkdtemp() + filepath = os.path.join(self.workspace_dir, 'datastack.json') + payload = { + 'filepath': filepath, + 'moduleName': 'natcap.invest.carbon', + 'args': json.dumps({ + 'workspace_dir': 'foo' + }), + 'relativePaths': True, + } + error_message = 'Error saving datastack' + with patch('natcap.invest.datastack.build_parameter_set', + side_effect=ValueError(error_message)): + response = test_client.post( + f'{ROUTE_PREFIX}/write_parameter_set_file', json=payload) + self.assertEqual( + response.json, + {'message': error_message, 'error': True}) + def test_save_to_python(self): """UI server: save_to_python endpoint.""" test_client = ui_server.app.test_client() @@ -138,11 +165,42 @@ def test_build_datastack_archive(self): 'carbon_pools_path': data_path }), } - _ = test_client.post( + response = test_client.post( f'{ROUTE_PREFIX}/build_datastack_archive', json=payload) + self.assertEqual( + response.json, + {'message': 'Datastack archive created', 'error': False}) # test_datastack.py asserts the actual archiving functionality self.assertTrue(os.path.exists(target_filepath)) + def test_build_datastack_archive_error_handling(self): + """UI server: build_datastack_archive endpoint + should catch a ValueError and return an error message. + """ + test_client = ui_server.app.test_client() + self.workspace_dir = tempfile.mkdtemp() + target_filepath = os.path.join(self.workspace_dir, 'data.tgz') + data_path = os.path.join(self.workspace_dir, 'data.csv') + with open(data_path, 'w') as file: + file.write('hello') + + payload = { + 'filepath': target_filepath, + 'moduleName': 'natcap.invest.carbon', + 'args': json.dumps({ + 'workspace_dir': 'foo', + 'carbon_pools_path': data_path + }), + } + error_message = 'Error saving datastack' + with patch('natcap.invest.datastack.build_datastack_archive', + side_effect=ValueError(error_message)): + response = test_client.post( + f'{ROUTE_PREFIX}/build_datastack_archive', json=payload) + self.assertEqual( + response.json, + {'message': error_message, 'error': True}) + @patch('natcap.invest.ui_server.usage.requests.post') @patch('natcap.invest.ui_server.usage.requests.get') def test_log_model_start(self, mock_get, mock_post): diff --git a/tests/test_urban_nature_access.py b/tests/test_urban_nature_access.py index 0e4858a832..a3d3d7b43c 100644 --- a/tests/test_urban_nature_access.py +++ b/tests/test_urban_nature_access.py @@ -358,6 +358,21 @@ def test_core_model(self): self.assertAlmostEqual(numpy.min(valid_pixels), 1171.7352294921875) self.assertAlmostEqual(numpy.max(valid_pixels), 11898.0712890625) + def test_no_lulc_nodata(self): + """UNA: verify behavior when the LULC has no nodata value.""" + from natcap.invest import urban_nature_access + + args = _build_model_args(self.workspace_dir) + args['search_radius_mode'] = urban_nature_access.RADIUS_OPT_UNIFORM + args['search_radius'] = 100 + + raster = gdal.OpenEx(args['lulc_raster_path'], gdal.OF_RASTER) + band = raster.GetRasterBand(1) + band.DeleteNoDataValue() + band = None + raster = None + urban_nature_access.execute(args) + def test_split_urban_nature(self): from natcap.invest import urban_nature_access @@ -397,7 +412,7 @@ def test_split_urban_nature(self): ) for fieldname, expected_value in expected_values.items(): numpy.testing.assert_allclose( - admin_feature.GetField(fieldname), expected_value) + admin_feature.GetField(fieldname), expected_value, rtol=1e-6) # The sum of the under-and-oversupplied populations should be equal 
# to the total population count. @@ -588,8 +603,8 @@ def test_radii_by_pop_group(self): set(defn.GetName() for defn in summary_layer.schema), set(expected_field_values.keys())) for fieldname, expected_value in expected_field_values.items(): - self.assertAlmostEqual( - expected_value, summary_feature.GetField(fieldname)) + numpy.testing.assert_allclose( + expected_value, summary_feature.GetField(fieldname), rtol=1e-6) output_dir = os.path.join(args['workspace_dir'], 'output') self._assert_urban_nature(os.path.join( @@ -664,8 +679,8 @@ def test_radii_by_pop_group_exponential_kernal(self): set(defn.GetName() for defn in summary_layer.schema), set(expected_field_values.keys())) for fieldname, expected_value in expected_field_values.items(): - self.assertAlmostEqual( - expected_value, summary_feature.GetField(fieldname)) + numpy.testing.assert_allclose( + expected_value, summary_feature.GetField(fieldname), rtol=1e-6) output_dir = os.path.join(args['workspace_dir'], 'output') self._assert_urban_nature(os.path.join( diff --git a/tests/test_validation.py b/tests/test_validation.py index cd3dddc014..18b50df567 100644 --- a/tests/test_validation.py +++ b/tests/test_validation.py @@ -1,11 +1,10 @@ """Testing module for validation.""" -import codecs -import collections import functools import os -import platform import shutil +import stat import string +import sys import tempfile import textwrap import time @@ -330,6 +329,91 @@ def test_workspace_not_exists(self): new_dir, must_exist=False, permissions='rwx')) +@unittest.skipIf( + sys.platform.startswith('win'), + 'requires support for os.chmod(), which is unreliable on Windows') +class DirectoryValidationMacOnly(unittest.TestCase): + """Test Directory Permissions Validation.""" + + def test_invalid_permissions_r(self): + """Validation: when a folder must have read/write/execute + permissions but is missing write and execute permissions.""" + from natcap.invest import validation + + with tempfile.TemporaryDirectory() as tempdir: + os.chmod(tempdir, stat.S_IREAD) + validation_warning = validation.check_directory(tempdir, + permissions='rwx') + self.assertEqual( + validation_warning, + validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='execute')) + + def test_invalid_permissions_w(self): + """Validation: when a folder must have read/write/execute + permissions but is missing read and execute permissions.""" + from natcap.invest import validation + + with tempfile.TemporaryDirectory() as tempdir: + os.chmod(tempdir, stat.S_IWRITE) + validation_warning = validation.check_directory(tempdir, + permissions='rwx') + self.assertEqual( + validation_warning, + validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='read')) + + def test_invalid_permissions_x(self): + """Validation: when a folder must have read/write/execute + permissions but is missing read and write permissions.""" + from natcap.invest import validation + + with tempfile.TemporaryDirectory() as tempdir: + os.chmod(tempdir, stat.S_IEXEC) + validation_warning = validation.check_directory(tempdir, + permissions='rwx') + self.assertEqual( + validation_warning, + validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='read')) + + def test_invalid_permissions_rw(self): + """Validation: when a folder must have read/write/execute + permissions but is missing execute permission.""" + from natcap.invest import validation + + with tempfile.TemporaryDirectory() as tempdir: + os.chmod(tempdir, stat.S_IREAD | stat.S_IWRITE) + validation_warning = 
validation.check_directory(tempdir, + permissions='rwx') + self.assertEqual( + validation_warning, + validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='execute')) + + def test_invalid_permissions_rx(self): + """Validation: when a folder must have read/write/execute + permissions but is missing write permission.""" + from natcap.invest import validation + + with tempfile.TemporaryDirectory() as tempdir: + os.chmod(tempdir, stat.S_IREAD | stat.S_IEXEC) + validation_warning = validation.check_directory(tempdir, + permissions='rwx') + self.assertEqual( + validation_warning, + validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='write')) + + def test_invalid_permissions_wx(self): + """Validation: when a folder must have read/write/execute + permissions but is missing read permission.""" + from natcap.invest import validation + + with tempfile.TemporaryDirectory() as tempdir: + os.chmod(tempdir, stat.S_IWRITE | stat.S_IEXEC) + validation_warning = validation.check_directory(tempdir, + permissions='rwx') + self.assertEqual( + validation_warning, + validation.MESSAGES['NEED_PERMISSION_DIRECTORY'].format(permission='read')) + + class FileValidation(unittest.TestCase): """Test File Validator.""" @@ -539,7 +623,6 @@ def test_vector_projected_in_m(self): def test_wrong_geom_type(self): """Validation: checks that the vector's geometry type is correct.""" - from natcap.invest import spec_utils from natcap.invest import validation driver = gdal.GetDriverByName('GPKG') filepath = os.path.join(self.workspace_dir, 'vector.gpkg') @@ -1369,7 +1452,6 @@ def test_csv_raster_validation_missing_file(self): }) self.assertIn('File not found', str(cm.exception)) - def test_csv_raster_validation_not_projected(self): """validation: validate unprojected raster within csv column""" from natcap.invest import validation diff --git a/workbench/src/main/ipcMainChannels.js b/workbench/src/main/ipcMainChannels.js index 784d642d68..61a92b1299 100644 --- a/workbench/src/main/ipcMainChannels.js +++ b/workbench/src/main/ipcMainChannels.js @@ -16,6 +16,7 @@ export const ipcMainChannels = { LAUNCH_PLUGIN_SERVER: 'launch-plugin-server', LOGGER: 'logger', OPEN_EXTERNAL_URL: 'open-external-url', + OPEN_PATH: 'open-path', OPEN_LOCAL_HTML: 'open-local-html', REMOVE_PLUGIN: 'remove-plugin', SET_SETTING: 'set-setting', diff --git a/workbench/src/main/setupDialogs.js b/workbench/src/main/setupDialogs.js index cecc003a90..3e79df45ba 100644 --- a/workbench/src/main/setupDialogs.js +++ b/workbench/src/main/setupDialogs.js @@ -26,4 +26,10 @@ export default function setupDialogs() { shell.showItemInFolder(filepath); } ); + + ipcMain.handle( + ipcMainChannels.OPEN_PATH, async (event, dirpath) => { + return await shell.openPath(dirpath); + } + ); } diff --git a/workbench/src/renderer/components/InvestTab/index.jsx b/workbench/src/renderer/components/InvestTab/index.jsx index e0e762830d..4f03a7ffb9 100644 --- a/workbench/src/renderer/components/InvestTab/index.jsx +++ b/workbench/src/renderer/components/InvestTab/index.jsx @@ -8,6 +8,8 @@ import TabContainer from 'react-bootstrap/TabContainer'; import Nav from 'react-bootstrap/Nav'; import Row from 'react-bootstrap/Row'; import Col from 'react-bootstrap/Col'; +import Modal from 'react-bootstrap/Modal'; +import Button from 'react-bootstrap/Button'; import { MdKeyboardArrowRight } from 'react-icons/md'; @@ -21,10 +23,7 @@ import { getSpec } from '../../server_requests'; import { ipcMainChannels } from '../../../main/ipcMainChannels'; const { ipcRenderer } = 
window.Workbench.electron; - -function handleOpenWorkspace(logfile) { - ipcRenderer.send(ipcMainChannels.SHOW_ITEM_IN_FOLDER, logfile); -} +const { logger } = window.Workbench; /** * Render an invest model setup form, log display, etc. @@ -42,6 +41,7 @@ class InvestTab extends React.Component { userTerminated: false, executeClicked: false, tabStatus: '', + showErrorModal: false, }; this.investExecute = this.investExecute.bind(this); @@ -49,6 +49,8 @@ class InvestTab extends React.Component { this.terminateInvestProcess = this.terminateInvestProcess.bind(this); this.investLogfileCallback = this.investLogfileCallback.bind(this); this.investExitCallback = this.investExitCallback.bind(this); + this.handleOpenWorkspace = this.handleOpenWorkspace.bind(this); + this.showErrorModal = this.showErrorModal.bind(this); } async componentDidMount() { @@ -156,6 +158,7 @@ class InvestTab extends React.Component { updateJobProperties(tabID, { argsValues: args, status: undefined, // in case of re-run, clear an old status + logfile: undefined, // in case of re-run where logfile may no longer exist, clear old logfile path }); ipcRenderer.send( @@ -188,6 +191,22 @@ class InvestTab extends React.Component { ); } + async handleOpenWorkspace(workspace_dir) { + if (workspace_dir) { + const error = await ipcRenderer.invoke(ipcMainChannels.OPEN_PATH, workspace_dir); + if (error) { + logger.error(`Error opening workspace (${workspace_dir}). ${error}`); + this.showErrorModal(true); + } + } + } + + showErrorModal(shouldShow) { + this.setState({ + showErrorModal: shouldShow, + }); + } + render() { const { activeTab, @@ -195,7 +214,8 @@ class InvestTab extends React.Component { argsSpec, uiSpec, executeClicked, - tabStatus + tabStatus, + showErrorModal, } = this.state; const { status, @@ -232,88 +252,99 @@ class InvestTab extends React.Component { const sidebarFooterElementId = `sidebar-footer-${tabID}`; return ( - - - - -
[The remainder of this render() hunk lost its JSX element tags in extraction. What remains recoverable: the sidebar and tab markup (including the {t('Setup')} and {t('Log')} nav links) is restructured; the status alert's open-workspace callback changes from () => handleOpenWorkspace(logfile) to () => this.handleOpenWorkspace(argsValues?.workspace_dir); and a new error modal, built from the react-bootstrap Modal and Button imported above, is added. The modal is shown while showErrorModal is true, dismissed via () => this.showErrorModal(false), carries aria-labelledby="error-modal-title", with title {t('Error opening workspace')} and body {t('Failed to open workspace directory. Make sure the directory exists and that you have write access to it.')}.] ); } } diff --git a/workbench/src/renderer/components/SaveAsModal/index.jsx b/workbench/src/renderer/components/SaveAsModal/index.jsx index 40c9d35c4a..a2e5e9917e 100644 --- a/workbench/src/renderer/components/SaveAsModal/index.jsx +++ b/workbench/src/renderer/components/SaveAsModal/index.jsx @@ -67,7 +67,11 @@ class SaveAsModal extends React.Component { } handleShow() { - this.setState({ show: true }); + this.props.removeSaveErrors(); + this.setState({ + relativePaths: false, + show: true, + }); } handleChange(event) { diff --git a/workbench/src/renderer/components/SetupTab/index.jsx b/workbench/src/renderer/components/SetupTab/index.jsx index 387c97fabe..7d37dda011 100644 --- a/workbench/src/renderer/components/SetupTab/index.jsx +++ b/workbench/src/renderer/components/SetupTab/index.jsx @@ -104,6 +104,7 @@ class SetupTab extends React.Component { this.savePythonScript = this.savePythonScript.bind(this); this.saveJsonFile = this.saveJsonFile.bind(this); this.setSaveAlert = this.setSaveAlert.bind(this); + this.removeSaveErrors = this.removeSaveErrors.bind(this); this.wrapInvestExecute = this.wrapInvestExecute.bind(this); this.investValidate = this.investValidate.bind(this); this.debouncedValidate = this.debouncedValidate.bind(this); @@ -211,8 +212,8 @@ class SetupTab extends React.Component { relativePaths: relativePaths, args: JSON.stringify(args), }; - const response = await writeParametersToFile(payload); - this.setSaveAlert(response); + const {message, error} = await writeParametersToFile(payload); + this.setSaveAlert(message, error); } async saveDatastack(datastackPath) { @@ -226,14 +227,16 @@ class SetupTab extends React.Component { args: JSON.stringify(args), }; const key = window.crypto.getRandomValues(new Uint16Array(1))[0].toString(); - this.setSaveAlert('archiving...', key); - const response = await archiveDatastack(payload); - this.setSaveAlert(response, key); + this.setSaveAlert('archiving...', false, key); + const {message, error} = await archiveDatastack(payload); + this.setSaveAlert(message, error, key); } /** State updater for alert messages from various save buttons. * * @param {string} message - the message to display + * @param {boolean} error - true if message was generated by an error, + * false otherwise. Defaults to false. + * @param {string} key - a key to uniquely identify each save action, * passed as prop to `Expire` so that it can be aware of whether to, * 1.
display: because a new save occurred, or @@ -244,10 +247,28 @@ */ setSaveAlert( message, + error = false, key = window.crypto.getRandomValues(new Uint16Array(1))[0].toString() ) { this.setState({ - saveAlerts: { ...this.state.saveAlerts, ...{ [key]: message } } + saveAlerts: { + ...this.state.saveAlerts, + ...{ [key]: { + message, + error + }}} }); } + + removeSaveErrors() { + const alerts = this.state.saveAlerts; + Object.keys(alerts).forEach((key) => { + if (alerts[key].error) { + delete alerts[key]; + } + }); + this.setState({ + saveAlerts: alerts + }); + } @@ -534,18 +555,22 @@ const SaveAlerts = []; Object.keys(saveAlerts).forEach((key) => { - const message = saveAlerts[key]; + const { message, error } = saveAlerts[key]; if (message) { - // Alert won't expire during archiving; will expire 2s after completion - const alertExpires = (message === 'archiving...') ? 1e7 : 2000; + // Alert won't expire during archiving; will expire 4s after completion + // Alert won't expire when an error has occurred; + // will be hidden next time save modal opens + const alertExpires = (error || message === 'archiving...') ? 1e7 : 4000; SaveAlerts.push( [Expire/Alert JSX lost in extraction: the Alert now wraps {t(message)} instead of {message}, expires after alertExpires, and takes a variant driven by the error flag (the tests in investtab.test.js below assert the 'alert-success' and 'alert-danger' classes).] ); @@ -608,6 +633,7 @@ [SaveAsModal element, opening tag lost in extraction:] savePythonScript={this.savePythonScript} saveJsonFile={this.saveJsonFile} saveDatastack={this.saveDatastack} + removeSaveErrors={this.removeSaveErrors} /> {SaveAlerts} diff --git a/workbench/src/renderer/server_requests.js b/workbench/src/renderer/server_requests.js index f4e4c2deb4..61e6e3f06e 100644 --- a/workbench/src/renderer/server_requests.js +++ b/workbench/src/renderer/server_requests.js @@ -206,10 +206,14 @@ export async function archiveDatastack(payload) { body: JSON.stringify(payload), headers: { 'Content-Type': 'application/json' }, }) - .then((response) => response.text()) - .then((text) => { - logger.debug(text); - return text; + .then((response) => response.json()) + .then(({message, error}) => { + if (error) { + logger.error(message); + } else { + logger.debug(message); + } + return {message, error}; }) .catch((error) => logger.error(error.stack)) ); @@ -234,10 +238,14 @@ export async function writeParametersToFile(payload) { body: JSON.stringify(payload), headers: { 'Content-Type': 'application/json' }, }) - .then((response) => response.text()) - .then((text) => { - logger.debug(text); - return text; + .then((response) => response.json()) + .then(({message, error}) => { + if (error) { + logger.error(message); + } else { + logger.debug(message); + } + return {message, error}; }) .catch((error) => logger.error(error.stack)) ); diff --git a/workbench/tests/main/main.test.js b/workbench/tests/main/main.test.js index 2eac73da14..3fc064913c 100644 --- a/workbench/tests/main/main.test.js +++ b/workbench/tests/main/main.test.js @@ -190,6 +190,7 @@ describe('createWindow', () => { ipcMainChannels.INVEST_VERSION, ipcMainChannels.IS_FIRST_RUN, ipcMainChannels.LAUNCH_PLUGIN_SERVER, + ipcMainChannels.OPEN_PATH, ipcMainChannels.SHOW_OPEN_DIALOG, ipcMainChannels.SHOW_SAVE_DIALOG, ]; diff --git a/workbench/tests/renderer/investtab.test.js b/workbench/tests/renderer/investtab.test.js index 7dddd2dadc..73ed1c760a 100644 --- a/workbench/tests/renderer/investtab.test.js +++ b/workbench/tests/renderer/investtab.test.js @@ -109,9 +109,71 @@ describe('Run status Alert renders with status from a recent run', () => { }); const { findByRole } = renderInvestTab(job); - const openWorkspace =
await findByRole('button', { name: 'Open Workspace' }) - openWorkspace.click(); - expect(shell.showItemInFolder).toHaveBeenCalledTimes(1); + const openWorkspaceBtn = await findByRole('button', { name: 'Open Workspace' }); + expect(openWorkspaceBtn).toBeTruthy(); + }); +}); + +describe('Open Workspace button', () => { + const spec = { + args: {}, + ui_spec: { order: [] }, + }; + + const baseJob = { + ...DEFAULT_JOB, + status: 'success', + }; + + beforeEach(() => { + getSpec.mockResolvedValue(spec); + fetchValidation.mockResolvedValue([]); + setupDialogs(); + }); + + afterEach(() => { + removeIpcMainListeners(); + }); + + test('should open workspace', async () => { + const job = { + ...baseJob, + argsValues: { + workspace_dir: '/workspace', + }, + }; + + jest.spyOn(ipcRenderer, 'invoke'); + + const { findByRole } = renderInvestTab(job); + const openWorkspaceBtn = await findByRole('button', { name: 'Open Workspace' }) + openWorkspaceBtn.click(); + + expect(ipcRenderer.invoke).toHaveBeenCalledTimes(1); + expect(ipcRenderer.invoke).toHaveBeenCalledWith(ipcMainChannels.OPEN_PATH, job.argsValues.workspace_dir); + }); + + test('should present an error message to the user if workspace cannot be opened (e.g., if it does not exist)', async () => { + const job = { + ...baseJob, + status: 'error', + argsValues: { + workspace_dir: '/nonexistent-workspace', + }, + }; + + jest.spyOn(ipcRenderer, 'invoke'); + ipcRenderer.invoke.mockResolvedValue('Error opening workspace'); + + const { findByRole } = renderInvestTab(job); + const openWorkspaceBtn = await findByRole('button', { name: 'Open Workspace' }); + openWorkspaceBtn.click(); + + expect(ipcRenderer.invoke).toHaveBeenCalledTimes(1); + expect(ipcRenderer.invoke).toHaveBeenCalledWith(ipcMainChannels.OPEN_PATH, job.argsValues.workspace_dir); + + const errorModal = await findByRole('dialog', { name: 'Error opening workspace'}); + expect(errorModal).toBeTruthy(); }); }); @@ -155,18 +217,22 @@ describe('Sidebar Buttons', () => { }); test('Save to JSON: requests endpoint with correct payload', async () => { - const response = 'saved'; - writeParametersToFile.mockResolvedValue(response); + const response = { + message: 'saved', + error: false, + }; + writeParametersToFile.mockResolvedValueOnce(response); + const mockDialogData = { canceled: false, filePath: 'foo.json' }; + ipcRenderer.invoke.mockResolvedValueOnce(mockDialogData); const { findByText, findByLabelText, findByRole } = renderInvestTab(); const saveAsButton = await findByText('Save as...'); await userEvent.click(saveAsButton); - const jsonOption = await findByLabelText((content, element) => content.startsWith('Parameters only')); + const jsonOption = await findByLabelText((content) => content.startsWith('Parameters only')); await userEvent.click(jsonOption); const saveButton = await findByRole('button', { name: 'Save' }); await userEvent.click(saveButton); - expect(await findByRole('alert')).toHaveTextContent(response); const payload = writeParametersToFile.mock.calls[0][0]; expect(Object.keys(payload)).toEqual(expect.arrayContaining( ['filepath', 'moduleName', 'relativePaths', 'args'] @@ -187,19 +253,18 @@ describe('Sidebar Buttons', () => { test('Save to Python script: requests endpoint with correct payload', async () => { const response = 'saved'; - saveToPython.mockResolvedValue(response); + saveToPython.mockResolvedValueOnce(response); const mockDialogData = { canceled: false, filePath: 'foo.py' }; ipcRenderer.invoke.mockResolvedValueOnce(mockDialogData); const { findByText, findByLabelText, 
findByRole } = renderInvestTab(); const saveAsButton = await findByText('Save as...'); await userEvent.click(saveAsButton); - const pythonOption = await findByLabelText((content, element) => content.startsWith('Python script')); + const pythonOption = await findByLabelText((content) => content.startsWith('Python script')); await userEvent.click(pythonOption); const saveButton = await findByRole('button', { name: 'Save' }); await userEvent.click(saveButton); - expect(await findByRole('alert')).toHaveTextContent(response); const payload = saveToPython.mock.calls[0][0]; expect(Object.keys(payload)).toEqual(expect.arrayContaining( ['filepath', 'modelname', 'args'] @@ -222,27 +287,22 @@ describe('Sidebar Buttons', () => { }); test('Save datastack: requests endpoint with correct payload', async () => { - const response = 'saved'; - archiveDatastack.mockImplementation(() => new Promise( - (resolve) => { - setTimeout(() => resolve(response), 500); - } - )); + const response = { + message: 'saved', + error: false, + }; + archiveDatastack.mockResolvedValueOnce(response); const mockDialogData = { canceled: false, filePath: 'data.tgz' }; ipcRenderer.invoke.mockResolvedValue(mockDialogData); const { findByText, findByLabelText, findByRole, getByRole } = renderInvestTab(); const saveAsButton = await findByText('Save as...'); await userEvent.click(saveAsButton); - const datastackOption = await findByLabelText((content, element) => content.startsWith('Parameters and data')); + const datastackOption = await findByLabelText((content) => content.startsWith('Parameters and data')); await userEvent.click(datastackOption); const saveButton = await findByRole('button', { name: 'Save' }); await userEvent.click(saveButton); - expect(await findByRole('alert')).toHaveTextContent('archiving...'); - await waitFor(() => { - expect(getByRole('alert')).toHaveTextContent(response); - }); const payload = archiveDatastack.mock.calls[0][0]; expect(Object.keys(payload)).toEqual(expect.arrayContaining( ['filepath', 'moduleName', 'args'] @@ -264,6 +324,124 @@ describe('Sidebar Buttons', () => { expect(archiveDatastack).toHaveBeenCalledTimes(1); }); + test.each([ + ['Parameters only', 'saveJsonFile'], + ['Parameters and data', 'saveDatastack'], + ['Python script', 'savePythonScript'] + ])('%s: does nothing when canceled', async (label, method) => { + // callback data if the OS dialog was canceled + const mockDialogData = { + canceled: true, + filePaths: [] + }; + ipcRenderer.invoke.mockResolvedValue(mockDialogData); + const spy = jest.spyOn(SetupTab.WrappedComponent.prototype, method); + + const { findByText, findByLabelText, findByRole } = renderInvestTab(); + const saveAsButton = await findByText('Save as...'); + await userEvent.click(saveAsButton); + const option = await findByLabelText((content, element) => content.startsWith(label)); + await userEvent.click(option); + const saveButton = await findByRole('button', { name: 'Save' }); + await userEvent.click(saveButton); + + // Calls that would have triggered if a file was selected + expect(spy).toHaveBeenCalledTimes(0); + }); + + test.each([ + [ + 'Parameters only', + writeParametersToFile, + {message: 'Parameter set saved', error: false} + ], + [ + 'Parameters and data', + archiveDatastack, + {message: 'Datastack archive created', error: false} + ], + [ + 'Python script', + saveToPython, + 'Python script saved' + ] + ])('%s: renders success message', async (label, method, response) => { + ipcRenderer.invoke.mockResolvedValueOnce({canceled: false, filePath: 'example.txt'}); + 
if (method == archiveDatastack) { + method.mockImplementationOnce(() => new Promise( + (resolve) => { + setTimeout(() => resolve(response), 500); + } + )); + } else { + method.mockResolvedValueOnce(response); + } + + const { findByText, findByLabelText, findByRole } = renderInvestTab(); + const saveAsButton = await findByText('Save as...'); + await userEvent.click(saveAsButton); + const option = await findByLabelText((content) => content.startsWith(label)); + await userEvent.click(option); + const saveButton = await findByRole('button', { name: 'Save' }); + await userEvent.click(saveButton); + + const saveAlert = await findByRole('alert'); + if (method == archiveDatastack) { + expect(saveAlert).toHaveTextContent('archiving...'); + } + await waitFor(() => { + expect(saveAlert).toHaveTextContent(response.message ?? response); + }); + expect(saveAlert).toHaveClass('alert-success'); + }); + + test.each([ + [ + 'Parameters only', + writeParametersToFile, + {message: 'Error saving parameter set', error: true} + ], + [ + 'Parameters and data', + archiveDatastack, + {message: 'Error creating datastack archive', error: true} + ], + ])('%s: renders error message', async (label, method, response) => { + ipcRenderer.invoke.mockResolvedValueOnce({canceled: false, filePath: 'example.txt'}); + method.mockResolvedValueOnce(response); + + const { findByText, findByLabelText, findByRole } = renderInvestTab(); + const saveAsButton = await findByText('Save as...'); + await userEvent.click(saveAsButton); + const option = await findByLabelText((content) => content.startsWith(label)); + await userEvent.click(option); + const saveButton = await findByRole('button', { name: 'Save' }); + await userEvent.click(saveButton); + + const saveAlert = await findByRole('alert'); + expect(saveAlert).toHaveTextContent(response.message); + expect(saveAlert).toHaveClass('alert-danger'); + }); + + test('Save errors are cleared when save modal opens', async () => { + ipcRenderer.invoke.mockResolvedValueOnce({canceled: false, filePath: 'example.txt'}); + writeParametersToFile.mockResolvedValueOnce({message: 'Error saving parameter set', error: true}); + + // Trigger error alert + const { findByText, findByLabelText, findByRole, queryByRole } = renderInvestTab(); + const saveAsButton = await findByText('Save as...'); + await userEvent.click(saveAsButton); + const jsonOption = await findByLabelText((content) => content.startsWith('Parameters only')); + await userEvent.click(jsonOption); + const saveButton = await findByRole('button', { name: 'Save' }); + await userEvent.click(saveButton); + expect(await findByRole('alert')).toHaveClass('alert-danger'); + + // Re-open save modal + await userEvent.click(saveAsButton); + expect(queryByRole('alert')).toBe(null); + }); + test('Load parameters from file: loads parameters', async () => { const mockDatastack = { module_name: spec.pyname, @@ -321,31 +499,6 @@ describe('Sidebar Buttons', () => { expect(spy).toHaveBeenCalledTimes(0); }); - test.each([ - ['Parameters only', 'saveJsonFile'], - ['Parameters and data', 'saveDatastack'], - ['Python script', 'savePythonScript'] - ])('%s: does nothing when canceled', async (label, method) => { - // callback data if the OS dialog was canceled - const mockDialogData = { - canceled: true, - filePaths: [] - }; - ipcRenderer.invoke.mockResolvedValue(mockDialogData); - const spy = jest.spyOn(SetupTab.WrappedComponent.prototype, method); - - const { findByText, findByLabelText, findByRole } = renderInvestTab(); - const saveAsButton = await findByText('Save 
as...'); - await userEvent.click(saveAsButton); - const option = await findByLabelText((content, element) => content.startsWith(label)); - await userEvent.click(option); - const saveButton = await findByRole('button', { name: 'Save' }); - await userEvent.click(saveButton); - - // Calls that would have triggered if a file was selected - expect(spy).toHaveBeenCalledTimes(0); - }); - test('Load parameters button has hover text', async () => { const { findByText,
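For reference, the renderer tests above assume the ui_server endpoints now respond with a {message, error} JSON object instead of plain text. A minimal sketch of that contract, with hypothetical wiring (the stub stands in for natcap.invest.datastack.build_parameter_set, the ROUTE_PREFIX used by the real server is omitted, and only the message strings are taken from the tests in this diff):

```python
from flask import Flask, jsonify, request

app = Flask(__name__)

def build_parameter_set(args, module_name, filepath, relative):
    """Stub standing in for natcap.invest.datastack.build_parameter_set."""
    raise ValueError('Error saving datastack')

@app.route('/write_parameter_set_file', methods=['POST'])
def write_parameter_set_file():
    payload = request.get_json()
    try:
        build_parameter_set(
            payload['args'], payload['moduleName'],
            payload['filepath'], payload['relativePaths'])
    except ValueError as error:
        # The renderer logs this message and shows an 'alert-danger'.
        return jsonify(message=str(error), error=True)
    # On success the renderer shows an 'alert-success'.
    return jsonify(message='Parameter set saved', error=False)
```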