diff --git a/docs/conf.py b/docs/conf.py index a35144d5..413a0f8a 100755 --- a/docs/conf.py +++ b/docs/conf.py @@ -24,7 +24,7 @@ from pathlib import Path # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('..')) +sys.path.insert(0, os.path.abspath("..")) # -- General configuration ------------------------------------------------ @@ -35,8 +35,8 @@ sys.path.insert(0, os.path.abspath('..')) # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - 'sphinx.ext.autodoc', - 'sphinx.ext.todo', + "sphinx.ext.autodoc", + "sphinx.ext.todo", "sphinx.ext.linkcode", "sphinx.ext.mathjax", "sphinx.ext.napoleon", @@ -46,26 +46,26 @@ extensions = [ ] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = 'Parcels' -copyright = f'{datetime.datetime.now().year}, The OceanParcels Team' -author = 'The OceanParcels Team' +project = "Parcels" +copyright = f"{datetime.datetime.now().year}, The OceanParcels Team" +author = "The OceanParcels Team" linkcheck_ignore = [ - r'http://localhost:\d+/', + r"http://localhost:\d+/", r"http://www2\.cesm\.ucar\.edu/models/cesm1\.0/pop2/doc/sci/POPRefManual.pdf", # Site doesn't allow crawling r"https://pubs\.acs\.org/doi/10\.1021/acs\.est\.0c01984", # Site doesn't allow crawling r"https://aip\.scitation\.org/doi/10\.1063/1\.4982720", # Site doesn't allow crawling @@ -107,7 +107,7 @@ release = version # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = 'en' +language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: @@ -117,7 +117,7 @@ language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. -exclude_patterns = ['_build', '**.ipynb_checkpoints'] +exclude_patterns = ["_build", "**.ipynb_checkpoints"] # The reST default role (used for this markup: `text`) to use for all # documents. @@ -135,7 +135,7 @@ exclude_patterns = ['_build', '**.ipynb_checkpoints'] # show_authors = False # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'monokai' +pygments_style = "monokai" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] @@ -150,7 +150,7 @@ todo_include_todos = True # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. -html_theme = 'pydata_sphinx_theme' +html_theme = "pydata_sphinx_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the @@ -191,7 +191,7 @@ numpydoc_validation_checks = { # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] html_theme_options = { "logo": { "image_light": "parcelslogo.png", @@ -206,7 +206,7 @@ html_theme_options = { "icon": "fa-solid fa-box", "type": "fontawesome", } - ] + ], } html_context = { @@ -222,7 +222,7 @@ downloads_folder = Path("_downloads") downloads_folder.mkdir(exist_ok=True) -def make_filename_safe(filename: str, safe_char: str = '_') -> str: +def make_filename_safe(filename: str, safe_char: str = "_") -> str: """Make a filename safe for saving to disk.""" # Replace any characters that are not allowed in a filename with the safe character safe_filename = re.sub(r'[\\/:*?"<>|]', safe_char, filename) @@ -297,10 +297,7 @@ def linkcode_resolve(domain, info): if "-" in parcels.__version__: return f"https://github.com/OceanParcels/parcels/blob/master/parcels/{fn}{linespec}" else: - return ( - f"https://github.com/OceanParcels/parcels/blob/" - f"{parcels.__version__}/parcels/{fn}{linespec}" - ) + return f"https://github.com/OceanParcels/parcels/blob/" f"{parcels.__version__}/parcels/{fn}{linespec}" # Add any extra paths that contain custom files (such as robots.txt or @@ -318,10 +315,7 @@ html_extra_path = ["robots.txt"] # Custom sidebar templates, maps document names to template names. -html_sidebars = { - "**": ["sidebar-nav-bs"], - "documentation/additional_examples": [] -} +html_sidebars = {"**": ["sidebar-nav-bs"], "documentation/additional_examples": []} # Additional templates that should be rendered to pages, maps page names to # template names. @@ -368,31 +362,28 @@ html_sidebars = { # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. -htmlhelp_basename = 'parcelsdoc' +htmlhelp_basename = "parcelsdoc" nbsphinx_thumbnails = { - 'examples/tutorial_parcels_structure': '_images/parcels_user_diagram.png', - 'examples/tutorial_timestamps': '_static/calendar-icon.jpg', - 'examples/tutorial_jit_vs_scipy': '_static/clock-icon.png', - 'examples/documentation_homepage_animation': '_images/homepage.gif', - 'examples/tutorial_interaction': '_static/pulled_particles_twoatractors_line.gif', - 'examples/documentation_LargeRunsOutput': '_static/harddrive.png', - 'examples/tutorial_unitconverters': '_static/globe-icon.jpg', - 'examples/documentation_geospatial': '_images/tutorial_geospatial_google_earth.png', - 'examples/tutorial_kernelloop': '_static/loop-icon.jpeg', + "examples/tutorial_parcels_structure": "_images/parcels_user_diagram.png", + "examples/tutorial_timestamps": "_static/calendar-icon.jpg", + "examples/tutorial_jit_vs_scipy": "_static/clock-icon.png", + "examples/documentation_homepage_animation": "_images/homepage.gif", + "examples/tutorial_interaction": "_static/pulled_particles_twoatractors_line.gif", + "examples/documentation_LargeRunsOutput": "_static/harddrive.png", + "examples/tutorial_unitconverters": "_static/globe-icon.jpg", + "examples/documentation_geospatial": "_images/tutorial_geospatial_google_earth.png", + "examples/tutorial_kernelloop": "_static/loop-icon.jpeg", } # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. 
# 'preamble': '', - # Latex figure (float) alignment # 'figure_align': 'htbp', } @@ -401,9 +392,7 @@ latex_elements = { # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - ( - master_doc, 'parcels.tex', 'Parcels Documentation', - 'M. Lange, E. van Sebille', 'manual'), + (master_doc, "parcels.tex", "Parcels Documentation", "M. Lange, E. van Sebille", "manual"), ] # The name of an image file (relative to this directory) to place at the top of @@ -431,10 +420,7 @@ latex_documents = [ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'Parcels', 'Parcels Documentation', - [author], 1) -] +man_pages = [(master_doc, "Parcels", "Parcels Documentation", [author], 1)] # If true, show URL addresses after external links. # man_show_urls = False @@ -447,9 +433,14 @@ man_pages = [ # dir menu entry, description, category) texinfo_documents = [ ( - master_doc, 'parcels', 'Parcels Documentation', - author, 'Parcels', 'One line description of project.', - 'Miscellaneous'), + master_doc, + "parcels", + "Parcels Documentation", + author, + "Parcels", + "One line description of project.", + "Miscellaneous", + ), ] # Documents to append as an appendix to all manuals. diff --git a/docs/examples/example_brownian.py b/docs/examples/example_brownian.py index 370bd843..668d96de 100644 --- a/docs/examples/example_brownian.py +++ b/docs/examples/example_brownian.py @@ -15,9 +15,7 @@ def mesh_conversion(mesh): @pytest.mark.parametrize("mode", ["scipy", "jit"]) @pytest.mark.parametrize("mesh", ["flat", "spherical"]) def test_brownian_example(mode, mesh, npart=3000): - fieldset = parcels.FieldSet.from_data( - {"U": 0, "V": 0}, {"lon": 0, "lat": 0}, mesh=mesh - ) + fieldset = parcels.FieldSet.from_data({"U": 0, "V": 0}, {"lon": 0, "lat": 0}, mesh=mesh) # Set diffusion constants. 
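# For uniform Brownian diffusion with diffusivity K, particle positions spread with standard deviation sqrt(2 * K * t) in each direction; the expected_std_x/y checks further below follow directly from this relation.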
kh_zonal = 100 # in m^2/s @@ -25,9 +23,7 @@ def test_brownian_example(mode, mesh, npart=3000): # Create field of constant Kh_zonal and Kh_meridional fieldset.add_field(parcels.Field("Kh_zonal", kh_zonal, lon=0, lat=0, mesh=mesh)) - fieldset.add_field( - parcels.Field("Kh_meridional", kh_meridional, lon=0, lat=0, mesh=mesh) - ) + fieldset.add_field(parcels.Field("Kh_meridional", kh_meridional, lon=0, lat=0, mesh=mesh)) # Set random seed parcels.ParcelsRandom.seed(123456) @@ -35,20 +31,14 @@ def test_brownian_example(mode, mesh, npart=3000): runtime = timedelta(days=1) parcels.ParcelsRandom.seed(1234) - pset = parcels.ParticleSet( - fieldset=fieldset, pclass=ptype[mode], lon=np.zeros(npart), lat=np.zeros(npart) - ) - pset.execute( - pset.Kernel(parcels.DiffusionUniformKh), runtime=runtime, dt=timedelta(hours=1) - ) + pset = parcels.ParticleSet(fieldset=fieldset, pclass=ptype[mode], lon=np.zeros(npart), lat=np.zeros(npart)) + pset.execute(pset.Kernel(parcels.DiffusionUniformKh), runtime=runtime, dt=timedelta(hours=1)) expected_std_x = np.sqrt(2 * kh_zonal * runtime.total_seconds()) expected_std_y = np.sqrt(2 * kh_meridional * runtime.total_seconds()) ys = pset.lat * mesh_conversion(mesh) - xs = pset.lon * mesh_conversion( - mesh - ) # since near equator, we do not need to care about curvature effect + xs = pset.lon * mesh_conversion(mesh) # since near equator, we do not need to care about curvature effect tol = 250 # 250m tolerance assert np.allclose(np.std(xs), expected_std_x, atol=tol) diff --git a/docs/examples/example_dask_chunk_OCMs.py b/docs/examples/example_dask_chunk_OCMs.py index d4c299d2..fd8da7a5 100644 --- a/docs/examples/example_dask_chunk_OCMs.py +++ b/docs/examples/example_dask_chunk_OCMs.py @@ -128,9 +128,7 @@ def test_nemo_3D(mode, chunk_mode): }, } - fieldset = parcels.FieldSet.from_nemo( - filenames, variables, dimensions, chunksize=chs - ) + fieldset = parcels.FieldSet.from_nemo(filenames, variables, dimensions, chunksize=chs) compute_nemo_particle_advection(fieldset, mode) # Nemo sample file dimensions: depthu=75, y=201, x=151 @@ -140,31 +138,20 @@ def test_nemo_3D(mode, chunk_mode): if chunk_mode is False: assert len(fieldset.U.grid.load_chunk) == 1 elif chunk_mode == "auto": - assert ( - fieldset.gridset.size == 3 - ) # because three different grids in 'auto' mode + assert fieldset.gridset.size == 3 # because three different grids in 'auto' mode assert len(fieldset.U.grid.load_chunk) != 1 elif chunk_mode == "specific": assert len(fieldset.U.grid.load_chunk) == ( - 1 - * int(math.ceil(75.0 / 75.0)) - * int(math.ceil(201.0 / 16.0)) - * int(math.ceil(151.0 / 16.0)) + 1 * int(math.ceil(75.0 / 75.0)) * int(math.ceil(201.0 / 16.0)) * int(math.ceil(151.0 / 16.0)) ) elif chunk_mode == "failsafe": # chunking time and depth but not lat and lon assert len(fieldset.U.grid.load_chunk) != 1 assert len(fieldset.U.grid.load_chunk) == ( - 1 - * int(math.ceil(75.0 / 25.0)) - * int(math.ceil(201.0 / 171.0)) - * int(math.ceil(151.0 / 151.0)) + 1 * int(math.ceil(75.0 / 25.0)) * int(math.ceil(201.0 / 171.0)) * int(math.ceil(151.0 / 151.0)) ) assert len(fieldset.V.grid.load_chunk) != 1 assert len(fieldset.V.grid.load_chunk) == ( - 1 - * int(math.ceil(75.0 / 75.0)) - * int(math.ceil(201.0 / 171.0)) - * int(math.ceil(151.0 / 151.0)) + 1 * int(math.ceil(75.0 / 75.0)) * int(math.ceil(201.0 / 171.0)) * int(math.ceil(151.0 / 151.0)) ) @@ -178,9 +165,7 @@ def test_globcurrent_2D(mode, chunk_mode): else: dask.config.set({"array.chunk-size": "128MiB"}) data_folder = 
parcels.download_example_dataset("GlobCurrent_example_data") - filenames = str( - data_folder / "200201*-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc" - ) + filenames = str(data_folder / "200201*-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc") variables = { "U": "eastward_eulerian_current_velocity", "V": "northward_eulerian_current_velocity", @@ -200,14 +185,10 @@ def test_globcurrent_2D(mode, chunk_mode): "V": {"time": ("time", 1), "lat": ("lat", -1), "lon": ("lon", -1)}, } - fieldset = parcels.FieldSet.from_netcdf( - filenames, variables, dimensions, chunksize=chs - ) + fieldset = parcels.FieldSet.from_netcdf(filenames, variables, dimensions, chunksize=chs) try: pset = parcels.ParticleSet(fieldset, pclass=ptype[mode], lon=25, lat=-35) - pset.execute( - parcels.AdvectionRK4, runtime=timedelta(days=1), dt=timedelta(minutes=5) - ) + pset.execute(parcels.AdvectionRK4, runtime=timedelta(days=1), dt=timedelta(minutes=5)) except DaskChunkingError: # we removed the failsafe, so now if all chunksize dimensions are incorrect, there is nothing left to chunk, # which raises an error saying so. This is the expected behaviour @@ -221,9 +202,7 @@ def test_globcurrent_2D(mode, chunk_mode): elif chunk_mode == "auto": assert len(fieldset.U.grid.load_chunk) != 1 elif chunk_mode == "specific": - assert len(fieldset.U.grid.load_chunk) == ( - 1 * int(math.ceil(41.0 / 8.0)) * int(math.ceil(81.0 / 8.0)) - ) + assert len(fieldset.U.grid.load_chunk) == (1 * int(math.ceil(41.0 / 8.0)) * int(math.ceil(81.0 / 8.0))) elif chunk_mode == "failsafe": # chunking time but not lat assert len(fieldset.U.grid.load_chunk) != 1 assert len(fieldset.V.grid.load_chunk) != 1 @@ -231,9 +210,7 @@ def test_globcurrent_2D(mode, chunk_mode): assert abs(pset[0].lat - -35.3) < 1 -@pytest.mark.skip( - reason="Started failing around #1644 (2024-08-08). Some change in chunking, inconsistent behavior." -) +@pytest.mark.skip(reason="Started failing around #1644 (2024-08-08). 
Some change in chunking, inconsistent behavior.") @pytest.mark.parametrize("mode", ["jit"]) @pytest.mark.parametrize("chunk_mode", [False, "auto", "specific", "failsafe"]) def test_pop(mode, chunk_mode): @@ -246,9 +223,7 @@ def test_pop(mode, chunk_mode): data_folder = parcels.download_example_dataset("POPSouthernOcean_data") filenames = str(data_folder / "t.x1_SAMOC_flux.1690*.nc") variables = {"U": "UVEL", "V": "VVEL", "W": "WVEL"} - timestamps = np.expand_dims( - np.array([np.datetime64("2000-%.2d-01" % m) for m in range(1, 7)]), axis=1 - ) + timestamps = np.expand_dims(np.array([np.datetime64("2000-%.2d-01" % m) for m in range(1, 7)]), axis=1) dimensions = {"lon": "ULON", "lat": "ULAT", "depth": "w_dep"} chs = False if chunk_mode == "auto": @@ -258,9 +233,7 @@ def test_pop(mode, chunk_mode): elif chunk_mode == "failsafe": # here: bad depth entry chs = {"depth": ("wz", 3), "lat": ("j", 8), "lon": ("i", 8)} - fieldset = parcels.FieldSet.from_pop( - filenames, variables, dimensions, chunksize=chs, timestamps=timestamps - ) + fieldset = parcels.FieldSet.from_pop(filenames, variables, dimensions, chunksize=chs, timestamps=timestamps) npart = 20 lonp = 70.0 * np.ones(npart) @@ -276,18 +249,14 @@ def test_pop(mode, chunk_mode): assert len(fieldset.V.grid.load_chunk) == 1 assert len(fieldset.W.grid.load_chunk) == 1 elif chunk_mode == "auto": - assert ( - fieldset.gridset.size == 3 - ) # because three different grids in 'auto' mode + assert fieldset.gridset.size == 3 # because three different grids in 'auto' mode assert len(fieldset.U.grid.load_chunk) != 1 assert len(fieldset.V.grid.load_chunk) != 1 assert len(fieldset.W.grid.load_chunk) != 1 elif chunk_mode == "specific": assert fieldset.gridset.size == 1 assert len(fieldset.U.grid.load_chunk) == ( - int(math.ceil(21.0 / 3.0)) - * int(math.ceil(60.0 / 8.0)) - * int(math.ceil(60.0 / 8.0)) + int(math.ceil(21.0 / 3.0)) * int(math.ceil(60.0 / 8.0)) * int(math.ceil(60.0 / 8.0)) ) elif chunk_mode == "failsafe": # here: done a typo in the netcdf dimname field assert fieldset.gridset.size == 1 @@ -295,9 +264,7 @@ def test_pop(mode, chunk_mode): assert len(fieldset.V.grid.load_chunk) != 1 assert len(fieldset.W.grid.load_chunk) != 1 assert len(fieldset.U.grid.load_chunk) == ( - int(math.ceil(21.0 / 3.0)) - * int(math.ceil(60.0 / 8.0)) - * int(math.ceil(60.0 / 8.0)) + int(math.ceil(21.0 / 3.0)) * int(math.ceil(60.0 / 8.0)) * int(math.ceil(60.0 / 8.0)) ) @@ -365,9 +332,7 @@ def test_swash(mode, chunk_mode): depthp = [ -0.1, ] * npart - pset = parcels.ParticleSet.from_list( - fieldset, ptype[mode], lon=lonp, lat=latp, depth=depthp - ) + pset = parcels.ParticleSet.from_list(fieldset, ptype[mode], lon=lonp, lat=latp, depth=depthp) pset.execute( parcels.AdvectionRK4, runtime=timedelta(seconds=0.2), @@ -392,22 +357,13 @@ def test_swash(mode, chunk_mode): assert len(fieldset.W.grid.load_chunk) != 1 if chunk_mode == "specific": assert len(fieldset.U.grid.load_chunk) == ( - 1 - * int(math.ceil(6.0 / 6.0)) - * int(math.ceil(21.0 / 4.0)) - * int(math.ceil(51.0 / 4.0)) + 1 * int(math.ceil(6.0 / 6.0)) * int(math.ceil(21.0 / 4.0)) * int(math.ceil(51.0 / 4.0)) ) assert len(fieldset.V.grid.load_chunk) == ( - 1 - * int(math.ceil(6.0 / 6.0)) - * int(math.ceil(21.0 / 4.0)) - * int(math.ceil(51.0 / 4.0)) + 1 * int(math.ceil(6.0 / 6.0)) * int(math.ceil(21.0 / 4.0)) * int(math.ceil(51.0 / 4.0)) ) assert len(fieldset.W.grid.load_chunk) == ( - 1 - * int(math.ceil(7.0 / 7.0)) - * int(math.ceil(21.0 / 4.0)) - * int(math.ceil(51.0 / 4.0)) + 1 * int(math.ceil(7.0 / 7.0)) * 
int(math.ceil(21.0 / 4.0)) * int(math.ceil(51.0 / 4.0)) ) @@ -449,9 +405,7 @@ def test_ofam_3D(mode, chunk_mode): ) pset = parcels.ParticleSet(fieldset, pclass=ptype[mode], lon=180, lat=10, depth=2.5) - pset.execute( - parcels.AdvectionRK4, runtime=timedelta(days=10), dt=timedelta(minutes=5) - ) + pset.execute(parcels.AdvectionRK4, runtime=timedelta(days=10), dt=timedelta(minutes=5)) # OFAM sample file dimensions: time=UNLIMITED, st_ocean=1, st_edges_ocean=52, lat=601, lon=2001 assert len(fieldset.U.grid.load_chunk) == len(fieldset.V.grid.load_chunk) if chunk_mode is False: @@ -465,17 +419,12 @@ def test_ofam_3D(mode, chunk_mode): for bsize in fieldset.U.grid.chunk_info[3 : 3 + numblocks[0]]: vblocks += bsize ublocks = 0 - for bsize in fieldset.U.grid.chunk_info[ - 3 + numblocks[0] : 3 + numblocks[0] + numblocks[1] - ]: + for bsize in fieldset.U.grid.chunk_info[3 + numblocks[0] : 3 + numblocks[0] + numblocks[1]]: ublocks += bsize matching_numblocks = ublocks == 2001 and vblocks == 601 and dblocks == 1 matching_fields = fieldset.U.grid.chunk_info == fieldset.V.grid.chunk_info matching_uniformblocks = len(fieldset.U.grid.load_chunk) == ( - 1 - * int(math.ceil(1.0 / 60.0)) - * int(math.ceil(601.0 / 50.0)) - * int(math.ceil(2001.0 / 100.0)) + 1 * int(math.ceil(1.0 / 60.0)) * int(math.ceil(601.0 / 50.0)) * int(math.ceil(2001.0 / 100.0)) ) assert matching_uniformblocks or (matching_fields and matching_numblocks) assert abs(pset[0].lon - 173) < 1 @@ -483,9 +432,7 @@ def test_ofam_3D(mode, chunk_mode): @pytest.mark.parametrize("mode", ["jit"]) -@pytest.mark.parametrize( - "chunk_mode", [False, "auto", "specific_same", "specific_different"] -) +@pytest.mark.parametrize("chunk_mode", [False, "auto", "specific_same", "specific_different"]) @pytest.mark.parametrize("using_add_field", [False, True]) def test_mitgcm(mode, chunk_mode, using_add_field): if chunk_mode in [ @@ -537,16 +484,10 @@ def test_mitgcm(mode, chunk_mode, using_add_field): ) fieldset.add_field(fieldset2.V) else: - fieldset = parcels.FieldSet.from_mitgcm( - filenames, variables, dimensions, mesh="flat", chunksize=chs - ) + fieldset = parcels.FieldSet.from_mitgcm(filenames, variables, dimensions, mesh="flat", chunksize=chs) - pset = parcels.ParticleSet.from_list( - fieldset=fieldset, pclass=ptype[mode], lon=5e5, lat=5e5 - ) - pset.execute( - parcels.AdvectionRK4, runtime=timedelta(days=1), dt=timedelta(minutes=5) - ) + pset = parcels.ParticleSet.from_list(fieldset=fieldset, pclass=ptype[mode], lon=5e5, lat=5e5) + pset.execute(parcels.AdvectionRK4, runtime=timedelta(days=1), dt=timedelta(minutes=5)) # MITgcm sample file dimensions: time=10, XG=400, YG=200 if chunk_mode != "specific_different": assert len(fieldset.U.grid.load_chunk) == len(fieldset.V.grid.load_chunk) @@ -559,9 +500,7 @@ def test_mitgcm(mode, chunk_mode, using_add_field): ]: assert len(fieldset.U.grid.load_chunk) != 1 elif "specific" in chunk_mode: - assert len(fieldset.U.grid.load_chunk) == ( - 1 * int(math.ceil(400.0 / 50.0)) * int(math.ceil(200.0 / 100.0)) - ) + assert len(fieldset.U.grid.load_chunk) == (1 * int(math.ceil(400.0 / 50.0)) * int(math.ceil(200.0 / 100.0))) if chunk_mode == "specific_same": assert fieldset.gridset.size == 1 elif chunk_mode == "specific_different": @@ -589,9 +528,7 @@ def test_diff_entry_dimensions_chunks(mode): "U": {"depth": ("depthu", 75), "lat": ("y", 16), "lon": ("x", 16)}, "V": {"depth": ("depthv", 75), "lat": ("y", 16), "lon": ("x", 16)}, } - fieldset = parcels.FieldSet.from_nemo( - filenames, variables, dimensions, chunksize=chs - ) 
+ fieldset = parcels.FieldSet.from_nemo(filenames, variables, dimensions, chunksize=chs) compute_nemo_particle_advection(fieldset, mode) # Nemo sample file dimensions: depthu=75, y=201, x=151 assert len(fieldset.U.grid.load_chunk) == len(fieldset.V.grid.load_chunk) @@ -621,9 +558,7 @@ def test_3d_2dfield_sampling(mode): "V": {"lon": "glamf", "lat": "gphif", "time": "time_counter"}, "nav_lon": {"lon": "glamf", "lat": "gphif"}, } - fieldset = parcels.FieldSet.from_nemo( - filenames, variables, dimensions, chunksize=False - ) + fieldset = parcels.FieldSet.from_nemo(filenames, variables, dimensions, chunksize=False) fieldset.nav_lon.data = np.ones(fieldset.nav_lon.data.shape, dtype=np.float32) fieldset.add_field( parcels.Field( @@ -642,12 +577,8 @@ def test_3d_2dfield_sampling(mode): pset = parcels.ParticleSet(fieldset, pclass=MyParticle, lon=2.5, lat=52) def Sample2D(particle, fieldset, time): - particle.sample_var_curvilinear += fieldset.nav_lon[ - time, particle.depth, particle.lat, particle.lon - ] - particle.sample_var_rectilinear += fieldset.rectilinear_2D[ - time, particle.depth, particle.lat, particle.lon - ] + particle.sample_var_curvilinear += fieldset.nav_lon[time, particle.depth, particle.lat, particle.lon] + particle.sample_var_rectilinear += fieldset.rectilinear_2D[time, particle.depth, particle.lat, particle.lon] runtime, dt = 86400 * 4, 6 * 3600 pset.execute(pset.Kernel(parcels.AdvectionRK4) + Sample2D, runtime=runtime, dt=dt) @@ -697,9 +628,7 @@ def test_diff_entry_chunksize_error_nemo_complex_conform_depth(mode): "V": {"depth": ("depthv", 75), "lat": ("y", 4), "lon": ("x", 16)}, "W": {"depth": ("depthw", 75), "lat": ("y", 16), "lon": ("x", 4)}, } - fieldset = parcels.FieldSet.from_nemo( - filenames, variables, dimensions, chunksize=chs - ) + fieldset = parcels.FieldSet.from_nemo(filenames, variables, dimensions, chunksize=chs) compute_nemo_particle_advection(fieldset, mode) # Nemo sample file dimensions: depthu=75, y=201, x=151 npart_U = 1 @@ -739,9 +668,7 @@ def test_diff_entry_chunksize_error_nemo_complex_conform_depth(mode): @pytest.mark.parametrize("mode", ["jit"]) def test_diff_entry_chunksize_correction_globcurrent(mode): data_folder = parcels.download_example_dataset("GlobCurrent_example_data") - filenames = str( - data_folder / "200201*-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc" - ) + filenames = str(data_folder / "200201*-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc") variables = { "U": "eastward_eulerian_current_velocity", "V": "northward_eulerian_current_velocity", @@ -751,13 +678,9 @@ def test_diff_entry_chunksize_correction_globcurrent(mode): "U": {"lat": ("lat", 16), "lon": ("lon", 16)}, "V": {"lat": ("lat", 16), "lon": ("lon", 4)}, } - fieldset = parcels.FieldSet.from_netcdf( - filenames, variables, dimensions, chunksize=chs - ) + fieldset = parcels.FieldSet.from_netcdf(filenames, variables, dimensions, chunksize=chs) pset = parcels.ParticleSet(fieldset, pclass=ptype[mode], lon=25, lat=-35) - pset.execute( - parcels.AdvectionRK4, runtime=timedelta(days=1), dt=timedelta(minutes=5) - ) + pset.execute(parcels.AdvectionRK4, runtime=timedelta(days=1), dt=timedelta(minutes=5)) # GlobCurrent sample file dimensions: time=UNLIMITED, lat=41, lon=81 npart_U = 1 npart_U = [npart_U * k for k in fieldset.U.nchunks[1:]] diff --git a/docs/examples/example_decaying_moving_eddy.py b/docs/examples/example_decaying_moving_eddy.py index 46bc10d5..a1f52a9d 100644 --- a/docs/examples/example_decaying_moving_eddy.py +++ b/docs/examples/example_decaying_moving_eddy.py @@ 
-10,18 +10,14 @@ ptype = {"scipy": parcels.ScipyParticle, "jit": parcels.JITParticle} # Define some constants. u_g = 0.04 # Geostrophic current u_0 = 0.3 # Initial speed in x direction. v_0 = 0 -gamma = ( - 1.0 / timedelta(days=2.89).total_seconds() -) # Dissipative effects due to viscosity. +gamma = 1.0 / timedelta(days=2.89).total_seconds() # Dissipative effects due to viscosity. gamma_g = 1.0 / timedelta(days=28.9).total_seconds() f = 1.0e-4 # Coriolis parameter. start_lon = [10000.0] # Define the start longitude and latitude for the particle. start_lat = [10000.0] -def decaying_moving_eddy_fieldset( - xdim=2, ydim=2 -): # Define 2D flat, square fieldset for testing purposes. +def decaying_moving_eddy_fieldset(xdim=2, ydim=2): # Define 2D flat, square fieldset for testing purposes. """Simulate an ocean that accelerates subject to Coriolis force and dissipative effects, upon which a geostrophic current is superimposed. @@ -40,9 +36,7 @@ def decaying_moving_eddy_fieldset( V = np.zeros((time.size, lat.size, lon.size), dtype=np.float32) for t in range(time.size): - U[t, :, :] = u_g * np.exp(-gamma_g * time[t]) + (u_0 - u_g) * np.exp( - -gamma * time[t] - ) * np.cos(f * time[t]) + U[t, :, :] = u_g * np.exp(-gamma_g * time[t]) + (u_0 - u_g) * np.exp(-gamma * time[t]) * np.cos(f * time[t]) V[t, :, :] = -(u_0 - u_g) * np.exp(-gamma * time[t]) * np.sin(f * time[t]) data = {"U": U, "V": V} @@ -50,18 +44,13 @@ def decaying_moving_eddy_fieldset( return parcels.FieldSet.from_data(data, dimensions, mesh="flat") -def true_values( - t, x_0, y_0 -): # Calculate the expected values for particles at the endtime, given their start location. +def true_values(t, x_0, y_0): # Calculate the expected values for particles at the endtime, given their start location. x = ( x_0 + (u_g / gamma_g) * (1 - np.exp(-gamma_g * t)) + f * ((u_0 - u_g) / (f**2 + gamma**2)) - * ( - (gamma / f) - + np.exp(-gamma * t) * (np.sin(f * t) - (gamma / f) * np.cos(f * t)) - ) + * ((gamma / f) + np.exp(-gamma * t) * (np.sin(f * t) - (gamma / f) * np.cos(f * t))) ) y = y_0 - ((u_0 - u_g) / (f**2 + gamma**2)) * f * ( 1 - np.exp(-gamma * t) * (np.cos(f * t) + (gamma / f) * np.sin(f * t)) @@ -70,12 +59,8 @@ def true_values( return np.array([x, y]) -def decaying_moving_example( - fieldset, outfile, mode="scipy", method=parcels.AdvectionRK4 -): - pset = parcels.ParticleSet( - fieldset, pclass=ptype[mode], lon=start_lon, lat=start_lat - ) +def decaying_moving_example(fieldset, outfile, mode="scipy", method=parcels.AdvectionRK4): + pset = parcels.ParticleSet(fieldset, pclass=ptype[mode], lon=start_lon, lat=start_lat) dt = timedelta(minutes=5) runtime = timedelta(days=2) @@ -96,9 +81,7 @@ def test_rotation_example(mode, tmpdir): outfile = tmpdir.join("DecayingMovingParticle.zarr") fieldset = decaying_moving_eddy_fieldset() pset = decaying_moving_example(fieldset, outfile, mode=mode) - vals = true_values( - pset[0].time, start_lon, start_lat - ) # Calculate values for the particle. + vals = true_values(pset[0].time, start_lon, start_lat) # Calculate values for the particle. assert np.allclose( np.array([[pset[0].lon], [pset[0].lat]]), vals, 1e-2 ) # Check advected values against calculated values.
diff --git a/docs/examples/example_globcurrent.py b/docs/examples/example_globcurrent.py index 663d0dd3..00753140 100755 --- a/docs/examples/example_globcurrent.py +++ b/docs/examples/example_globcurrent.py @@ -20,9 +20,7 @@ def set_globcurrent_fieldset( ): if filename is None: data_folder = parcels.download_example_dataset("GlobCurrent_example_data") - filename = str( - data_folder / "2002*-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc" - ) + filename = str(data_folder / "2002*-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc") variables = { "U": "eastward_eulerian_current_velocity", "V": "northward_eulerian_current_velocity", @@ -33,9 +31,7 @@ def set_globcurrent_fieldset( dimensions = {"lat": "lat", "lon": "lon"} if use_xarray: ds = xr.open_mfdataset(filename, combine="by_coords") - return parcels.FieldSet.from_xarray_dataset( - ds, variables, dimensions, time_periodic=time_periodic - ) + return parcels.FieldSet.from_xarray_dataset(ds, variables, dimensions, time_periodic=time_periodic) else: return parcels.FieldSet.from_netcdf( filename, @@ -66,9 +62,7 @@ def test_globcurrent_fieldset(use_xarray): @pytest.mark.parametrize("mode", ["scipy", "jit"]) -@pytest.mark.parametrize( - "dt, lonstart, latstart", [(3600.0, 25, -35), (-3600.0, 20, -39)] -) +@pytest.mark.parametrize("dt, lonstart, latstart", [(3600.0, 25, -35), (-3600.0, 20, -39)]) @pytest.mark.parametrize("use_xarray", [True, False]) def test_globcurrent_fieldset_advancetime(mode, dt, lonstart, latstart, use_xarray): data_folder = parcels.download_example_dataset("GlobCurrent_example_data") @@ -76,16 +70,10 @@ def test_globcurrent_fieldset_advancetime(mode, dt, lonstart, latstart, use_xarr files = sorted(glob(str(basepath))) fieldsetsub = set_globcurrent_fieldset(files[0:10], use_xarray=use_xarray) - psetsub = parcels.ParticleSet.from_list( - fieldset=fieldsetsub, pclass=ptype[mode], lon=[lonstart], lat=[latstart] - ) + psetsub = parcels.ParticleSet.from_list(fieldset=fieldsetsub, pclass=ptype[mode], lon=[lonstart], lat=[latstart]) - fieldsetall = set_globcurrent_fieldset( - files[0:10], deferred_load=False, use_xarray=use_xarray - ) - psetall = parcels.ParticleSet.from_list( - fieldset=fieldsetall, pclass=ptype[mode], lon=[lonstart], lat=[latstart] - ) + fieldsetall = set_globcurrent_fieldset(files[0:10], deferred_load=False, use_xarray=use_xarray) + psetall = parcels.ParticleSet.from_list(fieldset=fieldsetall, pclass=ptype[mode], lon=[lonstart], lat=[latstart]) if dt < 0: psetsub[0].time_nextloop = fieldsetsub.U.grid.time[-1] psetall[0].time_nextloop = fieldsetall.U.grid.time[-1] @@ -106,9 +94,7 @@ def test_globcurrent_particles(mode, use_xarray): pset = parcels.ParticleSet(fieldset, pclass=ptype[mode], lon=lonstart, lat=latstart) - pset.execute( - parcels.AdvectionRK4, runtime=timedelta(days=1), dt=timedelta(minutes=5) - ) + pset.execute(parcels.AdvectionRK4, runtime=timedelta(days=1), dt=timedelta(minutes=5)) assert abs(pset[0].lon - 23.8) < 1 assert abs(pset[0].lat - -35.3) < 1 @@ -119,15 +105,11 @@ def test_globcurrent_particles(mode, use_xarray): def test_globcurrent_time_periodic(mode, rundays): sample_var = [] for deferred_load in [True, False]: - fieldset = set_globcurrent_fieldset( - time_periodic=timedelta(days=365), deferred_load=deferred_load - ) + fieldset = set_globcurrent_fieldset(time_periodic=timedelta(days=365), deferred_load=deferred_load) MyParticle = ptype[mode].add_variable("sample_var", initial=0.0) - pset = parcels.ParticleSet( - fieldset, pclass=MyParticle, lon=25, lat=-35, time=fieldset.U.grid.time[0] 
- ) + pset = parcels.ParticleSet(fieldset, pclass=MyParticle, lon=25, lat=-35, time=fieldset.U.grid.time[0]) def SampleU(particle, fieldset, time): u, v = fieldset.UV[time, particle.depth, particle.lat, particle.lon] @@ -145,14 +127,10 @@ def test_globcurrent_xarray_vs_netcdf(dt): fieldsetxarray = set_globcurrent_fieldset(use_xarray=True) lonstart, latstart, runtime = (25, -35, timedelta(days=7)) - psetN = parcels.ParticleSet( - fieldsetNetcdf, pclass=parcels.JITParticle, lon=lonstart, lat=latstart - ) + psetN = parcels.ParticleSet(fieldsetNetcdf, pclass=parcels.JITParticle, lon=lonstart, lat=latstart) psetN.execute(parcels.AdvectionRK4, runtime=runtime, dt=dt) - psetX = parcels.ParticleSet( - fieldsetxarray, pclass=parcels.JITParticle, lon=lonstart, lat=latstart - ) + psetX = parcels.ParticleSet(fieldsetxarray, pclass=parcels.JITParticle, lon=lonstart, lat=latstart) psetX.execute(parcels.AdvectionRK4, runtime=runtime, dt=dt) assert np.allclose(psetN[0].lon, psetX[0].lon) @@ -166,14 +144,10 @@ def test_globcurrent_netcdf_timestamps(dt): fieldsetTimestamps = set_globcurrent_fieldset(timestamps=timestamps) lonstart, latstart, runtime = (25, -35, timedelta(days=7)) - psetN = parcels.ParticleSet( - fieldsetNetcdf, pclass=parcels.JITParticle, lon=lonstart, lat=latstart - ) + psetN = parcels.ParticleSet(fieldsetNetcdf, pclass=parcels.JITParticle, lon=lonstart, lat=latstart) psetN.execute(parcels.AdvectionRK4, runtime=runtime, dt=dt) - psetT = parcels.ParticleSet( - fieldsetTimestamps, pclass=parcels.JITParticle, lon=lonstart, lat=latstart - ) + psetT = parcels.ParticleSet(fieldsetTimestamps, pclass=parcels.JITParticle, lon=lonstart, lat=latstart) psetT.execute(parcels.AdvectionRK4, runtime=runtime, dt=dt) assert np.allclose(psetN.lon[0], psetT.lon[0]) @@ -234,9 +208,7 @@ def test_globcurrent_time_extrapolation_error(mode, use_xarray): time=fieldset.U.time[0] - timedelta(days=1).total_seconds(), ) - pset.execute( - parcels.AdvectionRK4, runtime=timedelta(days=1), dt=timedelta(minutes=5) - ) + pset.execute(parcels.AdvectionRK4, runtime=timedelta(days=1), dt=timedelta(minutes=5)) @pytest.mark.parametrize("mode", ["scipy", "jit"]) @@ -258,15 +230,11 @@ def test_globcurrent_startparticles_between_time_arrays(mode, dt, with_starttime MyParticle = ptype[mode].add_variable("sample_var", initial=0.0) def SampleP(particle, fieldset, time): - particle.sample_var += fieldset.P[ - time, particle.depth, particle.lat, particle.lon - ] + particle.sample_var += fieldset.P[time, particle.depth, particle.lat, particle.lon] if with_starttime: time = fieldset.U.grid.time[0] if dt > 0 else fieldset.U.grid.time[-1] - pset = parcels.ParticleSet( - fieldset, pclass=MyParticle, lon=[25], lat=[-35], time=time - ) + pset = parcels.ParticleSet(fieldset, pclass=MyParticle, lon=[25], lat=[-35], time=time) else: pset = parcels.ParticleSet(fieldset, pclass=MyParticle, lon=[25], lat=[-35]) @@ -294,9 +262,7 @@ def test_globcurrent_particle_independence(mode, rundays=5): if particle.id == 0: particle.delete() - pset0 = parcels.ParticleSet( - fieldset, pclass=ptype[mode], lon=[25, 25], lat=[-35, -35], time=time0 - ) + pset0 = parcels.ParticleSet(fieldset, pclass=ptype[mode], lon=[25, 25], lat=[-35, -35], time=time0) pset0.execute( pset0.Kernel(DeleteP0) + parcels.AdvectionRK4, @@ -304,13 +270,9 @@ def test_globcurrent_particle_independence(mode, rundays=5): dt=timedelta(minutes=5), ) - pset1 = parcels.ParticleSet( - fieldset, pclass=ptype[mode], lon=[25, 25], lat=[-35, -35], time=time0 - ) + pset1 = parcels.ParticleSet(fieldset, 
pclass=ptype[mode], lon=[25, 25], lat=[-35, -35], time=time0) - pset1.execute( - parcels.AdvectionRK4, runtime=timedelta(days=rundays), dt=timedelta(minutes=5) - ) + pset1.execute(parcels.AdvectionRK4, runtime=timedelta(days=rundays), dt=timedelta(minutes=5)) assert np.allclose([pset0[-1].lon, pset0[-1].lat], [pset1[-1].lon, pset1[-1].lat]) @@ -325,9 +287,7 @@ def test_globcurrent_pset_fromfile(mode, dt, pid_offset, tmpdir): ptype[mode].setLastID(pid_offset) pset = parcels.ParticleSet(fieldset, pclass=ptype[mode], lon=25, lat=-35) pfile = pset.ParticleFile(filename, outputdt=timedelta(hours=6)) - pset.execute( - parcels.AdvectionRK4, runtime=timedelta(days=1), dt=dt, output_file=pfile - ) + pset.execute(parcels.AdvectionRK4, runtime=timedelta(days=1), dt=dt, output_file=pfile) pfile.write_latest_locations(pset, max(pset.time_nextloop)) restarttime = np.nanmax if dt > 0 else np.nanmin @@ -338,9 +298,7 @@ def test_globcurrent_pset_fromfile(mode, dt, pid_offset, tmpdir): pset_new.execute(parcels.AdvectionRK4, runtime=timedelta(days=1), dt=dt) for var in ["lon", "lat", "depth", "time", "id"]: - assert np.allclose( - [getattr(p, var) for p in pset], [getattr(p, var) for p in pset_new] - ) + assert np.allclose([getattr(p, var) for p in pset], [getattr(p, var) for p in pset_new]) @pytest.mark.parametrize("mode", ["scipy", "jit"]) diff --git a/docs/examples/example_mitgcm.py b/docs/examples/example_mitgcm.py index f9cea663..b90afe07 100644 --- a/docs/examples/example_mitgcm.py +++ b/docs/examples/example_mitgcm.py @@ -20,9 +20,7 @@ def run_mitgcm_zonally_reentrant(mode): "U": {"lon": "XG", "lat": "YG", "time": "time"}, "V": {"lon": "XG", "lat": "YG", "time": "time"}, } - fieldset = parcels.FieldSet.from_mitgcm( - filenames, variables, dimensions, mesh="flat" - ) + fieldset = parcels.FieldSet.from_mitgcm(filenames, variables, dimensions, mesh="flat") fieldset.add_periodic_halo(zonal=True) fieldset.add_constant("domain_width", 1000000) @@ -48,9 +46,7 @@ def run_mitgcm_zonally_reentrant(mode): chunks=(len(pset), 1), ) kernels = parcels.AdvectionRK4 + pset.Kernel(periodicBC) - pset.execute( - kernels, runtime=timedelta(days=5), dt=timedelta(minutes=30), output_file=pfile - ) + pset.execute(kernels, runtime=timedelta(days=5), dt=timedelta(minutes=30), output_file=pfile) def test_mitgcm_output_compare(): diff --git a/docs/examples/example_moving_eddies.py b/docs/examples/example_moving_eddies.py index 1d238aad..bc6c0d7c 100644 --- a/docs/examples/example_moving_eddies.py +++ b/docs/examples/example_moving_eddies.py @@ -58,11 +58,7 @@ def moving_eddies_fieldset(xdim=200, ydim=350, mesh="flat"): def cosd(x): return math.cos(math.radians(float(x))) - dx = ( - (lon[1] - lon[0]) * 1852 * 60 * cosd(lat.mean()) - if mesh == "spherical" - else lon[1] - lon[0] - ) + dx = (lon[1] - lon[0]) * 1852 * 60 * cosd(lat.mean()) if mesh == "spherical" else lon[1] - lon[0] dy = (lat[1] - lat[0]) * 1852 * 60 if mesh == "spherical" else lat[1] - lat[0] # Define arrays U (zonal), V (meridional), and P (sea surface height) on A-grid @@ -87,12 +83,10 @@ def moving_eddies_fieldset(xdim=200, ydim=350, mesh="flat"): hxmax_2 = 0.75 * lon.size - dX * t P[:, :, t] = h0 * np.exp( - -((x - hxmax_1) ** 2) / (sig * lon.size / 4.0) ** 2 - - (y - hymax_1) ** 2 / (sig * lat.size / 7.0) ** 2 + -((x - hxmax_1) ** 2) / (sig * lon.size / 4.0) ** 2 - (y - hymax_1) ** 2 / (sig * lat.size / 7.0) ** 2 ) P[:, :, t] += h0 * np.exp( - -((x - hxmax_2) ** 2) / (sig * lon.size / 4.0) ** 2 - - (y - hymax_2) ** 2 / (sig * lat.size / 7.0) ** 2 + -((x - 
hxmax_2) ** 2) / (sig * lon.size / 4.0) ** 2 - (y - hymax_2) ** 2 / (sig * lat.size / 7.0) ** 2 ) V[:-1, :, t] = -np.diff(P[:, :, t], axis=0) / dx / corio_0 * g @@ -113,9 +107,7 @@ def moving_eddies_fieldset(xdim=200, ydim=350, mesh="flat"): return fieldset -def moving_eddies_example( - fieldset, outfile, npart=2, mode="jit", verbose=False, method=parcels.AdvectionRK4 -): +def moving_eddies_example(fieldset, outfile, npart=2, mode="jit", verbose=False, method=parcels.AdvectionRK4): """Configuration of a particle set that follows two moving eddies. @@ -137,9 +129,7 @@ def moving_eddies_example( # Determine particle class according to mode start = (3.3, 46.0) if fieldset.U.grid.mesh == "spherical" else (3.3e5, 1e5) finish = (3.3, 47.8) if fieldset.U.grid.mesh == "spherical" else (3.3e5, 2.8e5) - pset = parcels.ParticleSet.from_line( - fieldset=fieldset, size=npart, pclass=ptype[mode], start=start, finish=finish - ) + pset = parcels.ParticleSet.from_line(fieldset=fieldset, size=npart, pclass=ptype[mode], start=start, finish=finish) if verbose: print(f"Initial particle positions:\n{pset}") @@ -169,9 +159,7 @@ def test_moving_eddies_fwdbwd(mode, mesh, tmpdir, npart=2): # Determine particle class according to mode lons = [3.3, 3.3] if fieldset.U.grid.mesh == "spherical" else [3.3e5, 3.3e5] lats = [46.0, 47.8] if fieldset.U.grid.mesh == "spherical" else [1e5, 2.8e5] - pset = parcels.ParticleSet( - fieldset=fieldset, pclass=ptype[mode], lon=lons, lat=lats - ) + pset = parcels.ParticleSet(fieldset=fieldset, pclass=ptype[mode], lon=lons, lat=lats) # Execute for 14 days, with 30sec timesteps and hourly output runtime = timedelta(days=1) @@ -197,9 +185,7 @@ def test_moving_eddies_fwdbwd(mode, mesh, tmpdir, npart=2): # Also include last timestep for var in ["lon", "lat", "depth", "time"]: - pset.particledata.setallvardata( - f"{var}", pset.particledata.getvardata(f"{var}_nextloop") - ) + pset.particledata.setallvardata(f"{var}", pset.particledata.getvardata(f"{var}_nextloop")) assert np.allclose(pset.lon, lons) assert np.allclose(pset.lat, lats) @@ -213,9 +199,7 @@ def test_moving_eddies_fieldset(mode, mesh, tmpdir): pset = moving_eddies_example(fieldset, outfile, 2, mode=mode) # Also include last timestep for var in ["lon", "lat", "depth", "time"]: - pset.particledata.setallvardata( - f"{var}", pset.particledata.getvardata(f"{var}_nextloop") - ) + pset.particledata.setallvardata(f"{var}", pset.particledata.getvardata(f"{var}_nextloop")) if mesh == "flat": assert pset[0].lon < 2.2e5 and 1.1e5 < pset[0].lat < 1.2e5 assert pset[1].lon < 2.2e5 and 3.7e5 < pset[1].lat < 3.8e5 @@ -236,16 +220,12 @@ def fieldsetfile(mesh, tmpdir): @pytest.mark.parametrize("mesh", ["flat", "spherical"]) def test_moving_eddies_file(mode, mesh, tmpdir): gc.collect() - fieldset = parcels.FieldSet.from_parcels( - fieldsetfile(mesh, tmpdir), extra_fields={"P": "P"} - ) + fieldset = parcels.FieldSet.from_parcels(fieldsetfile(mesh, tmpdir), extra_fields={"P": "P"}) outfile = tmpdir.join("EddyParticle") pset = moving_eddies_example(fieldset, outfile, 2, mode=mode) # Also include last timestep for var in ["lon", "lat", "depth", "time"]: - pset.particledata.setallvardata( - f"{var}", pset.particledata.getvardata(f"{var}_nextloop") - ) + pset.particledata.setallvardata(f"{var}", pset.particledata.getvardata(f"{var}_nextloop")) if mesh == "flat": assert pset[0].lon < 2.2e5 and 1.1e5 < pset[0].lat < 1.2e5 assert pset[1].lon < 2.2e5 and 3.7e5 < pset[1].lat < 3.8e5 @@ -265,9 +245,7 @@ def test_periodic_and_computeTimeChunk_eddies(mode):
fieldset.add_constant("halo_south", fieldset.U.grid.lat[0]) fieldset.add_constant("halo_north", fieldset.U.grid.lat[-1]) fieldset.add_periodic_halo(zonal=True, meridional=True) - pset = parcels.ParticleSet.from_list( - fieldset=fieldset, pclass=ptype[mode], lon=[3.3, 3.3], lat=[46.0, 47.8] - ) + pset = parcels.ParticleSet.from_list(fieldset=fieldset, pclass=ptype[mode], lon=[3.3, 3.3], lat=[46.0, 47.8]) def periodicBC(particle, fieldset, time): if particle.lon < fieldset.halo_west: @@ -299,9 +277,7 @@ Example of particle advection around an idealised peninsula""" default="jit", help="Execution mode for performing RK4 computation", ) - p.add_argument( - "-p", "--particles", type=int, default=2, help="Number of particles to advect" - ) + p.add_argument("-p", "--particles", type=int, default=2, help="Number of particles to advect") p.add_argument( "-v", "--verbose", @@ -336,9 +312,7 @@ Example of particle advection around an idealised peninsula""" # Generate fieldset files according to given dimensions if args.fieldset is not None: - fieldset = moving_eddies_fieldset( - args.fieldset[0], args.fieldset[1], mesh="flat" - ) + fieldset = moving_eddies_fieldset(args.fieldset[0], args.fieldset[1], mesh="flat") else: fieldset = moving_eddies_fieldset(mesh="flat") outfile = "EddyParticle" diff --git a/docs/examples/example_nemo_curvilinear.py b/docs/examples/example_nemo_curvilinear.py index 181733a3..c76d6574 100644 --- a/docs/examples/example_nemo_curvilinear.py +++ b/docs/examples/example_nemo_curvilinear.py @@ -32,9 +32,7 @@ def run_nemo_curvilinear(mode, outfile, advtype="RK4"): variables = {"U": "U", "V": "V"} dimensions = {"lon": "glamf", "lat": "gphif"} chunksize = {"lat": ("y", 256), "lon": ("x", 512)} - fieldset = parcels.FieldSet.from_nemo( - filenames, variables, dimensions, chunksize=chunksize - ) + fieldset = parcels.FieldSet.from_nemo(filenames, variables, dimensions, chunksize=chunksize) assert fieldset.U.chunksize == chunksize # Now run particles as normal diff --git a/docs/examples/example_ofam.py b/docs/examples/example_ofam.py index 791d890f..65728970 100644 --- a/docs/examples/example_ofam.py +++ b/docs/examples/example_ofam.py @@ -25,9 +25,7 @@ def set_ofam_fieldset(deferred_load=True, use_xarray=False): } if use_xarray: ds = xr.open_mfdataset([filenames["U"], filenames["V"]], combine="by_coords") - return parcels.FieldSet.from_xarray_dataset( - ds, variables, dimensions, allow_time_extrapolation=True - ) + return parcels.FieldSet.from_xarray_dataset(ds, variables, dimensions, allow_time_extrapolation=True) else: return parcels.FieldSet.from_netcdf( filenames, @@ -52,14 +50,10 @@ def test_ofam_xarray_vs_netcdf(dt): fieldsetxarray = set_ofam_fieldset(use_xarray=True) lonstart, latstart, runtime = (180, 10, timedelta(days=7)) - psetN = parcels.ParticleSet( - fieldsetNetcdf, pclass=parcels.JITParticle, lon=lonstart, lat=latstart - ) + psetN = parcels.ParticleSet(fieldsetNetcdf, pclass=parcels.JITParticle, lon=lonstart, lat=latstart) psetN.execute(parcels.AdvectionRK4, runtime=runtime, dt=dt) - psetX = parcels.ParticleSet( - fieldsetxarray, pclass=parcels.JITParticle, lon=lonstart, lat=latstart - ) + psetX = parcels.ParticleSet(fieldsetxarray, pclass=parcels.JITParticle, lon=lonstart, lat=latstart) psetX.execute(parcels.AdvectionRK4, runtime=runtime, dt=dt) assert np.allclose(psetN[0].lon, psetX[0].lon) @@ -76,13 +70,9 @@ def test_ofam_particles(mode, use_xarray): latstart = [10] depstart = [2.5] # the depth of the first layer in OFAM - pset = parcels.ParticleSet( - fieldset, 
pclass=ptype[mode], lon=lonstart, lat=latstart, depth=depstart - ) + pset = parcels.ParticleSet(fieldset, pclass=ptype[mode], lon=lonstart, lat=latstart, depth=depstart) - pset.execute( - parcels.AdvectionRK4, runtime=timedelta(days=10), dt=timedelta(minutes=5) - ) + pset.execute(parcels.AdvectionRK4, runtime=timedelta(days=10), dt=timedelta(minutes=5)) assert abs(pset[0].lon - 173) < 1 assert abs(pset[0].lat - 11) < 1 diff --git a/docs/examples/example_peninsula.py b/docs/examples/example_peninsula.py index 3299d638..d0922231 100644 --- a/docs/examples/example_peninsula.py +++ b/docs/examples/example_peninsula.py @@ -165,9 +165,7 @@ def peninsula_example( dt = timedelta(minutes=5) k_adv = pset.Kernel(method) k_p = pset.Kernel(UpdateP) - out = ( - pset.ParticleFile(name=outfile, outputdt=timedelta(hours=1)) if output else None - ) + out = pset.ParticleFile(name=outfile, outputdt=timedelta(hours=1)) if output else None print("Peninsula: Advecting %d particles for %s" % (npart, str(time))) pset.execute(k_adv + k_p, runtime=time, dt=dt, output_file=out) @@ -189,28 +187,18 @@ def test_peninsula_fieldset(mode, mesh, tmpdir): assert (err_adv <= 1.0e-3).all() # Test Field sampling accuracy by comparing kernel against Field sampling err_smpl = np.array( - [ - abs( - pset.p[i] - - pset.fieldset.P[0.0, pset.depth[i], pset.lat[i], pset.lon[i]] - ) - for i in range(pset.size) - ] + [abs(pset.p[i] - pset.fieldset.P[0.0, pset.depth[i], pset.lat[i], pset.lon[i]]) for i in range(pset.size)] ) assert (err_smpl <= 1.0e-3).all() -@pytest.mark.parametrize( - "mode", ["scipy"] -) # Analytical Advection only implemented in Scipy mode +@pytest.mark.parametrize("mode", ["scipy"]) # Analytical Advection only implemented in Scipy mode @pytest.mark.parametrize("mesh", ["flat", "spherical"]) def test_peninsula_fieldset_AnalyticalAdvection(mode, mesh, tmpdir): """Execute peninsula test using Analytical Advection on C grid.""" fieldset = peninsula_fieldset(101, 51, "flat", grid_type="C") outfile = tmpdir.join("PeninsulaAA") - pset = peninsula_example( - fieldset, outfile, npart=10, mode=mode, method=parcels.AdvectionAnalytical - ) + pset = peninsula_example(fieldset, outfile, npart=10, mode=mode, method=parcels.AdvectionAnalytical) # Test advection accuracy by comparing streamline values err_adv = np.array([abs(p.p_start - p.p) for p in pset]) @@ -243,13 +231,7 @@ def test_peninsula_file(mode, mesh, tmpdir): assert (err_adv <= 1.0e-3).all() # Test Field sampling accuracy by comparing kernel against Field sampling err_smpl = np.array( - [ - abs( - pset.p[i] - - pset.fieldset.P[0.0, pset.depth[i], pset.lat[i], pset.lon[i]] - ) - for i in range(pset.size) - ] + [abs(pset.p[i] - pset.fieldset.P[0.0, pset.depth[i], pset.lat[i], pset.lon[i]]) for i in range(pset.size)] ) assert (err_smpl <= 1.0e-3).all() @@ -266,12 +248,8 @@ Example of particle advection around an idealised peninsula""" default="jit", help="Execution mode for performing RK4 computation", ) - p.add_argument( - "-p", "--particles", type=int, default=20, help="Number of particles to advect" - ) - p.add_argument( - "-d", "--degree", type=int, default=1, help="Degree of spatial interpolation" - ) + p.add_argument("-p", "--particles", type=int, default=20, help="Number of particles to advect") + p.add_argument("-d", "--degree", type=int, default=1, help="Degree of spatial interpolation") p.add_argument( "-v", "--verbose", @@ -317,9 +295,7 @@ Example of particle advection around an idealised peninsula""" fieldset.write(filename) # Open fieldset file set - fieldset 
= parcels.FieldSet.from_parcels( - "peninsula", extra_fields={"P": "P"}, allow_time_extrapolation=True - ) + fieldset = parcels.FieldSet.from_parcels("peninsula", extra_fields={"P": "P"}, allow_time_extrapolation=True) outfile = "Peninsula" if args.profiling: diff --git a/docs/examples/example_radial_rotation.py b/docs/examples/example_radial_rotation.py index 509e1976..fa95d4d7 100644 --- a/docs/examples/example_radial_rotation.py +++ b/docs/examples/example_radial_rotation.py @@ -9,9 +9,7 @@ import parcels ptype = {"scipy": parcels.ScipyParticle, "jit": parcels.JITParticle} -def radial_rotation_fieldset( - xdim=200, ydim=200 -): # Define 2D flat, square fieldset for testing purposes. +def radial_rotation_fieldset(xdim=200, ydim=200): # Define 2D flat, square fieldset for testing purposes. lon = np.linspace(0, 60, xdim, dtype=np.float32) lat = np.linspace(0, 60, ydim, dtype=np.float32) @@ -26,9 +24,7 @@ def radial_rotation_fieldset( for i in range(lon.size): for j in range(lat.size): - r = np.sqrt( - (lon[i] - x0) ** 2 + (lat[j] - y0) ** 2 - ) # Define radial displacement. + r = np.sqrt((lon[i] - x0) ** 2 + (lat[j] - y0) ** 2) # Define radial displacement. assert r >= 0.0 assert r <= np.sqrt(x0**2 + y0**2) @@ -79,13 +75,9 @@ def test_rotation_example(mode, tmpdir): fieldset = radial_rotation_fieldset() outfile = tmpdir.join("RadialParticle") pset = rotation_example(fieldset, outfile, mode=mode) - assert ( - pset[0].lon == 30.0 and pset[0].lat == 30.0 - ) # Particle at centre of Field remains stationary. + assert pset[0].lon == 30.0 and pset[0].lat == 30.0 # Particle at centre of Field remains stationary. vals = true_values(pset.time[1]) - assert np.allclose( - pset[1].lon, vals[0], 1e-5 - ) # Check advected values against calculated values. + assert np.allclose(pset[1].lon, vals[0], 1e-5) # Check advected values against calculated values. 
assert np.allclose(pset[1].lat, vals[1], 1e-5) diff --git a/docs/examples/example_stommel.py b/docs/examples/example_stommel.py index 58150eec..6f63514d 100755 --- a/docs/examples/example_stommel.py +++ b/docs/examples/example_stommel.py @@ -45,25 +45,10 @@ def stommel_fieldset(xdim=200, ydim=200, grid_type="A"): for i in range(lon.size): xi = lon[i] / a yi = lat[j] / b - P[j, i] = ( - (1 - math.exp(-xi / es) - xi) - * math.pi - * np.sin(math.pi * yi) - * scalefac - ) + P[j, i] = (1 - math.exp(-xi / es) - xi) * math.pi * np.sin(math.pi * yi) * scalefac if grid_type == "A": - U[j, i] = ( - -(1 - math.exp(-xi / es) - xi) - * math.pi**2 - * np.cos(math.pi * yi) - * scalefac - ) - V[j, i] = ( - (math.exp(-xi / es) / es - 1) - * math.pi - * np.sin(math.pi * yi) - * scalefac - ) + U[j, i] = -(1 - math.exp(-xi / es) - xi) * math.pi**2 * np.cos(math.pi * yi) * scalefac + V[j, i] = (math.exp(-xi / es) / es - 1) * math.pi * np.sin(math.pi * yi) * scalefac if grid_type == "C": V[:, 1:] = (P[:, 1:] - P[:, 0:-1]) / dx * a U[1:, :] = -(P[1:, :] - P[0:-1, :]) / dy * b @@ -106,9 +91,7 @@ def stommel_example( write_fields=True, custom_partition_function=False, ): - parcels.timer.fieldset = parcels.timer.Timer( - "FieldSet", parent=parcels.timer.stommel - ) + parcels.timer.fieldset = parcels.timer.Timer("FieldSet", parent=parcels.timer.stommel) fieldset = stommel_fieldset(grid_type=grid_type) if write_fields: filename = "stommel" @@ -206,12 +189,7 @@ def test_stommel_fieldset(mode, grid_type, tmpdir): assert (err_adv <= 1.0e-1).all() err_smpl = np.array( [ - abs( - psetRK4.p[i] - - psetRK4.fieldset.P[ - 0.0, psetRK4.lon[i], psetRK4.lat[i], psetRK4.depth[i] - ] - ) + abs(psetRK4.p[i] - psetRK4.fieldset.P[0.0, psetRK4.lon[i], psetRK4.lat[i], psetRK4.depth[i]]) for i in range(psetRK4.size) ] ) @@ -235,9 +213,7 @@ Example of particle advection in the steady-state solution of the Stommel equati default="jit", help="Execution mode for performing computation", ) - p.add_argument( - "-p", "--particles", type=int, default=1, help="Number of particles to advect" - ) + p.add_argument("-p", "--particles", type=int, default=1, help="Number of particles to advect") p.add_argument( "-v", "--verbose", @@ -252,12 +228,8 @@ Example of particle advection in the steady-state solution of the Stommel equati default="RK4", help="Numerical method used for advection", ) - p.add_argument( - "-o", "--outfile", default="StommelParticle.zarr", help="Name of output file" - ) - p.add_argument( - "-r", "--repeatdt", default=None, type=int, help="repeatdt of the ParticleSet" - ) + p.add_argument("-o", "--outfile", default="StommelParticle.zarr", help="Name of output file") + p.add_argument("-r", "--repeatdt", default=None, type=int, help="repeatdt of the ParticleSet") p.add_argument( "-a", "--maxage", diff --git a/parcels/application_kernels/EOSseawaterproperties.py b/parcels/application_kernels/EOSseawaterproperties.py index 50f6cf15..167c7111 100644 --- a/parcels/application_kernels/EOSseawaterproperties.py +++ b/parcels/application_kernels/EOSseawaterproperties.py @@ -1,7 +1,8 @@ """Collection of pre-built eos sea water property kernels.""" + import math -__all__ = ['PressureFromLatDepth', 'AdiabticTemperatureGradient', 'PtempFromTemp', 'TempFromPtemp', 'UNESCODensity'] +__all__ = ["PressureFromLatDepth", "AdiabticTemperatureGradient", "PtempFromTemp", "TempFromPtemp", "UNESCODensity"] def PressureFromLatDepth(particle, fieldset, time): @@ -22,7 +23,7 @@ def PressureFromLatDepth(particle, fieldset, time): # Angle conversions. 
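# math.sin expects radians, so the latitude in degrees is converted via deg2rad below; the max(lat, -lat) construction is equivalent to abs(lat), making the pressure formula symmetric about the equator.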
deg2rad = math.pi / 180.0 - X = math.sin(max(particle.lat * deg2rad, -1*particle.lat * deg2rad)) + X = math.sin(max(particle.lat * deg2rad, -1 * particle.lat * deg2rad)) C1 = 5.92e-3 + math.pow(X, 2) * 5.25e-3 particle.pressure = ((1 - C1) - math.pow(((math.pow((1 - C1), 2)) - (8.84e-6 * particle.depth)), 0.5)) / 4.42e-6 @@ -71,11 +72,13 @@ def AdiabticTemperatureGradient(particle, fieldset, time): c = [1.8741e-8, -6.7795e-10, 8.733e-12, -5.4481e-14] d = [-1.1351e-10, 2.7759e-12] e = [-4.6206e-13, 1.8676e-14, -2.1687e-16] - particle.adtg = (a[0] + (a[1] + (a[2] + a[3] * T68) * T68) * T68 - + (b[0] + b[1] * T68) * (s - 35) - + ((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68) - + (d[0] + d[1] * T68) * (s - 35)) * pres - + (e[0] + (e[1] + e[2] * T68) * T68) * pres * pres) + particle.adtg = ( + a[0] + + (a[1] + (a[2] + a[3] * T68) * T68) * T68 + + (b[0] + b[1] * T68) * (s - 35) + + ((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68) + (d[0] + d[1] * T68) * (s - 35)) * pres + + (e[0] + (e[1] + e[2] * T68) * T68) * pres * pres + ) def PtempFromTemp(particle, fieldset, time): @@ -130,11 +133,13 @@ def PtempFromTemp(particle, fieldset, time): c = [1.8741e-8, -6.7795e-10, 8.733e-12, -5.4481e-14] d = [-1.1351e-10, 2.7759e-12] e = [-4.6206e-13, 1.8676e-14, -2.1687e-16] - adtg = (a[0] + (a[1] + (a[2] + a[3] * T68) * T68) * T68 - + (b[0] + b[1] * T68) * (s - 35) - + ((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68) - + (d[0] + d[1] * T68) * (s - 35)) * pres - + (e[0] + (e[1] + e[2] * T68) * T68) * pres * pres) + adtg = ( + a[0] + + (a[1] + (a[2] + a[3] * T68) * T68) * T68 + + (b[0] + b[1] * T68) * (s - 35) + + ((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68) + (d[0] + d[1] * T68) * (s - 35)) * pres + + (e[0] + (e[1] + e[2] * T68) * T68) * pres * pres + ) # Theta1. del_P = pr - pres @@ -143,35 +148,41 @@ def PtempFromTemp(particle, fieldset, time): q = del_th pprime = pres + 0.5 * del_P - adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th - + (b[0] + b[1] * th) * (s - 35) - + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) - + (d[0] + d[1] * th) * (s - 35)) * pprime - + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime) + adtg = ( + a[0] + + (a[1] + (a[2] + a[3] * th) * th) * th + + (b[0] + b[1] * th) * (s - 35) + + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) + (d[0] + d[1] * th) * (s - 35)) * pprime + + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime + ) # Theta2. del_th = del_P * adtg - th = th + (1 - 1 / 2 ** 0.5) * (del_th - q) - q = (2 - 2 ** 0.5) * del_th + (-2 + 3 / 2 ** 0.5) * q + th = th + (1 - 1 / 2**0.5) * (del_th - q) + q = (2 - 2**0.5) * del_th + (-2 + 3 / 2**0.5) * q # Theta3. - adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th - + (b[0] + b[1] * th) * (s - 35) - + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) - + (d[0] + d[1] * th) * (s - 35)) * pprime - + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime) + adtg = ( + a[0] + + (a[1] + (a[2] + a[3] * th) * th) * th + + (b[0] + b[1] * th) * (s - 35) + + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) + (d[0] + d[1] * th) * (s - 35)) * pprime + + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime + ) del_th = del_P * adtg - th = th + (1 + 1 / 2 ** 0.5) * (del_th - q) - q = (2 + 2 ** 0.5) * del_th + (-2 - 3 / 2 ** 0.5) * q + th = th + (1 + 1 / 2**0.5) * (del_th - q) + q = (2 + 2**0.5) * del_th + (-2 - 3 / 2**0.5) * q # Theta4. 
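# Theta1 through Theta4 mark the four stages of a Runge-Kutta-style integration of the adiabatic lapse rate between the in-situ and reference pressures, the scheme used in the classic UNESCO potential-temperature routines.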
pprime = pres + del_P - adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th - + (b[0] + b[1] * th) * (s - 35) - + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) - + (d[0] + d[1] * th) * (s - 35)) * pprime - + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime) + adtg = ( + a[0] + + (a[1] + (a[2] + a[3] * th) * th) * th + + (b[0] + b[1] * th) * (s - 35) + + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) + (d[0] + d[1] * th) * (s - 35)) * pprime + + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime + ) del_th = del_P * adtg particle.potemp = (th + (del_th - 2 * q) / 6) / 1.00024 @@ -228,11 +239,13 @@ def TempFromPtemp(particle, fieldset, time): c = [1.8741e-8, -6.7795e-10, 8.733e-12, -5.4481e-14] d = [-1.1351e-10, 2.7759e-12] e = [-4.6206e-13, 1.8676e-14, -2.1687e-16] - adtg = (a[0] + (a[1] + (a[2] + a[3] * T68) * T68) * T68 - + (b[0] + b[1] * T68) * (s - 35) - + ((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68) - + (d[0] + d[1] * T68) * (s - 35)) * pres - + (e[0] + (e[1] + e[2] * T68) * T68) * pres * pres) + adtg = ( + a[0] + + (a[1] + (a[2] + a[3] * T68) * T68) * T68 + + (b[0] + b[1] * T68) * (s - 35) + + ((c[0] + (c[1] + (c[2] + c[3] * T68) * T68) * T68) + (d[0] + d[1] * T68) * (s - 35)) * pres + + (e[0] + (e[1] + e[2] * T68) * T68) * pres * pres + ) # Theta1. del_P = pr - pres @@ -241,35 +254,41 @@ def TempFromPtemp(particle, fieldset, time): q = del_th pprime = pres + 0.5 * del_P - adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th - + (b[0] + b[1] * th) * (s - 35) - + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) - + (d[0] + d[1] * th) * (s - 35)) * pprime - + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime) + adtg = ( + a[0] + + (a[1] + (a[2] + a[3] * th) * th) * th + + (b[0] + b[1] * th) * (s - 35) + + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) + (d[0] + d[1] * th) * (s - 35)) * pprime + + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime + ) # Theta2. del_th = del_P * adtg - th = th + (1 - 1 / 2 ** 0.5) * (del_th - q) - q = (2 - 2 ** 0.5) * del_th + (-2 + 3 / 2 ** 0.5) * q + th = th + (1 - 1 / 2**0.5) * (del_th - q) + q = (2 - 2**0.5) * del_th + (-2 + 3 / 2**0.5) * q # Theta3. - adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th - + (b[0] + b[1] * th) * (s - 35) - + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) - + (d[0] + d[1] * th) * (s - 35)) * pprime - + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime) + adtg = ( + a[0] + + (a[1] + (a[2] + a[3] * th) * th) * th + + (b[0] + b[1] * th) * (s - 35) + + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) + (d[0] + d[1] * th) * (s - 35)) * pprime + + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime + ) del_th = del_P * adtg - th = th + (1 + 1 / 2 ** 0.5) * (del_th - q) - q = (2 + 2 ** 0.5) * del_th + (-2 - 3 / 2 ** 0.5) * q + th = th + (1 + 1 / 2**0.5) * (del_th - q) + q = (2 + 2**0.5) * del_th + (-2 - 3 / 2**0.5) * q # Theta4. 
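# [Illustrative aside, not part of the diff] The Theta1..Theta4 stages in
# PtempFromTemp and TempFromPtemp are a fourth-order Runge-Kutta-Gill integration
# of dT/dp = adtg between reference and in-situ pressure. The two mid-stages
# differ only by a sign, as this hypothetical helper makes explicit:
def rk_gill_midstage(th, q, del_th, sign):
    # sign = -1 reproduces the Theta2 update, sign = +1 the Theta3 update
    th = th + (1 + sign / 2**0.5) * (del_th - q)
    q = (2 + sign * 2**0.5) * del_th + (-2 - sign * 3 / 2**0.5) * q
    return th, q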
pprime = pres + del_P - adtg = (a[0] + (a[1] + (a[2] + a[3] * th) * th) * th - + (b[0] + b[1] * th) * (s - 35) - + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) - + (d[0] + d[1] * th) * (s - 35)) * pprime - + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime) + adtg = ( + a[0] + + (a[1] + (a[2] + a[3] * th) * th) * th + + (b[0] + b[1] * th) * (s - 35) + + ((c[0] + (c[1] + (c[2] + c[3] * th) * th) * th) + (d[0] + d[1] * th) * (s - 35)) * pprime + + (e[0] + (e[1] + e[2] * th) * th) * pprime * pprime + ) del_th = del_P * adtg @@ -294,8 +313,7 @@ def UNESCODensity(particle, fieldset, time): T = fieldset.cons_temperature[time, particle.depth, particle.lat, particle.lon] # temperature P = fieldset.cons_pressure[time, particle.depth, particle.lat, particle.lon] # pressure - rsmow = a0 + a1*T + a2*math.pow(T, 2) + a3*math.pow(T, 3) + \ - a4*math.pow(T, 4) + a5*math.pow(T, 5) + rsmow = a0 + a1 * T + a2 * math.pow(T, 2) + a3 * math.pow(T, 3) + a4 * math.pow(T, 4) + a5 * math.pow(T, 5) b0 = 0.82449 b1 = -0.0040899 @@ -309,10 +327,10 @@ def UNESCODensity(particle, fieldset, time): d0 = 0.00048314 - B1 = b0 + b1*T + b2*math.pow(T, 2) + b3*math.pow(T, 3) + b_four*math.pow(T, 4) - C1 = c0 + c1*T + c2*math.pow(T, 2) + B1 = b0 + b1 * T + b2 * math.pow(T, 2) + b3 * math.pow(T, 3) + b_four * math.pow(T, 4) + C1 = c0 + c1 * T + c2 * math.pow(T, 2) - rho_st0 = rsmow + B1*S + C1*math.pow(S, 1.5) + d0*math.pow(S, 2) + rho_st0 = rsmow + B1 * S + C1 * math.pow(S, 1.5) + d0 * math.pow(S, 2) e0 = 19652.21 e1 = 148.4206 @@ -329,11 +347,11 @@ def UNESCODensity(particle, fieldset, time): g1 = 0.016483 g2 = -0.00053009 - Kw = e0 + e1*T + e2*math.pow(T, 2) + e3*math.pow(T, 3) + e4*math.pow(T, 4) - F1 = f0 + f1*T + f2*math.pow(T, 2) + f3*math.pow(T, 3) - G1 = g0 + g1*T + g2*math.pow(T, 2) + Kw = e0 + e1 * T + e2 * math.pow(T, 2) + e3 * math.pow(T, 3) + e4 * math.pow(T, 4) + F1 = f0 + f1 * T + f2 * math.pow(T, 2) + f3 * math.pow(T, 3) + G1 = g0 + g1 * T + g2 * math.pow(T, 2) - K_ST0 = Kw + F1*S + G1*math.pow(S, 1.5) + K_ST0 = Kw + F1 * S + G1 * math.pow(S, 1.5) h0 = 3.2399 h1 = 0.00143713 @@ -354,11 +372,11 @@ def UNESCODensity(particle, fieldset, time): m1 = 0.000000020816 m2 = 0.00000000091697 - Aw = h0 + h1*T + h2*math.pow(T, 2) + h3*math.pow(T, 3) - A1 = Aw + (i0 + i1*T + i2*math.pow(T, 2))*S + j0*math.pow(S, 1.5) - Bw = k0 + k1*T + k2*math.pow(T, 2) - B2 = Bw + (m0 + m1*T + m2*math.pow(T, 2))*S + Aw = h0 + h1 * T + h2 * math.pow(T, 2) + h3 * math.pow(T, 3) + A1 = Aw + (i0 + i1 * T + i2 * math.pow(T, 2)) * S + j0 * math.pow(S, 1.5) + Bw = k0 + k1 * T + k2 * math.pow(T, 2) + B2 = Bw + (m0 + m1 * T + m2 * math.pow(T, 2)) * S - K_STp = K_ST0 + A1*P + B2*math.pow(T, 2) + K_STp = K_ST0 + A1 * P + B2 * math.pow(T, 2) - particle.density = rho_st0/(1-(P/K_STp)) + particle.density = rho_st0 / (1 - (P / K_STp)) diff --git a/parcels/application_kernels/TEOSseawaterdensity.py b/parcels/application_kernels/TEOSseawaterdensity.py index 18bff588..e25a7213 100644 --- a/parcels/application_kernels/TEOSseawaterdensity.py +++ b/parcels/application_kernels/TEOSseawaterdensity.py @@ -1,7 +1,8 @@ """Collection of pre-built sea water density kernels.""" + import math -__all__ = ['PolyTEOS10_bsq'] +__all__ = ["PolyTEOS10_bsq"] def PolyTEOS10_bsq(particle, fieldset, time): @@ -22,7 +23,7 @@ def PolyTEOS10_bsq(particle, fieldset, time): Oceanic Technology, 20, 730-741. 
""" - Z = - math.fabs(particle.depth) # Z needs to be negative + Z = -math.fabs(particle.depth) # Z needs to be negative SA = fieldset.abs_salinity[time, particle.depth, particle.lat, particle.lon] CT = fieldset.cons_temperature[time, particle.depth, particle.lat, particle.lon] @@ -30,55 +31,55 @@ def PolyTEOS10_bsq(particle, fieldset, time): CTu = 40 Zu = 1e4 deltaS = 32 - R000 = 8.0189615746e+02 - R100 = 8.6672408165e+02 - R200 = -1.7864682637e+03 - R300 = 2.0375295546e+03 - R400 = -1.2849161071e+03 - R500 = 4.3227585684e+02 - R600 = -6.0579916612e+01 - R010 = 2.6010145068e+01 - R110 = -6.5281885265e+01 - R210 = 8.1770425108e+01 - R310 = -5.6888046321e+01 - R410 = 1.7681814114e+01 - R510 = -1.9193502195e+00 - R020 = -3.7074170417e+01 - R120 = 6.1548258127e+01 - R220 = -6.0362551501e+01 - R320 = 2.9130021253e+01 - R420 = -5.4723692739e+00 - R030 = 2.1661789529e+01 - R130 = -3.3449108469e+01 - R230 = 1.9717078466e+01 - R330 = -3.1742946532e+00 - R040 = -8.3627885467e+00 - R140 = 1.1311538584e+01 - R240 = -5.3563304045e+00 + R000 = 8.0189615746e02 + R100 = 8.6672408165e02 + R200 = -1.7864682637e03 + R300 = 2.0375295546e03 + R400 = -1.2849161071e03 + R500 = 4.3227585684e02 + R600 = -6.0579916612e01 + R010 = 2.6010145068e01 + R110 = -6.5281885265e01 + R210 = 8.1770425108e01 + R310 = -5.6888046321e01 + R410 = 1.7681814114e01 + R510 = -1.9193502195e00 + R020 = -3.7074170417e01 + R120 = 6.1548258127e01 + R220 = -6.0362551501e01 + R320 = 2.9130021253e01 + R420 = -5.4723692739e00 + R030 = 2.1661789529e01 + R130 = -3.3449108469e01 + R230 = 1.9717078466e01 + R330 = -3.1742946532e00 + R040 = -8.3627885467e00 + R140 = 1.1311538584e01 + R240 = -5.3563304045e00 R050 = 5.4048723791e-01 R150 = 4.8169980163e-01 R060 = -1.9083568888e-01 - R001 = 1.9681925209e+01 - R101 = -4.2549998214e+01 - R201 = 5.0774768218e+01 - R301 = -3.0938076334e+01 - R401 = 6.6051753097e+00 - R011 = -1.3336301113e+01 - R111 = -4.4870114575e+00 - R211 = 5.0042598061e+00 + R001 = 1.9681925209e01 + R101 = -4.2549998214e01 + R201 = 5.0774768218e01 + R301 = -3.0938076334e01 + R401 = 6.6051753097e00 + R011 = -1.3336301113e01 + R111 = -4.4870114575e00 + R211 = 5.0042598061e00 R311 = -6.5399043664e-01 - R021 = 6.7080479603e+00 - R121 = 3.5063081279e+00 - R221 = -1.8795372996e+00 - R031 = -2.4649669534e+00 + R021 = 6.7080479603e00 + R121 = 3.5063081279e00 + R221 = -1.8795372996e00 + R031 = -2.4649669534e00 R131 = -5.5077101279e-01 R041 = 5.5927935970e-01 - R002 = 2.0660924175e+00 - R102 = -4.9527603989e+00 - R202 = 2.5019633244e+00 - R012 = 2.0564311499e+00 + R002 = 2.0660924175e00 + R102 = -4.9527603989e00 + R202 = 2.5019633244e00 + R012 = 2.0564311499e00 R112 = -2.1311365518e-01 - R022 = -1.2419983026e+00 + R022 = -1.2419983026e00 R003 = -2.3342758797e-02 R103 = -1.8507636718e-02 R013 = 3.7969820455e-01 @@ -87,6 +88,34 @@ def PolyTEOS10_bsq(particle, fieldset, time): zz = -Z / Zu rz3 = R013 * tt + R103 * ss + R003 rz2 = (R022 * tt + R112 * ss + R012) * tt + (R202 * ss + R102) * ss + R002 - rz1 = (((R041 * tt + R131 * ss + R031) * tt + (R221 * ss + R121) * ss + R021) * tt + ((R311 * ss + R211) * ss + R111) * ss + R011) * tt + (((R401 * ss + R301) * ss + R201) * ss + R101) * ss + R001 - rz0 = (((((R060 * tt + R150 * ss + R050) * tt + (R240 * ss + R140) * ss + R040) * tt + ((R330 * ss + R230) * ss + R130) * ss + R030) * tt + (((R420 * ss + R320) * ss + R220) * ss + R120) * ss + R020) * tt + ((((R510 * ss + R410) * ss + R310) * ss + R210) * ss + R110) * ss + R010) * tt + (((((R600 * ss + R500) * ss + R400) * ss + R300) * ss + R200) * ss + 
R100) * ss + R000 + rz1 = ( + ( + ((R041 * tt + R131 * ss + R031) * tt + (R221 * ss + R121) * ss + R021) * tt + + ((R311 * ss + R211) * ss + R111) * ss + + R011 + ) + * tt + + (((R401 * ss + R301) * ss + R201) * ss + R101) * ss + + R001 + ) + rz0 = ( + ( + ( + ( + ((R060 * tt + R150 * ss + R050) * tt + (R240 * ss + R140) * ss + R040) * tt + + ((R330 * ss + R230) * ss + R130) * ss + + R030 + ) + * tt + + (((R420 * ss + R320) * ss + R220) * ss + R120) * ss + + R020 + ) + * tt + + ((((R510 * ss + R410) * ss + R310) * ss + R210) * ss + R110) * ss + + R010 + ) + * tt + + (((((R600 * ss + R500) * ss + R400) * ss + R300) * ss + R200) * ss + R100) * ss + + R000 + ) particle.density = ((rz3 * zz + rz2) * zz + rz1) * zz + rz0 diff --git a/parcels/application_kernels/advection.py b/parcels/application_kernels/advection.py index 0868ed99..8dbaa95c 100644 --- a/parcels/application_kernels/advection.py +++ b/parcels/application_kernels/advection.py @@ -1,43 +1,43 @@ """Collection of pre-built advection kernels.""" + import math from parcels.tools.statuscodes import StatusCode -__all__ = ['AdvectionRK4', 'AdvectionEE', 'AdvectionRK45', 'AdvectionRK4_3D', - 'AdvectionAnalytical'] +__all__ = ["AdvectionRK4", "AdvectionEE", "AdvectionRK45", "AdvectionRK4_3D", "AdvectionAnalytical"] def AdvectionRK4(particle, fieldset, time): """Advection of particles using fourth-order Runge-Kutta integration.""" (u1, v1) = fieldset.UV[particle] - lon1, lat1 = (particle.lon + u1*.5*particle.dt, particle.lat + v1*.5*particle.dt) - (u2, v2) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat1, lon1, particle] - lon2, lat2 = (particle.lon + u2*.5*particle.dt, particle.lat + v2*.5*particle.dt) - (u3, v3) = fieldset.UV[time + .5 * particle.dt, particle.depth, lat2, lon2, particle] - lon3, lat3 = (particle.lon + u3*particle.dt, particle.lat + v3*particle.dt) + lon1, lat1 = (particle.lon + u1 * 0.5 * particle.dt, particle.lat + v1 * 0.5 * particle.dt) + (u2, v2) = fieldset.UV[time + 0.5 * particle.dt, particle.depth, lat1, lon1, particle] + lon2, lat2 = (particle.lon + u2 * 0.5 * particle.dt, particle.lat + v2 * 0.5 * particle.dt) + (u3, v3) = fieldset.UV[time + 0.5 * particle.dt, particle.depth, lat2, lon2, particle] + lon3, lat3 = (particle.lon + u3 * particle.dt, particle.lat + v3 * particle.dt) (u4, v4) = fieldset.UV[time + particle.dt, particle.depth, lat3, lon3, particle] - particle_dlon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt # noqa - particle_dlat += (v1 + 2*v2 + 2*v3 + v4) / 6. 
* particle.dt # noqa + particle_dlon += (u1 + 2 * u2 + 2 * u3 + u4) / 6.0 * particle.dt # noqa + particle_dlat += (v1 + 2 * v2 + 2 * v3 + v4) / 6.0 * particle.dt # noqa def AdvectionRK4_3D(particle, fieldset, time): """Advection of particles using fourth-order Runge-Kutta integration including vertical velocity.""" (u1, v1, w1) = fieldset.UVW[particle] - lon1 = particle.lon + u1*.5*particle.dt - lat1 = particle.lat + v1*.5*particle.dt - dep1 = particle.depth + w1*.5*particle.dt - (u2, v2, w2) = fieldset.UVW[time + .5 * particle.dt, dep1, lat1, lon1, particle] - lon2 = particle.lon + u2*.5*particle.dt - lat2 = particle.lat + v2*.5*particle.dt - dep2 = particle.depth + w2*.5*particle.dt - (u3, v3, w3) = fieldset.UVW[time + .5 * particle.dt, dep2, lat2, lon2, particle] - lon3 = particle.lon + u3*particle.dt - lat3 = particle.lat + v3*particle.dt - dep3 = particle.depth + w3*particle.dt + lon1 = particle.lon + u1 * 0.5 * particle.dt + lat1 = particle.lat + v1 * 0.5 * particle.dt + dep1 = particle.depth + w1 * 0.5 * particle.dt + (u2, v2, w2) = fieldset.UVW[time + 0.5 * particle.dt, dep1, lat1, lon1, particle] + lon2 = particle.lon + u2 * 0.5 * particle.dt + lat2 = particle.lat + v2 * 0.5 * particle.dt + dep2 = particle.depth + w2 * 0.5 * particle.dt + (u3, v3, w3) = fieldset.UVW[time + 0.5 * particle.dt, dep2, lat2, lon2, particle] + lon3 = particle.lon + u3 * particle.dt + lat3 = particle.lat + v3 * particle.dt + dep3 = particle.depth + w3 * particle.dt (u4, v4, w4) = fieldset.UVW[time + particle.dt, dep3, lat3, lon3, particle] - particle_dlon += (u1 + 2*u2 + 2*u3 + u4) / 6. * particle.dt # noqa - particle_dlat += (v1 + 2*v2 + 2*v3 + v4) / 6. * particle.dt # noqa - particle_ddepth += (w1 + 2*w2 + 2*w3 + w4) / 6. * particle.dt # noqa + particle_dlon += (u1 + 2 * u2 + 2 * u3 + u4) / 6.0 * particle.dt # noqa + particle_dlat += (v1 + 2 * v2 + 2 * v3 + v4) / 6.0 * particle.dt # noqa + particle_ddepth += (w1 + 2 * w2 + 2 * w3 + w4) / 6.0 * particle.dt # noqa def AdvectionEE(particle, fieldset, time): @@ -58,30 +58,39 @@ def AdvectionRK45(particle, fieldset, time): and doubled if error is smaller than 1/10th of tolerance. """ particle.dt = min(particle.next_dt, fieldset.RK45_max_dt) - c = [1./4., 3./8., 12./13., 1., 1./2.] - A = [[1./4., 0., 0., 0., 0.], - [3./32., 9./32., 0., 0., 0.], - [1932./2197., -7200./2197., 7296./2197., 0., 0.], - [439./216., -8., 3680./513., -845./4104., 0.], - [-8./27., 2., -3544./2565., 1859./4104., -11./40.]] - b4 = [25./216., 0., 1408./2565., 2197./4104., -1./5.] - b5 = [16./135., 0., 6656./12825., 28561./56430., -9./50., 2./55.] 
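# [Illustrative aside, not part of the diff] The coefficients rewritten below are
# the classic Runge-Kutta-Fehlberg 4(5) tableau; each row of A sums to its node
# c, which this hypothetical consistency check (not in the repo) confirms:
A = [
    [1 / 4, 0, 0, 0, 0],
    [3 / 32, 9 / 32, 0, 0, 0],
    [1932 / 2197, -7200 / 2197, 7296 / 2197, 0, 0],
    [439 / 216, -8, 3680 / 513, -845 / 4104, 0],
    [-8 / 27, 2, -3544 / 2565, 1859 / 4104, -11 / 40],
]
c = [1 / 4, 3 / 8, 12 / 13, 1, 1 / 2]
for row, node in zip(A, c):
    assert abs(sum(row) - node) < 1e-12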
+ c = [1.0 / 4.0, 3.0 / 8.0, 12.0 / 13.0, 1.0, 1.0 / 2.0] + A = [ + [1.0 / 4.0, 0.0, 0.0, 0.0, 0.0], + [3.0 / 32.0, 9.0 / 32.0, 0.0, 0.0, 0.0], + [1932.0 / 2197.0, -7200.0 / 2197.0, 7296.0 / 2197.0, 0.0, 0.0], + [439.0 / 216.0, -8.0, 3680.0 / 513.0, -845.0 / 4104.0, 0.0], + [-8.0 / 27.0, 2.0, -3544.0 / 2565.0, 1859.0 / 4104.0, -11.0 / 40.0], + ] + b4 = [25.0 / 216.0, 0.0, 1408.0 / 2565.0, 2197.0 / 4104.0, -1.0 / 5.0] + b5 = [16.0 / 135.0, 0.0, 6656.0 / 12825.0, 28561.0 / 56430.0, -9.0 / 50.0, 2.0 / 55.0] (u1, v1) = fieldset.UV[particle] - lon1, lat1 = (particle.lon + u1 * A[0][0] * particle.dt, - particle.lat + v1 * A[0][0] * particle.dt) + lon1, lat1 = (particle.lon + u1 * A[0][0] * particle.dt, particle.lat + v1 * A[0][0] * particle.dt) (u2, v2) = fieldset.UV[time + c[0] * particle.dt, particle.depth, lat1, lon1, particle] - lon2, lat2 = (particle.lon + (u1 * A[1][0] + u2 * A[1][1]) * particle.dt, - particle.lat + (v1 * A[1][0] + v2 * A[1][1]) * particle.dt) + lon2, lat2 = ( + particle.lon + (u1 * A[1][0] + u2 * A[1][1]) * particle.dt, + particle.lat + (v1 * A[1][0] + v2 * A[1][1]) * particle.dt, + ) (u3, v3) = fieldset.UV[time + c[1] * particle.dt, particle.depth, lat2, lon2, particle] - lon3, lat3 = (particle.lon + (u1 * A[2][0] + u2 * A[2][1] + u3 * A[2][2]) * particle.dt, - particle.lat + (v1 * A[2][0] + v2 * A[2][1] + v3 * A[2][2]) * particle.dt) + lon3, lat3 = ( + particle.lon + (u1 * A[2][0] + u2 * A[2][1] + u3 * A[2][2]) * particle.dt, + particle.lat + (v1 * A[2][0] + v2 * A[2][1] + v3 * A[2][2]) * particle.dt, + ) (u4, v4) = fieldset.UV[time + c[2] * particle.dt, particle.depth, lat3, lon3, particle] - lon4, lat4 = (particle.lon + (u1 * A[3][0] + u2 * A[3][1] + u3 * A[3][2] + u4 * A[3][3]) * particle.dt, - particle.lat + (v1 * A[3][0] + v2 * A[3][1] + v3 * A[3][2] + v4 * A[3][3]) * particle.dt) + lon4, lat4 = ( + particle.lon + (u1 * A[3][0] + u2 * A[3][1] + u3 * A[3][2] + u4 * A[3][3]) * particle.dt, + particle.lat + (v1 * A[3][0] + v2 * A[3][1] + v3 * A[3][2] + v4 * A[3][3]) * particle.dt, + ) (u5, v5) = fieldset.UV[time + c[3] * particle.dt, particle.depth, lat4, lon4, particle] - lon5, lat5 = (particle.lon + (u1 * A[4][0] + u2 * A[4][1] + u3 * A[4][2] + u4 * A[4][3] + u5 * A[4][4]) * particle.dt, - particle.lat + (v1 * A[4][0] + v2 * A[4][1] + v3 * A[4][2] + v4 * A[4][3] + v5 * A[4][4]) * particle.dt) + lon5, lat5 = ( + particle.lon + (u1 * A[4][0] + u2 * A[4][1] + u3 * A[4][2] + u4 * A[4][3] + u5 * A[4][4]) * particle.dt, + particle.lat + (v1 * A[4][0] + v2 * A[4][1] + v3 * A[4][2] + v4 * A[4][3] + v5 * A[4][4]) * particle.dt, + ) (u6, v6) = fieldset.UV[time + c[4] * particle.dt, particle.depth, lat5, lon5, particle] lon_4th = (u1 * b4[0] + u2 * b4[1] + u3 * b4[2] + u4 * b4[3] + u5 * b4[4]) * particle.dt @@ -93,7 +102,7 @@ def AdvectionRK45(particle, fieldset, time): if (kappa <= fieldset.RK45_tol) or (math.fabs(particle.dt) < math.fabs(fieldset.RK45_min_dt)): particle_dlon += lon_4th # noqa particle_dlat += lat_4th # noqa - if (kappa <= fieldset.RK45_tol) / 10 and (math.fabs(particle.dt*2) <= math.fabs(fieldset.RK45_max_dt)): + if (kappa <= fieldset.RK45_tol) / 10 and (math.fabs(particle.dt * 2) <= math.fabs(fieldset.RK45_max_dt)): particle.next_dt *= 2 else: particle.next_dt /= 2 @@ -113,37 +122,39 @@ def AdvectionAnalytical(particle, fieldset, time): tol = 1e-10 I_s = 10 # number of intermediate time steps - direction = 1. if particle.dt > 0 else -1. 
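# [Illustrative aside, not part of the diff] The RK45 step-doubling guard kept
# verbatim above, `(kappa <= fieldset.RK45_tol) / 10`, divides a boolean by 10,
# so it is truthy whenever kappa <= tol at all. A quick demonstration (values
# hypothetical); the conventional criterion would compare against tol / 10:
kappa, tol = 9e-6, 1e-5
print((kappa <= tol) / 10)  # 0.1 -> truthy, although kappa is not an order of magnitude below tol
print(kappa <= tol / 10)    # False: the presumably intended, stricter test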
- withW = True if 'W' in [f.name for f in fieldset.get_fields()] else False + direction = 1.0 if particle.dt > 0 else -1.0 + withW = True if "W" in [f.name for f in fieldset.get_fields()] else False withTime = True if len(fieldset.U.grid.time_full) > 1 else False ti = fieldset.U.time_index(time)[0] ds_t = particle.dt if withTime: - tau = (time - fieldset.U.grid.time[ti]) / (fieldset.U.grid.time[ti+1] - fieldset.U.grid.time[ti]) - time_i = np.linspace(0, fieldset.U.grid.time[ti+1] - fieldset.U.grid.time[ti], I_s) + tau = (time - fieldset.U.grid.time[ti]) / (fieldset.U.grid.time[ti + 1] - fieldset.U.grid.time[ti]) + time_i = np.linspace(0, fieldset.U.grid.time[ti + 1] - fieldset.U.grid.time[ti], I_s) ds_t = min(ds_t, time_i[np.where(time - fieldset.U.grid.time[ti] < time_i)[0][0]]) - xsi, eta, zeta, xi, yi, zi = fieldset.U.search_indices(particle.lon, particle.lat, particle.depth, particle=particle) + xsi, eta, zeta, xi, yi, zi = fieldset.U.search_indices( + particle.lon, particle.lat, particle.depth, particle=particle + ) if withW: if abs(xsi - 1) < tol: - if fieldset.U.data[0, zi+1, yi+1, xi+1] > 0: + if fieldset.U.data[0, zi + 1, yi + 1, xi + 1] > 0: xi += 1 xsi = 0 if abs(eta - 1) < tol: - if fieldset.V.data[0, zi+1, yi+1, xi+1] > 0: + if fieldset.V.data[0, zi + 1, yi + 1, xi + 1] > 0: yi += 1 eta = 0 if abs(zeta - 1) < tol: - if fieldset.W.data[0, zi+1, yi+1, xi+1] > 0: + if fieldset.W.data[0, zi + 1, yi + 1, xi + 1] > 0: zi += 1 zeta = 0 else: if abs(xsi - 1) < tol: - if fieldset.U.data[0, yi+1, xi+1] > 0: + if fieldset.U.data[0, yi + 1, xi + 1] > 0: xi += 1 xsi = 0 if abs(eta - 1) < tol: - if fieldset.V.data[0, yi+1, xi+1] > 0: + if fieldset.V.data[0, yi + 1, xi + 1] > 0: yi += 1 eta = 0 @@ -158,52 +169,52 @@ def AdvectionAnalytical(particle, fieldset, time): else: px = np.array([grid.lon[yi, xi], grid.lon[yi, xi + 1], grid.lon[yi + 1, xi + 1], grid.lon[yi + 1, xi]]) py = np.array([grid.lat[yi, xi], grid.lat[yi, xi + 1], grid.lat[yi + 1, xi + 1], grid.lat[yi + 1, xi]]) - if grid.mesh == 'spherical': - px[0] = px[0]+360 if px[0] < particle.lon-225 else px[0] - px[0] = px[0]-360 if px[0] > particle.lat+225 else px[0] - px[1:] = np.where(px[1:] - px[0] > 180, px[1:]-360, px[1:]) - px[1:] = np.where(-px[1:] + px[0] > 180, px[1:]+360, px[1:]) + if grid.mesh == "spherical": + px[0] = px[0] + 360 if px[0] < particle.lon - 225 else px[0] + px[0] = px[0] - 360 if px[0] > particle.lat + 225 else px[0] + px[1:] = np.where(px[1:] - px[0] > 180, px[1:] - 360, px[1:]) + px[1:] = np.where(-px[1:] + px[0] > 180, px[1:] + 360, px[1:]) if withW: - pz = np.array([grid.depth[zi], grid.depth[zi+1]]) + pz = np.array([grid.depth[zi], grid.depth[zi + 1]]) dz = pz[1] - pz[0] else: - dz = 1. - - c1 = fieldset.UV.dist(px[0], px[1], py[0], py[1], grid.mesh, np.dot(i_u.phi2D_lin(xsi, 0.), py)) - c2 = fieldset.UV.dist(px[1], px[2], py[1], py[2], grid.mesh, np.dot(i_u.phi2D_lin(1., eta), py)) - c3 = fieldset.UV.dist(px[2], px[3], py[2], py[3], grid.mesh, np.dot(i_u.phi2D_lin(xsi, 1.), py)) - c4 = fieldset.UV.dist(px[3], px[0], py[3], py[0], grid.mesh, np.dot(i_u.phi2D_lin(0., eta), py)) - rad = np.pi / 180. - deg2m = 1852 * 60. 
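# [Illustrative aside, not part of the diff] 1852 m is one nautical mile (one
# arc-minute of latitude), so deg2m = 1852 * 60 is metres per degree of latitude;
# the meshJac line just below then scales zonal distances by cos(lat) on a
# spherical mesh:
import math
deg2m = 1852 * 60.0                        # 111120 m per degree of latitude
print(deg2m * math.cos(math.radians(45)))  # ~78574 m per degree of longitude at 45N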
- meshJac = (deg2m * deg2m * math.cos(rad * particle.lat)) if grid.mesh == 'spherical' else 1 + dz = 1.0 + + c1 = fieldset.UV.dist(px[0], px[1], py[0], py[1], grid.mesh, np.dot(i_u.phi2D_lin(xsi, 0.0), py)) + c2 = fieldset.UV.dist(px[1], px[2], py[1], py[2], grid.mesh, np.dot(i_u.phi2D_lin(1.0, eta), py)) + c3 = fieldset.UV.dist(px[2], px[3], py[2], py[3], grid.mesh, np.dot(i_u.phi2D_lin(xsi, 1.0), py)) + c4 = fieldset.UV.dist(px[3], px[0], py[3], py[0], grid.mesh, np.dot(i_u.phi2D_lin(0.0, eta), py)) + rad = np.pi / 180.0 + deg2m = 1852 * 60.0 + meshJac = (deg2m * deg2m * math.cos(rad * particle.lat)) if grid.mesh == "spherical" else 1 dxdy = fieldset.UV.jacobian(xsi, eta, px, py) * meshJac if withW: - U0 = direction * fieldset.U.data[ti, zi+1, yi+1, xi] * c4 * dz - U1 = direction * fieldset.U.data[ti, zi+1, yi+1, xi+1] * c2 * dz - V0 = direction * fieldset.V.data[ti, zi+1, yi, xi+1] * c1 * dz - V1 = direction * fieldset.V.data[ti, zi+1, yi+1, xi+1] * c3 * dz + U0 = direction * fieldset.U.data[ti, zi + 1, yi + 1, xi] * c4 * dz + U1 = direction * fieldset.U.data[ti, zi + 1, yi + 1, xi + 1] * c2 * dz + V0 = direction * fieldset.V.data[ti, zi + 1, yi, xi + 1] * c1 * dz + V1 = direction * fieldset.V.data[ti, zi + 1, yi + 1, xi + 1] * c3 * dz if withTime: - U0 = U0 * (1 - tau) + tau * direction * fieldset.U.data[ti+1, zi+1, yi+1, xi] * c4 * dz - U1 = U1 * (1 - tau) + tau * direction * fieldset.U.data[ti+1, zi+1, yi+1, xi+1] * c2 * dz - V0 = V0 * (1 - tau) + tau * direction * fieldset.V.data[ti+1, zi+1, yi, xi+1] * c1 * dz - V1 = V1 * (1 - tau) + tau * direction * fieldset.V.data[ti+1, zi+1, yi+1, xi+1] * c3 * dz + U0 = U0 * (1 - tau) + tau * direction * fieldset.U.data[ti + 1, zi + 1, yi + 1, xi] * c4 * dz + U1 = U1 * (1 - tau) + tau * direction * fieldset.U.data[ti + 1, zi + 1, yi + 1, xi + 1] * c2 * dz + V0 = V0 * (1 - tau) + tau * direction * fieldset.V.data[ti + 1, zi + 1, yi, xi + 1] * c1 * dz + V1 = V1 * (1 - tau) + tau * direction * fieldset.V.data[ti + 1, zi + 1, yi + 1, xi + 1] * c3 * dz else: - U0 = direction * fieldset.U.data[ti, yi+1, xi] * c4 * dz - U1 = direction * fieldset.U.data[ti, yi+1, xi+1] * c2 * dz - V0 = direction * fieldset.V.data[ti, yi, xi+1] * c1 * dz - V1 = direction * fieldset.V.data[ti, yi+1, xi+1] * c3 * dz + U0 = direction * fieldset.U.data[ti, yi + 1, xi] * c4 * dz + U1 = direction * fieldset.U.data[ti, yi + 1, xi + 1] * c2 * dz + V0 = direction * fieldset.V.data[ti, yi, xi + 1] * c1 * dz + V1 = direction * fieldset.V.data[ti, yi + 1, xi + 1] * c3 * dz if withTime: - U0 = U0 * (1 - tau) + tau * direction * fieldset.U.data[ti+1, yi+1, xi] * c4 * dz - U1 = U1 * (1 - tau) + tau * direction * fieldset.U.data[ti+1, yi+1, xi+1] * c2 * dz - V0 = V0 * (1 - tau) + tau * direction * fieldset.V.data[ti+1, yi, xi+1] * c1 * dz - V1 = V1 * (1 - tau) + tau * direction * fieldset.V.data[ti+1, yi+1, xi+1] * c3 * dz + U0 = U0 * (1 - tau) + tau * direction * fieldset.U.data[ti + 1, yi + 1, xi] * c4 * dz + U1 = U1 * (1 - tau) + tau * direction * fieldset.U.data[ti + 1, yi + 1, xi + 1] * c2 * dz + V0 = V0 * (1 - tau) + tau * direction * fieldset.V.data[ti + 1, yi, xi + 1] * c1 * dz + V1 = V1 * (1 - tau) + tau * direction * fieldset.V.data[ti + 1, yi + 1, xi + 1] * c3 * dz def compute_ds(F0, F1, r, direction, tol): - up = F0 * (1-r) + F1 * r - r_target = 1. if direction * up >= 0. else 0. 
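# [Illustrative aside, not part of the diff] compute_ds (rewritten below) solves
# dr/ds = F0*(1-r) + F1*r exactly for the pseudo-time ds at which the particle
# reaches a cell face; a hypothetical numerical check of the logarithmic branch:
import math
F0, F1, r = 0.5, 1.0, 0.25        # made-up face velocities, flow towards the face r = 1
B, delta = F0 - F1, -F0
ds = -1.0 / B * math.log((1.0 + delta / B) / (r + delta / B))
r_at_ds = -delta / B + (r + delta / B) * math.exp(-B * ds)
assert abs(r_at_ds - 1.0) < 1e-12  # the trajectory indeed hits the face at pseudo-time ds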
+ up = F0 * (1 - r) + F1 * r + r_target = 1.0 if direction * up >= 0.0 else 0.0 B = F0 - F1 - delta = - F0 + delta = -F0 B = 0 if abs(B) < tol else B if abs(B) > tol: @@ -213,29 +224,29 @@ def AdvectionAnalytical(particle, fieldset, time): F_r0, F_r1 = None, None if abs(B) < tol and abs(delta) < tol: - ds = float('inf') + ds = float("inf") elif B == 0: ds = -(r_target - r) / delta elif F_r1 * F_r0 < tol: - ds = float('inf') + ds = float("inf") else: - ds = - 1. / B * math.log(F_r1 / F_r0) + ds = -1.0 / B * math.log(F_r1 / F_r0) if abs(ds) < tol: - ds = float('inf') + ds = float("inf") return ds, B, delta ds_x, B_x, delta_x = compute_ds(U0, U1, xsi, direction, tol) ds_y, B_y, delta_y = compute_ds(V0, V1, eta, direction, tol) if withW: - W0 = direction * fieldset.W.data[ti, zi, yi+1, xi+1] * dxdy - W1 = direction * fieldset.W.data[ti, zi+1, yi+1, xi+1] * dxdy + W0 = direction * fieldset.W.data[ti, zi, yi + 1, xi + 1] * dxdy + W1 = direction * fieldset.W.data[ti, zi + 1, yi + 1, xi + 1] * dxdy if withTime: - W0 = W0 * (1 - tau) + tau * direction * fieldset.W.data[ti+1, zi, yi + 1, xi + 1] * dxdy - W1 = W1 * (1 - tau) + tau * direction * fieldset.W.data[ti+1, zi + 1, yi + 1, xi + 1] * dxdy + W0 = W0 * (1 - tau) + tau * direction * fieldset.W.data[ti + 1, zi, yi + 1, xi + 1] * dxdy + W1 = W1 * (1 - tau) + tau * direction * fieldset.W.data[ti + 1, zi + 1, yi + 1, xi + 1] * dxdy ds_z, B_z, delta_z = compute_ds(W0, W1, zeta, direction, tol) else: - ds_z = float('inf') + ds_z = float("inf") # take the minimum travel time s_min = min(abs(ds_x), abs(ds_y), abs(ds_z), abs(ds_t / (dxdy * dz))) @@ -250,12 +261,24 @@ def AdvectionAnalytical(particle, fieldset, time): rs_x = compute_rs(xsi, B_x, delta_x, s_min) rs_y = compute_rs(eta, B_y, delta_y, s_min) - particle_dlon += (1.-rs_x)*(1.-rs_y) * px[0] + rs_x * (1.-rs_y) * px[1] + rs_x * rs_y * px[2] + (1.-rs_x)*rs_y * px[3] - particle.lon # noqa - particle_dlat += (1.-rs_x)*(1.-rs_y) * py[0] + rs_x * (1.-rs_y) * py[1] + rs_x * rs_y * py[2] + (1.-rs_x)*rs_y * py[3] - particle.lat # noqa + particle_dlon += ( + (1.0 - rs_x) * (1.0 - rs_y) * px[0] + + rs_x * (1.0 - rs_y) * px[1] + + rs_x * rs_y * px[2] + + (1.0 - rs_x) * rs_y * px[3] + - particle.lon + ) # noqa + particle_dlat += ( + (1.0 - rs_x) * (1.0 - rs_y) * py[0] + + rs_x * (1.0 - rs_y) * py[1] + + rs_x * rs_y * py[2] + + (1.0 - rs_x) * rs_y * py[3] + - particle.lat + ) # noqa if withW: rs_z = compute_rs(zeta, B_z, delta_z, s_min) - particle_ddepth += (1.-rs_z) * pz[0] + rs_z * pz[1] - particle.depth # noqa + particle_ddepth += (1.0 - rs_z) * pz[0] + rs_z * pz[1] - particle.depth # noqa if particle.dt > 0: particle.dt = max(direction * s_min * (dxdy * dz), 1e-7) diff --git a/parcels/application_kernels/advectiondiffusion.py b/parcels/application_kernels/advectiondiffusion.py index 259ae305..dc5c7b5f 100644 --- a/parcels/application_kernels/advectiondiffusion.py +++ b/parcels/application_kernels/advectiondiffusion.py @@ -2,11 +2,16 @@ See `this tutorial <../examples/tutorial_diffusion.ipynb>`__ for a detailed explanation. 
""" + import math import parcels -__all__ = ['DiffusionUniformKh', 'AdvectionDiffusionM1', 'AdvectionDiffusionEM', ] +__all__ = [ + "DiffusionUniformKh", + "AdvectionDiffusionM1", + "AdvectionDiffusionEM", +] def AdvectionDiffusionM1(particle, fieldset, time): diff --git a/parcels/application_kernels/interaction.py b/parcels/application_kernels/interaction.py index 4db0eaae..cbf9a3ac 100644 --- a/parcels/application_kernels/interaction.py +++ b/parcels/application_kernels/interaction.py @@ -1,10 +1,10 @@ """Collection of pre-built interaction kernels.""" + import numpy as np from parcels.tools.statuscodes import StatusCode -__all__ = ['AsymmetricAttraction', 'NearestNeighborWithinRange', - 'MergeWithNearestNeighbor'] +__all__ = ["AsymmetricAttraction", "NearestNeighborWithinRange", "MergeWithNearestNeighbor"] def NearestNeighborWithinRange(particle, fieldset, time, neighbors, mutator): @@ -29,6 +29,7 @@ def NearestNeighborWithinRange(particle, fieldset, time, neighbors, mutator): def f(p, neighbor): p.nearest_neighbor = neighbor + mutator[particle.id].append((f, [neighbor_id])) return StatusCode.Success @@ -42,6 +43,7 @@ def MergeWithNearestNeighbor(particle, fieldset, time, neighbors, mutator): properties. Only pairs of particles that have each other as nearest neighbors will be merged. """ + def delete_particle(p): p.state = StatusCode.Delete @@ -87,13 +89,12 @@ def AsymmetricAttraction(particle, fieldset, time, neighbors, mutator): velocity_param = 0.04 for n in na_neighbors: assert n.dt == particle.dt - dx = np.array([particle.lat-n.lat, particle.lon-n.lon, - particle.depth-n.depth]) + dx = np.array([particle.lat - n.lat, particle.lon - n.lon, particle.depth - n.depth]) dx_norm = np.linalg.norm(dx) - velocity = velocity_param/(dx_norm**2) + velocity = velocity_param / (dx_norm**2) - distance = velocity*n.dt - d_vec = distance*dx/dx_norm + distance = velocity * n.dt + d_vec = distance * dx / dx_norm def f(n, dlat, dlon, ddepth): n.lat_nextloop += dlat diff --git a/parcels/compilation/codecompiler.py b/parcels/compilation/codecompiler.py index a268135a..406daf59 100644 --- a/parcels/compilation/codecompiler.py +++ b/parcels/compilation/codecompiler.py @@ -9,6 +9,7 @@ except ModuleNotFoundError: _tmp_dir = os.getcwd() + class Compiler_parameters: def __init__(self): self._compiler = "" @@ -81,35 +82,35 @@ class GNU_parameters(Compiler_parameters): Iflags = [] if isinstance(incdirs, list): for dir in incdirs: - Iflags.append("-I"+dir) + Iflags.append("-I" + dir) Lflags = [] if isinstance(libdirs, list): for dir in libdirs: - Lflags.append("-L"+dir) + Lflags.append("-L" + dir) lflags = [] if isinstance(libs, list): for lib in libs: lflags.append("-l" + lib) - cc_env = os.getenv('CC') + cc_env = os.getenv("CC") mpicc = None if MPI: - mpicc_env = os.getenv('MPICC') + mpicc_env = os.getenv("MPICC") mpicc = mpicc_env mpicc = "mpicc" if mpicc is None and os._exists("mpicc") else None mpicc = "mpiCC" if mpicc is None and os._exists("mpiCC") else None self._compiler = mpicc if MPI and mpicc is not None else cc_env if cc_env is not None else "gcc" - opt_flags = ['-g', '-O3'] - arch_flag = ['-m64' if calcsize("P") == 8 else '-m32'] - self._cppargs = ['-Wall', '-fPIC', '-std=gnu11'] + opt_flags = ["-g", "-O3"] + arch_flag = ["-m64" if calcsize("P") == 8 else "-m32"] + self._cppargs = ["-Wall", "-fPIC", "-std=gnu11"] self._cppargs += Iflags self._cppargs += opt_flags + cppargs + arch_flag - self._ldargs = ['-shared'] + self._ldargs = ["-shared"] self._ldargs += Lflags self._ldargs += lflags 
self._ldargs += ldargs if len(Lflags) > 0: - self._ldargs += ['-Wl, -rpath=%s' % (":".join(libdirs))] + self._ldargs += ["-Wl, -rpath=%s" % (":".join(libdirs))] self._ldargs += arch_flag self._incdirs = incdirs self._libdirs = libdirs @@ -216,7 +217,7 @@ class CCompiler: if ldargs is None: ldargs = [] - self._cc = os.getenv('CC') if cc is None else cc + self._cc = os.getenv("CC") if cc is None else cc self._cppargs = cppargs self._ldargs = ldargs self._dynlib_ext = "" @@ -232,20 +233,22 @@ class CCompiler: pass def _create_compile_process_(self, cmd, src, log): - with open(log, 'w') as logfile: + with open(log, "w") as logfile: try: subprocess.check_call(cmd, stdout=logfile, stderr=logfile) except OSError: raise RuntimeError(f"OSError during compilation. Please check if compiler exists: {self._cc}") except subprocess.CalledProcessError: with open(log) as logfile2: - raise RuntimeError(f"Error during compilation:\n" - f"Compilation command: {cmd}\n" - f"Source/Destination file: {src}\n" - f"Log file: {logfile.name}\n" - f"Log output: {logfile2.read()}\n" - f"\n" - f"If you are on macOS, it might help to type 'export CC=gcc'") + raise RuntimeError( + f"Error during compilation:\n" + f"Compilation command: {cmd}\n" + f"Source/Destination file: {src}\n" + f"Log file: {logfile.name}\n" + f"Log output: {logfile2.read()}\n" + f"\n" + f"If you are on macOS, it might help to type 'export CC=gcc'" + ) return True @@ -253,7 +256,9 @@ class CCompiler_SS(CCompiler): """Single-stage C-compiler; used for a SINGLE source file.""" def __init__(self, cc=None, cppargs=None, ldargs=None, incdirs=None, libdirs=None, libs=None, tmp_dir=None): - super().__init__(cc=cc, cppargs=cppargs, ldargs=ldargs, incdirs=incdirs, libdirs=libdirs, libs=libs, tmp_dir=tmp_dir) + super().__init__( + cc=cc, cppargs=cppargs, ldargs=ldargs, incdirs=incdirs, libdirs=libdirs, libs=libs, tmp_dir=tmp_dir + ) def __str__(self): output = "[CCompiler_SS]: " @@ -267,8 +272,8 @@ class CCompiler_SS(CCompiler): return output def compile(self, src, obj, log): - cc = [self._cc] + self._cppargs + ['-o', obj, src] + self._ldargs - with open(log, 'w') as logfile: + cc = [self._cc] + self._cppargs + ["-o", obj, src] + self._ldargs + with open(log, "w") as logfile: logfile.write(f"Compiling: {cc}\n") self._create_compile_process_(cc, src, log) @@ -287,7 +292,15 @@ class GNUCompiler_SS(CCompiler_SS): def __init__(self, cppargs=None, ldargs=None, incdirs=None, libdirs=None, libs=None, tmp_dir=None): c_params = GNU_parameters(cppargs, ldargs, incdirs, libdirs, libs) - super().__init__(c_params.compiler, cppargs=c_params.cppargs, ldargs=c_params.ldargs, incdirs=c_params.incdirs, libdirs=c_params.libdirs, libs=c_params.libs, tmp_dir=tmp_dir) + super().__init__( + c_params.compiler, + cppargs=c_params.cppargs, + ldargs=c_params.ldargs, + incdirs=c_params.incdirs, + libdirs=c_params.libdirs, + libs=c_params.libs, + tmp_dir=tmp_dir, + ) self._dynlib_ext = c_params.dynlib_ext self._stclib_ext = c_params.stclib_ext self._obj_ext = c_params.obj_ext diff --git a/parcels/compilation/codegenerator.py b/parcels/compilation/codegenerator.py index 55c03a2d..e6ee5f8e 100644 --- a/parcels/compilation/codegenerator.py +++ b/parcels/compilation/codegenerator.py @@ -23,32 +23,26 @@ class IntrinsicNode(ast.AST): class FieldSetNode(IntrinsicNode): def __getattr__(self, attr): if isinstance(getattr(self.obj, attr), Field): - return FieldNode(getattr(self.obj, attr), - ccode=f"{self.ccode}->{attr}") + return FieldNode(getattr(self.obj, attr), ccode=f"{self.ccode}->{attr}") 
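# [Illustrative aside, not part of the diff] The ccode f-strings in these Node
# classes build C member accesses: with self.ccode == "fset" and attr == "U",
# the node carries the C expression fset->U. A trivial standalone rendering:
ccode, attr = "fset", "U"
print(f"{ccode}->{attr}")  # -> fset->U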
elif isinstance(getattr(self.obj, attr), NestedField): if isinstance(getattr(self.obj, attr)[0], VectorField): - return NestedVectorFieldNode(getattr(self.obj, attr), - ccode=f"{self.ccode}->{attr}") + return NestedVectorFieldNode(getattr(self.obj, attr), ccode=f"{self.ccode}->{attr}") else: - return NestedFieldNode(getattr(self.obj, attr), - ccode=f"{self.ccode}->{attr}") + return NestedFieldNode(getattr(self.obj, attr), ccode=f"{self.ccode}->{attr}") elif isinstance(getattr(self.obj, attr), VectorField): - return VectorFieldNode(getattr(self.obj, attr), - ccode=f"{self.ccode}->{attr}") + return VectorFieldNode(getattr(self.obj, attr), ccode=f"{self.ccode}->{attr}") else: - return ConstNode(getattr(self.obj, attr), - ccode="%s" % (attr)) + return ConstNode(getattr(self.obj, attr), ccode="%s" % (attr)) class FieldNode(IntrinsicNode): def __getattr__(self, attr): if isinstance(getattr(self.obj, attr), Grid): - return GridNode(getattr(self.obj, attr), - ccode=f"{self.ccode}->{attr}") + return GridNode(getattr(self.obj, attr), ccode=f"{self.ccode}->{attr}") elif attr == "eval": return FieldEvalCallNode(self) else: - raise NotImplementedError('Access to Field attributes are not (yet) implemented in JIT mode') + raise NotImplementedError("Access to Field attributes are not (yet) implemented in JIT mode") class FieldEvalCallNode(IntrinsicNode): @@ -71,7 +65,7 @@ class VectorFieldNode(IntrinsicNode): if attr == "eval": return VectorFieldEvalCallNode(self) else: - raise NotImplementedError('Access to VectorField attributes are not (yet) implemented in JIT mode') + raise NotImplementedError("Access to VectorField attributes are not (yet) implemented in JIT mode") def __getitem__(self, attr): return VectorFieldEvalNode(self.obj, attr) @@ -122,7 +116,7 @@ class NestedVectorFieldEvalNode(IntrinsicNode): class GridNode(IntrinsicNode): def __getattr__(self, attr): - raise NotImplementedError('Access to Grids is not (yet) implemented in JIT mode') + raise NotImplementedError("Access to Grids is not (yet) implemented in JIT mode") class ConstNode(IntrinsicNode): @@ -131,7 +125,7 @@ class ConstNode(IntrinsicNode): class MathNode(IntrinsicNode): - symbol_map = {'pi': 'M_PI', 'e': 'M_E', 'nan': 'NAN'} + symbol_map = {"pi": "M_PI", "e": "M_E", "nan": "NAN"} def __getattr__(self, attr): if hasattr(math, attr): @@ -143,13 +137,15 @@ class MathNode(IntrinsicNode): class RandomNode(IntrinsicNode): - symbol_map = {'random': 'parcels_random', - 'uniform': 'parcels_uniform', - 'randint': 'parcels_randint', - 'normalvariate': 'parcels_normalvariate', - 'expovariate': 'parcels_expovariate', - 'vonmisesvariate': 'parcels_vonmisesvariate', - 'seed': 'parcels_seed'} + symbol_map = { + "random": "parcels_random", + "uniform": "parcels_uniform", + "randint": "parcels_randint", + "normalvariate": "parcels_normalvariate", + "expovariate": "parcels_expovariate", + "vonmisesvariate": "parcels_vonmisesvariate", + "seed": "parcels_seed", + } def __getattr__(self, attr): if hasattr(random, attr): @@ -162,7 +158,7 @@ class RandomNode(IntrinsicNode): class StatusCodeNode(IntrinsicNode): def __getattr__(self, attr): - statuscodes = [c for c in vars(StatusCode) if not c.startswith('_')] + statuscodes = [c for c in vars(StatusCode) if not c.startswith("_")] if attr in statuscodes: return IntrinsicNode(None, ccode=attr.upper()) else: @@ -171,7 +167,7 @@ class StatusCodeNode(IntrinsicNode): class PrintNode(IntrinsicNode): def __init__(self): - self.obj = 'print' + self.obj = "print" class ParticleAttributeNode(IntrinsicNode): @@ -182,27 
+178,30 @@ class ParticleAttributeNode(IntrinsicNode): class ParticleXiYiZiTiAttributeNode(IntrinsicNode): def __init__(self, obj, attr): - logger.warning_once(f"Be careful when sampling particle.{attr}, as this is updated in the kernel loop. " - "Best to place the sampling statement before advection.") + logger.warning_once( + f"Be careful when sampling particle.{attr}, as this is updated in the kernel loop. " + "Best to place the sampling statement before advection." + ) self.obj = obj.ccode self.attr = attr class ParticleNode(IntrinsicNode): - def __init__(self, obj): - super().__init__(obj, ccode='particles') + super().__init__(obj, ccode="particles") def __getattr__(self, attr): - if attr in ['xi', 'yi', 'zi', 'ti']: + if attr in ["xi", "yi", "zi", "ti"]: return ParticleXiYiZiTiAttributeNode(self, attr) if attr in [v.name for v in self.obj.variables]: return ParticleAttributeNode(self, attr) - elif attr in ['delete']: - return ParticleAttributeNode(self, 'state') + elif attr in ["delete"]: + return ParticleAttributeNode(self, "state") else: - raise AttributeError(f"Particle type {self.obj.name} does not define attribute '{attr}. " - f"Please add '{attr}' as a Variable in {self.obj.name}.") + raise AttributeError( + f"Particle type {self.obj.name} does not define attribute '{attr}. " + f"Please add '{attr}' as a Variable in {self.obj.name}." + ) class IntrinsicTransformer(ast.NodeTransformer): @@ -230,21 +229,21 @@ class IntrinsicTransformer(ast.NodeTransformer): def visit_Name(self, node): """Inject IntrinsicNode objects into the tree according to keyword.""" - if node.id == 'fieldset' and self.fieldset is not None: - node = FieldSetNode(self.fieldset, ccode='fset') - elif node.id == 'particle': + if node.id == "fieldset" and self.fieldset is not None: + node = FieldSetNode(self.fieldset, ccode="fset") + elif node.id == "particle": node = ParticleNode(self.ptype) - elif node.id in ['StatusCode']: - node = StatusCodeNode(math, ccode='') - elif node.id == 'math': - node = MathNode(math, ccode='') - elif node.id in ['ParcelsRandom', 'rng']: - node = RandomNode(math, ccode='') - elif node.id == 'print': + elif node.id in ["StatusCode"]: + node = StatusCodeNode(math, ccode="") + elif node.id == "math": + node = MathNode(math, ccode="") + elif node.id in ["ParcelsRandom", "rng"]: + node = RandomNode(math, ccode="") + elif node.id == "print": node = PrintNode() - elif (node.id == 'pnum') or ('parcels_tmpvar' in node.id): + elif (node.id == "pnum") or ("parcels_tmpvar" in node.id): raise NotImplementedError(f"Custom Kernels cannot contain string {node.id}; please change your kernel") - elif node.id == 'abs': + elif node.id == "abs": raise NotImplementedError("abs() does not work in JIT Kernels. 
Use math.fabs() instead") return node @@ -253,14 +252,18 @@ class IntrinsicTransformer(ast.NodeTransformer): if isinstance(node.value, IntrinsicNode): return getattr(node.value, node.attr) else: - if node.value.id in ['np', 'numpy']: - raise NotImplementedError("Cannot convert numpy functions in kernels to C-code.\n" - "Either use functions from the math library or run Parcels in Scipy mode.\n" - "For more information, see https://docs.oceanparcels.org/en/latest/examples/tutorial_parcels_structure.html#3.-Kernels") - elif node.value.id in ['random']: - raise NotImplementedError("Cannot convert random functions in kernels to C-code.\n" - "Use `import parcels.rng as ParcelsRandom` and then ParcelsRandom.random(), ParcelsRandom.uniform() etc.\n" - "For more information, see https://docs.oceanparcels.org/en/latest/examples/tutorial_parcels_structure.html#3.-Kernels") + if node.value.id in ["np", "numpy"]: + raise NotImplementedError( + "Cannot convert numpy functions in kernels to C-code.\n" + "Either use functions from the math library or run Parcels in Scipy mode.\n" + "For more information, see https://docs.oceanparcels.org/en/latest/examples/tutorial_parcels_structure.html#3.-Kernels" + ) + elif node.value.id in ["random"]: + raise NotImplementedError( + "Cannot convert random functions in kernels to C-code.\n" + "Use `import parcels.rng as ParcelsRandom` and then ParcelsRandom.random(), ParcelsRandom.uniform() etc.\n" + "For more information, see https://docs.oceanparcels.org/en/latest/examples/tutorial_parcels_structure.html#3.-Kernels" + ) else: raise NotImplementedError(f"Cannot convert '{node.value.id}' used in kernel to C-code") @@ -279,7 +282,7 @@ class IntrinsicTransformer(ast.NodeTransformer): elif isinstance(node.value, VectorFieldNode): tmp = self.get_tmp() tmp2 = self.get_tmp() - tmp3 = self.get_tmp() if node.value.obj.vector_type == '3D' else None + tmp3 = self.get_tmp() if node.value.obj.vector_type == "3D" else None # Insert placeholder node for field eval ... self.stmt_stack += [VectorFieldEvalNode(node.value, node.slice, tmp, tmp2, tmp3)] # .. and return the name of the temporary that will be populated @@ -294,7 +297,7 @@ class IntrinsicTransformer(ast.NodeTransformer): elif isinstance(node.value, NestedVectorFieldNode): tmp = self.get_tmp() tmp2 = self.get_tmp() - tmp3 = self.get_tmp() if list.__getitem__(node.value.obj, 0).vector_type == '3D' else None + tmp3 = self.get_tmp() if list.__getitem__(node.value.obj, 0).vector_type == "3D" else None self.stmt_stack += [NestedVectorFieldEvalNode(node.value, node.slice, tmp, tmp2, tmp3)] if tmp3: return ast.Tuple([ast.Name(id=tmp), ast.Name(id=tmp2), ast.Name(id=tmp3)], ast.Load()) @@ -305,8 +308,10 @@ class IntrinsicTransformer(ast.NodeTransformer): def visit_AugAssign(self, node): node.target = self.visit(node.target) - if isinstance(node.target, ParticleAttributeNode) and node.target.attr in ['lon', 'lat', 'depth', 'time']: - logger.warning_once("Don't change the location of a particle directly in a Kernel. Use particle_dlon, particle_dlat, etc.") + if isinstance(node.target, ParticleAttributeNode) and node.target.attr in ["lon", "lat", "depth", "time"]: + logger.warning_once( + "Don't change the location of a particle directly in a Kernel. Use particle_dlon, particle_dlat, etc." 
+ ) node.op = self.visit(node.op) node.value = self.visit(node.value) stmts = [node] @@ -322,7 +327,9 @@ class IntrinsicTransformer(ast.NodeTransformer): node.value = self.visit(node.value) if isinstance(node.value, ConstNode) and len(node.targets) > 0 and isinstance(node.targets[0], ast.Name): if node.targets[0].id == node.value.ccode: - raise NotImplementedError(f"Assignment of fieldset.{node.value.ccode} to a local variable {node.targets[0].id} with same name in kernel. This is not allowed.") + raise NotImplementedError( + f"Assignment of fieldset.{node.value.ccode} to a local variable {node.targets[0].id} with same name in kernel. This is not allowed." + ) stmts = [node] # Inject statements from the stack @@ -336,7 +343,7 @@ class IntrinsicTransformer(ast.NodeTransformer): node.args = [self.visit(a) for a in node.args] node.keywords = {kw.arg: self.visit(kw.value) for kw in node.keywords} - if isinstance(node.func, ParticleAttributeNode) and node.func.attr == 'state': + if isinstance(node.func, ParticleAttributeNode) and node.func.attr == "state": node = IntrinsicNode(node, "particles->state[pnum] = DELETE") elif isinstance(node.func, FieldEvalCallNode): @@ -359,7 +366,7 @@ class IntrinsicTransformer(ast.NodeTransformer): # get a temporary value to assign result to tmp1 = self.get_tmp() tmp2 = self.get_tmp() - tmp3 = self.get_tmp() if node.func.field.obj.vector_type == '3D' else None + tmp3 = self.get_tmp() if node.func.field.obj.vector_type == "3D" else None # whether to convert convert = True if "applyConversion" in node.keywords: @@ -383,8 +390,7 @@ class TupleSplitter(ast.NodeTransformer): """AST transformer that detects and splits Pythonic tuple assignments into multiple statements for conversion to C.""" def visit_Assign(self, node): - if isinstance(node.targets[0], ast.Tuple) \ - and isinstance(node.value, ast.Tuple): + if isinstance(node.targets[0], ast.Tuple) and isinstance(node.value, ast.Tuple): t_elts = node.targets[0].elts v_elts = node.value.elts if len(t_elts) != len(v_elts): @@ -403,7 +409,7 @@ class KernelGenerator(ABC, ast.NodeVisitor): """ # Intrinsic variables that appear as function arguments - kernel_vars = ['particle', 'fieldset', 'time', 'output_time', 'tol'] + kernel_vars = ["particle", "fieldset", "time", "output_time", "tol"] array_vars = [] def __init__(self, fieldset=None, ptype=JITParticle): @@ -431,9 +437,9 @@ class KernelGenerator(ABC, ast.NodeVisitor): used_vars = [] funcvars_copy = copy(funcvars) # editing a list while looping over it is dangerous for kvar in funcvars: - if kvar in used_vars + ['particle_dlon', 'particle_dlat', 'particle_ddepth']: - if kvar not in ['particle', 'fieldset', 'time', 'particle_dlon', 'particle_dlat', 'particle_ddepth']: - logger.warning(kvar+" declared in multiple Kernels") + if kvar in used_vars + ["particle_dlon", "particle_dlat", "particle_ddepth"]: + if kvar not in ["particle", "fieldset", "time", "particle_dlon", "particle_dlat", "particle_ddepth"]: + logger.warning(kvar + " declared in multiple Kernels") funcvars_copy.remove(kvar) else: used_vars.append(kvar) @@ -453,9 +459,9 @@ class KernelGenerator(ABC, ast.NodeVisitor): @staticmethod def _check_FieldSamplingArguments(ccode): - if ccode == 'particles': - args = ('time', 'particles->depth[pnum]', 'particles->lat[pnum]', 'particles->lon[pnum]') - elif ccode[-1] == 'particles': + if ccode == "particles": + args = ("time", "particles->depth[pnum]", "particles->lat[pnum]", "particles->lon[pnum]") + elif ccode[-1] == "particles": args = ccode[:-1] else: args = ccode @@ 
-467,14 +473,16 @@ class KernelGenerator(ABC, ast.NodeVisitor): self.visit(stmt) # Create function declaration and argument list - decl = c.Static(c.DeclSpecifier(c.Value("StatusCode", node.name), spec='inline')) - args = [c.Pointer(c.Value(self.ptype.name + 'p', "particles")), - c.Value("int", "pnum"), - c.Value("double", "time")] + decl = c.Static(c.DeclSpecifier(c.Value("StatusCode", node.name), spec="inline")) + args = [ + c.Pointer(c.Value(self.ptype.name + "p", "particles")), + c.Value("int", "pnum"), + c.Value("double", "time"), + ] for field in self.field_args.values(): args += [c.Pointer(c.Value("CField", "%s" % field.ccode_name))] for field in self.vector_field_args.values(): - for fcomponent in ['U', 'V', 'W']: + for fcomponent in ["U", "V", "W"]: try: f = getattr(field, fcomponent) if f.ccode_name not in self.field_args: @@ -487,14 +495,14 @@ class KernelGenerator(ABC, ast.NodeVisitor): # Create function body as C-code object body = [] - for coord in ['lon', 'lat', 'depth']: + for coord in ["lon", "lat", "depth"]: body += [c.Statement(f"type_coord particle_d{coord} = 0")] body += [c.Statement(f"particles->{coord}[pnum] = particles->{coord}_nextloop[pnum]")] body += [c.Statement("particles->time[pnum] = particles->time_nextloop[pnum]")] body += [stmt.ccode for stmt in node.body] - for coord in ['lon', 'lat', 'depth']: + for coord in ["lon", "lat", "depth"]: body += [c.Statement(f"particles->{coord}_nextloop[pnum] = particles->{coord}[pnum] + particle_d{coord}")] body += [c.Statement("particles->time_nextloop[pnum] = particles->time[pnum] + particles->dt[pnum]")] body += [c.Statement("return particles->state[pnum]")] @@ -515,16 +523,16 @@ class KernelGenerator(ABC, ast.NodeVisitor): elif isinstance(node.args[0], ast.Name): node.ccode = str(c.Statement('printf("%%f\\n", %s)' % (node.args[0].id))) elif isinstance(node.args[0], ast.BinOp): - if hasattr(node.args[0].right, 'ccode'): + if hasattr(node.args[0].right, "ccode"): args = node.args[0].right.ccode - elif hasattr(node.args[0].right, 'id'): + elif hasattr(node.args[0].right, "id"): args = node.args[0].right.id - elif hasattr(node.args[0].right, 'elts'): + elif hasattr(node.args[0].right, "elts"): args = [] for a in node.args[0].right.elts: - if hasattr(a, 'ccode'): + if hasattr(a, "ccode"): args.append(a.ccode) - elif hasattr(a, 'id'): + elif hasattr(a, "id"): args.append(a.id) else: args = [] @@ -541,10 +549,10 @@ class KernelGenerator(ABC, ast.NodeVisitor): else: for a in node.args: self.visit(a) - if a.ccode == 'parcels_customed_Cfunc_pointer_args': + if a.ccode == "parcels_customed_Cfunc_pointer_args": pointer_args = True parcels_customed_Cfunc = True - elif a.ccode == 'parcels_customed_Cfunc': + elif a.ccode == "parcels_customed_Cfunc": parcels_customed_Cfunc = True elif isinstance(a, FieldNode) or isinstance(a, VectorFieldNode): a.ccode = a.obj.ccode_name @@ -555,24 +563,34 @@ class KernelGenerator(ABC, ast.NodeVisitor): ccode_args = ", ".join([a.ccode for a in node.args[pointer_args:]]) try: if isinstance(node.func, str): - node.ccode = node.func + '(' + ccode_args + ')' + node.ccode = node.func + "(" + ccode_args + ")" else: self.visit(node.func) rhs = f"{node.func.ccode}({ccode_args})" if parcels_customed_Cfunc: - node.ccode = str(c.Block([c.Assign("parcels_interp_state", rhs), - c.Assign("particles->state[pnum]", "max(particles->state[pnum], parcels_interp_state)"), - c.Statement("CHECKSTATUS_KERNELLOOP(parcels_interp_state)")])) + node.ccode = str( + c.Block( + [ + c.Assign("parcels_interp_state", rhs), + c.Assign( 
+ "particles->state[pnum]", "max(particles->state[pnum], parcels_interp_state)" + ), + c.Statement("CHECKSTATUS_KERNELLOOP(parcels_interp_state)"), + ] + ) + ) else: node.ccode = rhs except: - raise RuntimeError("Error in converting Kernel to C. See https://docs.oceanparcels.org/en/latest/examples/tutorial_parcels_structure.html#3.-Kernel-execution for hints and tips") + raise RuntimeError( + "Error in converting Kernel to C. See https://docs.oceanparcels.org/en/latest/examples/tutorial_parcels_structure.html#3.-Kernel-execution for hints and tips" + ) def visit_Name(self, node): """Catches any mention of intrinsic variable names such as 'particle' or 'fieldset' and inserts our placeholder objects.""" - if node.id == 'True': + if node.id == "True": node.id = "1" - if node.id == 'False': + if node.id == "False": node.id = "0" node.ccode = node.id @@ -592,7 +610,7 @@ class KernelGenerator(ABC, ast.NodeVisitor): if isinstance(node.value, ast.List): # Detect in-place initialisation of multi-dimensional arrays tmp_node = node.value - decl = c.Value('float', node.targets[0].id) + decl = c.Value("float", node.targets[0].id) while isinstance(tmp_node, ast.List): decl = c.ArrayOf(decl, len(tmp_node.elts)) if isinstance(tmp_node.elts[0], ast.List): @@ -605,8 +623,9 @@ class KernelGenerator(ABC, ast.NodeVisitor): node.ccode = c.Initializer(decl, node.value.ccode) self.array_vars += [node.targets[0].id] elif isinstance(node.value, ParticleXiYiZiTiAttributeNode): - raise RuntimeError(f"Add index of the grid when using particle.{node.value.attr} " - f"(e.g. particle.{node.value.attr}[0]).") + raise RuntimeError( + f"Add index of the grid when using particle.{node.value.attr} " f"(e.g. particle.{node.value.attr}[0])." + ) else: node.ccode = c.Assign(node.targets[0].ccode, node.value.ccode) @@ -624,7 +643,7 @@ class KernelGenerator(ABC, ast.NodeVisitor): self.visit(b) # field evals are replaced by a tmp variable is added to the stack. # Here it means field evals passes from node.test to node.body. We take it out manually - fieldInTestCount = node.test.ccode.count('parcels_tmpvar') + fieldInTestCount = node.test.ccode.count("parcels_tmpvar") body0 = c.Block([b.ccode for b in node.body[:fieldInTestCount]]) body = c.Block([b.ccode for b in node.body[fieldInTestCount:]]) orelse = c.Block([b.ccode for b in node.orelse]) if len(node.orelse) > 0 else None @@ -633,9 +652,9 @@ class KernelGenerator(ABC, ast.NodeVisitor): def visit_Compare(self, node): self.visit(node.left) - assert (len(node.ops) == 1) + assert len(node.ops) == 1 self.visit(node.ops[0]) - assert (len(node.comparators) == 1) + assert len(node.comparators) == 1 self.visit(node.comparators[0]) node.ccode = f"{node.left.ccode} {node.ops[0].ccode} {node.comparators[0].ccode}" @@ -676,9 +695,11 @@ class KernelGenerator(ABC, ast.NodeVisitor): self.visit(node.op) self.visit(node.right) if isinstance(node.op, ast.BitXor): - raise RuntimeError("JIT kernels do not support the '^' operator.\n" - "Did you intend to use the exponential/power operator? In that case, please use '**'") - elif node.op.ccode == 'pow': # catching '**' pow statements + raise RuntimeError( + "JIT kernels do not support the '^' operator.\n" + "Did you intend to use the exponential/power operator? 
In that case, please use '**'" + ) + elif node.op.ccode == "pow": # catching '**' pow statements node.ccode = f"pow({node.left.ccode}, {node.right.ccode})" else: node.ccode = f"({node.left.ccode} {node.op.ccode} {node.right.ccode})" @@ -783,15 +804,17 @@ class KernelGenerator(ABC, ast.NodeVisitor): def visit_Return(self, node): self.visit(node.value) - node.ccode = c.Statement('return %s' % node.value.ccode) + node.ccode = c.Statement("return %s" % node.value.ccode) def visit_FieldEvalNode(self, node): self.visit(node.field) self.visit(node.args) args = self._check_FieldSamplingArguments(node.args.ccode) ccode_eval = node.field.obj.ccode_eval(node.var, *args) - stmts = [c.Assign("parcels_interp_state", ccode_eval), - c.Assign("particles->state[pnum]", "max(particles->state[pnum], parcels_interp_state)")] + stmts = [ + c.Assign("parcels_interp_state", ccode_eval), + c.Assign("particles->state[pnum]", "max(particles->state[pnum], parcels_interp_state)"), + ] if node.convert: ccode_conv = node.field.obj.ccode_convert(*args) @@ -804,22 +827,27 @@ class KernelGenerator(ABC, ast.NodeVisitor): self.visit(node.field) self.visit(node.args) args = self._check_FieldSamplingArguments(node.args.ccode) - ccode_eval = node.field.obj.ccode_eval(node.var, node.var2, node.var3, - node.field.obj.U, node.field.obj.V, node.field.obj.W, *args) - if node.convert and node.field.obj.U.interp_method != 'cgrid_velocity': + ccode_eval = node.field.obj.ccode_eval( + node.var, node.var2, node.var3, node.field.obj.U, node.field.obj.V, node.field.obj.W, *args + ) + if node.convert and node.field.obj.U.interp_method != "cgrid_velocity": ccode_conv1 = node.field.obj.U.ccode_convert(*args) ccode_conv2 = node.field.obj.V.ccode_convert(*args) - statements = [c.Statement(f"{node.var} *= {ccode_conv1}"), - c.Statement(f"{node.var2} *= {ccode_conv2}")] + statements = [c.Statement(f"{node.var} *= {ccode_conv1}"), c.Statement(f"{node.var2} *= {ccode_conv2}")] else: statements = [] - if node.convert and node.field.obj.vector_type == '3D': + if node.convert and node.field.obj.vector_type == "3D": ccode_conv3 = node.field.obj.W.ccode_convert(*args) statements.append(c.Statement(f"{node.var3} *= {ccode_conv3}")) conv_stat = c.Block(statements) - node.ccode = c.Block([c.Assign("parcels_interp_state", ccode_eval), - c.Assign("particles->state[pnum]", "max(particles->state[pnum], parcels_interp_state)"), - conv_stat, c.Statement("CHECKSTATUS_KERNELLOOP(parcels_interp_state)")]) + node.ccode = c.Block( + [ + c.Assign("parcels_interp_state", ccode_eval), + c.Assign("particles->state[pnum]", "max(particles->state[pnum], parcels_interp_state)"), + conv_stat, + c.Statement("CHECKSTATUS_KERNELLOOP(parcels_interp_state)"), + ] + ) def visit_NestedFieldEvalNode(self, node): self.visit(node.fields) @@ -830,9 +858,14 @@ class KernelGenerator(ABC, ast.NodeVisitor): ccode_eval = fld.ccode_eval(node.var, *args) ccode_conv = fld.ccode_convert(*args) conv_stat = c.Statement(f"{node.var} *= {ccode_conv}") - cstat += [c.Assign("particles->state[pnum]", ccode_eval), - conv_stat, - c.If("particles->state[pnum] != ERROROUTOFBOUNDS ", c.Block([c.Statement("CHECKSTATUS_KERNELLOOP(particles->state[pnum])"), c.Statement("break")]))] + cstat += [ + c.Assign("particles->state[pnum]", ccode_eval), + conv_stat, + c.If( + "particles->state[pnum] != ERROROUTOFBOUNDS ", + c.Block([c.Statement("CHECKSTATUS_KERNELLOOP(particles->state[pnum])"), c.Statement("break")]), + ), + ] cstat += [c.Statement("CHECKSTATUS_KERNELLOOP(particles->state[pnum])"), c.Statement("break")] 
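# [Illustrative aside, not part of the diff] The `while (1==1)` block emitted
# just below, with its trailing `break`s, is a labelled-break substitute: each
# field of the NestedField is tried in order and the search stops at the first
# result that is not out-of-bounds. A hypothetical Python rendering:
ERROR_OUT_OF_BOUNDS = 4  # stand-in value, purely illustrative
def eval_nested(fields, sample):
    state = ERROR_OUT_OF_BOUNDS
    for fld in fields:                    # mirrors the unrolled cstat blocks
        state = fld(sample)
        if state != ERROR_OUT_OF_BOUNDS:  # success or a hard error: stop searching
            break
    return state
print(eval_nested([lambda s: ERROR_OUT_OF_BOUNDS, lambda s: 0], None))  # -> 0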
node.ccode = c.While("1==1", c.Block(cstat)) @@ -843,31 +876,35 @@ class KernelGenerator(ABC, ast.NodeVisitor): args = self._check_FieldSamplingArguments(node.args.ccode) for fld in node.fields.obj: ccode_eval = fld.ccode_eval(node.var, node.var2, node.var3, fld.U, fld.V, fld.W, *args) - if fld.U.interp_method != 'cgrid_velocity': + if fld.U.interp_method != "cgrid_velocity": ccode_conv1 = fld.U.ccode_convert(*args) ccode_conv2 = fld.V.ccode_convert(*args) - statements = [c.Statement(f"{node.var} *= {ccode_conv1}"), - c.Statement(f"{node.var2} *= {ccode_conv2}")] + statements = [c.Statement(f"{node.var} *= {ccode_conv1}"), c.Statement(f"{node.var2} *= {ccode_conv2}")] else: statements = [] - if fld.vector_type == '3D': + if fld.vector_type == "3D": ccode_conv3 = fld.W.ccode_convert(*args) statements.append(c.Statement(f"{node.var3} *= {ccode_conv3}")) - cstat += [c.Assign("particles->state[pnum]", ccode_eval), - c.Block(statements), - c.If("particles->state[pnum] != ERROROUTOFBOUNDS ", c.Block([c.Statement("CHECKSTATUS_KERNELLOOP(particles->state[pnum])"), c.Statement("break")]))] + cstat += [ + c.Assign("particles->state[pnum]", ccode_eval), + c.Block(statements), + c.If( + "particles->state[pnum] != ERROROUTOFBOUNDS ", + c.Block([c.Statement("CHECKSTATUS_KERNELLOOP(particles->state[pnum])"), c.Statement("break")]), + ), + ] cstat += [c.Statement("CHECKSTATUS_KERNELLOOP(particles->state[pnum])"), c.Statement("break")] node.ccode = c.While("1==1", c.Block(cstat)) def visit_Print(self, node): for n in node.values: self.visit(n) - if hasattr(node.values[0], 's'): + if hasattr(node.values[0], "s"): node.ccode = c.Statement('printf("%s\\n")' % (n.ccode)) return - if hasattr(node.values[0], 's_print'): + if hasattr(node.values[0], "s_print"): args = node.values[0].right.ccode - s = ('printf("%s\\n"' % node.values[0].left.ccode) + s = 'printf("%s\\n"' % node.values[0].left.ccode if isinstance(args, str): s = s + (", %s)" % args) else: @@ -876,16 +913,16 @@ class KernelGenerator(ABC, ast.NodeVisitor): s = s + ")" node.ccode = c.Statement(s) return - vars = ', '.join([n.ccode for n in node.values]) - int_vars = ['particle->id', 'particle->xi', 'particle->yi', 'particle->zi'] - stat = ', '.join(["%d" if n.ccode in int_vars else "%f" for n in node.values]) + vars = ", ".join([n.ccode for n in node.values]) + int_vars = ["particle->id", "particle->xi", "particle->yi", "particle->zi"] + stat = ", ".join(["%d" if n.ccode in int_vars else "%f" for n in node.values]) node.ccode = c.Statement(f'printf("{stat}\\n", {vars})') def visit_Constant(self, node): - if node.value == 'parcels_customed_Cfunc_pointer_args': + if node.value == "parcels_customed_Cfunc_pointer_args": node.ccode = node.value elif isinstance(node.value, str): - node.ccode = '' # skip strings from docstrings or comments + node.ccode = "" # skip strings from docstrings or comments elif isinstance(node.value, bool): node.ccode = "1" if node.value is True else "0" else: @@ -902,12 +939,12 @@ class LoopGenerator: def generate(self, funcname, field_args, const_args, kernel_ast, c_include): ccode = [] - pname = self.ptype.name + 'p' + pname = self.ptype.name + "p" # ==== Add include for Parcels and math header ==== # ccode += [str(c.Include("parcels.h", system=False))] ccode += [str(c.Include("math.h", system=False))] - ccode += [str(c.Assign('const int ngrid', str(self.fieldset.gridset.size if self.fieldset is not None else 1)))] + ccode += [str(c.Assign("const int ngrid", str(self.fieldset.gridset.size if self.fieldset is not None else 1)))] # 
==== Generate type definition for particle type ==== # vdeclp = [c.Pointer(c.POD(v.dtype, v.name)) for v in self.ptype.variables] @@ -920,15 +957,17 @@ class LoopGenerator: ccode += [str(kernel_ast)] # Generate outer loop for repeated kernel invocation - args = [c.Value("int", "num_particles"), - c.Pointer(c.Value(pname, "particles")), - c.Value("double", "endtime"), c.Value("double", "dt")] + args = [ + c.Value("int", "num_particles"), + c.Pointer(c.Value(pname, "particles")), + c.Value("double", "endtime"), + c.Value("double", "dt"), + ] for field, _ in field_args.items(): args += [c.Pointer(c.Value("CField", "%s" % field))] for const, _ in const_args.items(): args += [c.Value("double", const)] # are we SURE those const's are double's ? - fargs_str = ", ".join(['particles->time_nextloop[pnum]'] + list(field_args.keys()) - + list(const_args.keys())) + fargs_str = ", ".join(["particles->time_nextloop[pnum]"] + list(field_args.keys()) + list(const_args.keys())) # ==== statement clusters use to compose 'body' variable and variables 'time_loop' and 'part_loop' ==== ## sign_dt = c.Assign("sign_dt", "dt > 0 ? 1 : -1") @@ -940,25 +979,46 @@ class LoopGenerator: body += [c.Value("double", "pre_dt")] body += [c.Statement("pre_dt = particles->dt[pnum]")] body += [c.If("sign_dt*particles->time_nextloop[pnum] >= sign_dt*(endtime)", c.Statement("break"))] - body += [c.If(f"fabs(endtime - particles->time_nextloop[pnum]) < fabs(particles->{dtname}[pnum])-1e-6", - c.Statement(f"particles->{dtname}[pnum] = fabs(endtime - particles->time_nextloop[pnum]) * sign_dt"))] + body += [ + c.If( + f"fabs(endtime - particles->time_nextloop[pnum]) < fabs(particles->{dtname}[pnum])-1e-6", + c.Statement(f"particles->{dtname}[pnum] = fabs(endtime - particles->time_nextloop[pnum]) * sign_dt"), + ) + ] body += [c.Assign("particles->state[pnum]", f"{funcname}(particles, pnum, {fargs_str})")] - body += [c.If("particles->state[pnum] == SUCCESS", - c.Block([c.If("sign_dt*particles->time[pnum] < sign_dt*endtime", - c.Block([c.Assign("particles->state[pnum]", "EVALUATE")]), - c.Block([c.Assign("particles->state[pnum]", "SUCCESS")])) - ]))] + body += [ + c.If( + "particles->state[pnum] == SUCCESS", + c.Block( + [ + c.If( + "sign_dt*particles->time[pnum] < sign_dt*endtime", + c.Block([c.Assign("particles->state[pnum]", "EVALUATE")]), + c.Block([c.Assign("particles->state[pnum]", "SUCCESS")]), + ) + ] + ), + ) + ] body += [c.If("particles->state[pnum] == STOPALLEXECUTION", c.Statement("return"))] body += [c.Statement("particles->dt[pnum] = pre_dt")] - body += [c.If("(particles->state[pnum] == REPEAT || particles->state[pnum] == DELETE)", c.Block([c.Statement('break')]))] + body += [ + c.If( + "(particles->state[pnum] == REPEAT || particles->state[pnum] == DELETE)", + c.Block([c.Statement("break")]), + ) + ] time_loop = c.While("(particles->state[pnum] == EVALUATE || particles->state[pnum] == REPEAT)", c.Block(body)) - part_loop = c.For("pnum = 0", "pnum < num_particles", "++pnum", - c.Block([time_loop])) - fbody = c.Block([c.Value("int", "pnum"), - c.Value("double", "sign_dt"), - sign_dt, part_loop, - ]) + part_loop = c.For("pnum = 0", "pnum < num_particles", "++pnum", c.Block([time_loop])) + fbody = c.Block( + [ + c.Value("int", "pnum"), + c.Value("double", "sign_dt"), + sign_dt, + part_loop, + ] + ) fdecl = c.FunctionDeclaration(c.Value("void", "particle_loop"), args) ccode += [str(c.FunctionBody(fdecl, fbody))] return "\n\n".join(ccode) diff --git a/parcels/field.py b/parcels/field.py index 42772225..67ffc659 100644 --- 
a/parcels/field.py +++ b/parcels/field.py @@ -33,11 +33,11 @@ from .fieldfilebuffer import ( ) from .grid import CGrid, Grid, GridType -__all__ = ['Field', 'VectorField', 'NestedField'] +__all__ = ["Field", "VectorField", "NestedField"] def _isParticle(key): - if hasattr(key, 'obs_written'): + if hasattr(key, "obs_written"): return True else: return False @@ -51,9 +51,9 @@ def _deal_with_errors(error, key, vector_type): else: raise RuntimeError(f"{error}. Error could not be handled because particle was not part of the Field Sampling.") - if vector_type == '3D': + if vector_type == "3D": return (0, 0, 0) - elif vector_type == '2D': + elif vector_type == "2D": return (0, 0) else: return 0 @@ -132,10 +132,30 @@ class Field: * `Nested Fields <../examples/tutorial_NestedFields.ipynb>`__ """ - def __init__(self, name, data, lon=None, lat=None, depth=None, time=None, grid=None, mesh='flat', timestamps=None, - fieldtype=None, transpose=False, vmin=None, vmax=None, cast_data_dtype='float32', time_origin=None, - interp_method='linear', allow_time_extrapolation=None, time_periodic=False, gridindexingtype='nemo', - to_write=False, **kwargs): + def __init__( + self, + name, + data, + lon=None, + lat=None, + depth=None, + time=None, + grid=None, + mesh="flat", + timestamps=None, + fieldtype=None, + transpose=False, + vmin=None, + vmax=None, + cast_data_dtype="float32", + time_origin=None, + interp_method="linear", + allow_time_extrapolation=None, + time_periodic=False, + gridindexingtype="nemo", + to_write=False, + **kwargs, + ): if not isinstance(name, tuple): self.name = name self.filebuffername = name @@ -144,7 +164,9 @@ class Field: self.data = data if grid: if grid.defer_load and isinstance(data, np.ndarray): - raise ValueError('Cannot combine Grid from defer_loaded Field with np.ndarray data. please specify lon, lat, depth and time dimensions separately') + raise ValueError( + "Cannot combine Grid from defer_loaded Field with np.ndarray data. please specify lon, lat, depth and time dimensions separately" + ) self.grid = grid else: if (time is not None) and isinstance(time[0], np.datetime64): @@ -162,9 +184,9 @@ class Field: self.depth = self.grid.depth self.fieldtype = self.name if fieldtype is None else fieldtype self.to_write = to_write - if self.grid.mesh == 'flat' or (self.fieldtype not in unitconverters_map.keys()): + if self.grid.mesh == "flat" or (self.fieldtype not in unitconverters_map.keys()): self.units = UnitConverter() - elif self.grid.mesh == 'spherical': + elif self.grid.mesh == "spherical": self.units = unitconverters_map[self.fieldtype] else: raise ValueError("Unsupported mesh type. Choose either: 'spherical' or 'flat'") @@ -173,13 +195,17 @@ class Field: if self.name in interp_method: self.interp_method = interp_method[self.name] else: - raise RuntimeError(f'interp_method is a dictionary but {name} is not in it') + raise RuntimeError(f"interp_method is a dictionary but {name} is not in it") else: self.interp_method = interp_method self.gridindexingtype = gridindexingtype - if self.interp_method in ['bgrid_velocity', 'bgrid_w_velocity', 'bgrid_tracer'] and \ - self.grid.gtype in [GridType.RectilinearSGrid, GridType.CurvilinearSGrid]: - logger.warning_once('General s-levels are not supported in B-grid. 
RectilinearSGrid and CurvilinearSGrid can still be used to deal with shaved cells, but the levels must be horizontal.')
+        if self.interp_method in ["bgrid_velocity", "bgrid_w_velocity", "bgrid_tracer"] and self.grid.gtype in [
+            GridType.RectilinearSGrid,
+            GridType.CurvilinearSGrid,
+        ]:
+            logger.warning_once(
+                "General s-levels are not supported in B-grid. RectilinearSGrid and CurvilinearSGrid can still be used to deal with shaved cells, but the levels must be horizontal."
+            )

         self.fieldset = None
         if allow_time_extrapolation is None:
@@ -189,11 +215,15 @@ class Field:
         self.time_periodic = time_periodic

         if self.time_periodic is not False and self.allow_time_extrapolation:
-            logger.warning_once("allow_time_extrapolation and time_periodic cannot be used together.\n \
-                allow_time_extrapolation is set to False")
+            logger.warning_once(
+                "allow_time_extrapolation and time_periodic cannot be used together.\n \
+                allow_time_extrapolation is set to False"
+            )
             self.allow_time_extrapolation = False
         if self.time_periodic is True:
-            raise ValueError("Unsupported time_periodic=True. time_periodic must now be either False or the length of the period (either float in seconds or datetime.timedelta object.")
+            raise ValueError(
+                "Unsupported time_periodic=True. time_periodic must now be either False or the length of the period (either float in seconds or datetime.timedelta object)."
+            )
         if self.time_periodic is not False:
             if isinstance(self.time_periodic, datetime.timedelta):
                 self.time_periodic = self.time_periodic.total_seconds()
@@ -207,9 +237,9 @@ class Field:
         self.vmin = vmin
         self.vmax = vmax
         self.cast_data_dtype = cast_data_dtype
-        if self.cast_data_dtype == 'float32':
+        if self.cast_data_dtype == "float32":
             self.cast_data_dtype = np.float32
-        elif self.cast_data_dtype == 'float64':
+        elif self.cast_data_dtype == "float64":
             self.cast_data_dtype = np.float64

         if not self.grid.defer_load:
@@ -218,11 +248,11 @@ class Field:
             # Hack around the fact that NaN and ridiculously large values
             # propagate in SciPy's interpolators
             lib = np if isinstance(self.data, np.ndarray) else da
-            self.data[lib.isnan(self.data)] = 0.
+            self.data[lib.isnan(self.data)] = 0.0
             if self.vmin is not None:
-                self.data[self.data < self.vmin] = 0.
+                self.data[self.data < self.vmin] = 0.0
             if self.vmax is not None:
-                self.data[self.data > self.vmax] = 0.
+ self.data[self.data > self.vmax] = 0.0 if self.grid._add_last_periodic_data_timestep: self.data = lib.concatenate((self.data, self.data[:1, :]), axis=0) @@ -230,28 +260,30 @@ class Field: self._scaling_factor = None # Variable names in JIT code - self.dimensions = kwargs.pop('dimensions', None) - self.indices = kwargs.pop('indices', None) - self.dataFiles = kwargs.pop('dataFiles', None) + self.dimensions = kwargs.pop("dimensions", None) + self.indices = kwargs.pop("indices", None) + self.dataFiles = kwargs.pop("dataFiles", None) if self.grid._add_last_periodic_data_timestep and self.dataFiles is not None: self.dataFiles = np.append(self.dataFiles, self.dataFiles[0]) - self._field_fb_class = kwargs.pop('FieldFileBuffer', None) - self.netcdf_engine = kwargs.pop('netcdf_engine', 'netcdf4') - self.netcdf_decodewarning = kwargs.pop('netcdf_decodewarning', True) + self._field_fb_class = kwargs.pop("FieldFileBuffer", None) + self.netcdf_engine = kwargs.pop("netcdf_engine", "netcdf4") + self.netcdf_decodewarning = kwargs.pop("netcdf_decodewarning", True) self.loaded_time_indices = [] - self.creation_log = kwargs.pop('creation_log', '') - self.chunksize = kwargs.pop('chunksize', None) - self.netcdf_chunkdims_name_map = kwargs.pop('chunkdims_name_map', None) - self.grid.depth_field = kwargs.pop('depth_field', None) + self.creation_log = kwargs.pop("creation_log", "") + self.chunksize = kwargs.pop("chunksize", None) + self.netcdf_chunkdims_name_map = kwargs.pop("chunkdims_name_map", None) + self.grid.depth_field = kwargs.pop("depth_field", None) - if self.grid.depth_field == 'not_yet_set': - assert self.grid.z4d, 'Providing the depth dimensions from another field data is only available for 4d S grids' + if self.grid.depth_field == "not_yet_set": + assert ( + self.grid.z4d + ), "Providing the depth dimensions from another field data is only available for 4d S grids" # data_full_zdim is the vertical dimension of the complete field data, ignoring the indices. # (data_full_zdim = grid.zdim if no indices are used, for A- and C-grids and for some B-grids). It is used for the B-grid, # since some datasets do not provide the deeper level of data (which is ignored by the interpolation). 
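# Illustrative sketch only (not part of this patch): how the data_full_zdim
# described in the comment above differs from grid.zdim when depth indices
# subset the file. All values here are hypothetical.
import numpy as np

indices = {"depth": np.arange(10, 20)}  # user selects 10 of the file's levels
file_zdim = 50                          # vertical size of the raw file data
grid_zdim = len(indices["depth"])       # zdim of the resulting grid: 10
data_full_zdim = file_zdim              # kept so B-grid interpolation can still
                                        # account for the ignored deepest level
assert grid_zdim <= data_full_zdim
# End of sketch.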
- self.data_full_zdim = kwargs.pop('data_full_zdim', None) - self.data_chunks = [] # the data buffer of the FileBuffer raw loaded data - shall be a list of C-contiguous arrays + self.data_full_zdim = kwargs.pop("data_full_zdim", None) + self.data_chunks = [] # the data buffer of the FileBuffer raw loaded data - shall be a list of C-contiguous arrays self.c_data_chunks = [] # C-pointers to the data_chunks array self.nchunks = [] self.chunk_set = False @@ -264,8 +296,7 @@ class Field: if isinstance(filenames, str) or not isinstance(filenames, collections.abc.Iterable): return [filenames] elif isinstance(filenames, dict): - assert dim in filenames.keys(), \ - 'filename dimension keys must be lon, lat, depth or data' + assert dim in filenames.keys(), "filename dimension keys must be lon, lat, depth or data" filename = filenames[dim] if isinstance(filename, str): return [filename] @@ -275,13 +306,16 @@ class Field: return filenames @staticmethod - def collect_timeslices(timestamps, data_filenames, _grid_fb_class, dimensions, indices, netcdf_engine, - netcdf_decodewarning=True): + def collect_timeslices( + timestamps, data_filenames, _grid_fb_class, dimensions, indices, netcdf_engine, netcdf_decodewarning=True + ): if timestamps is not None: dataFiles = [] for findex in range(len(data_filenames)): stamps_in_file = 1 if isinstance(timestamps[findex], (int, np.datetime64)) else len(timestamps[findex]) - for f in [data_filenames[findex], ] * stamps_in_file: + for f in [ + data_filenames[findex], + ] * stamps_in_file: dataFiles.append(f) timeslices = np.array([stamp for file in timestamps for stamp in file]) time = timeslices @@ -289,8 +323,9 @@ class Field: timeslices = [] dataFiles = [] for fname in data_filenames: - with _grid_fb_class(fname, dimensions, indices, netcdf_engine=netcdf_engine, - netcdf_decodewarning=netcdf_decodewarning) as filebuffer: + with _grid_fb_class( + fname, dimensions, indices, netcdf_engine=netcdf_engine, netcdf_decodewarning=netcdf_decodewarning + ) as filebuffer: ftime = filebuffer.time timeslices.append(ftime) dataFiles.append([fname] * len(ftime)) @@ -303,13 +338,26 @@ class Field: if not np.all((time[1:] - time[:-1]) > 0): id_not_ordered = np.where(time[1:] < time[:-1])[0][0] - raise AssertionError(f'Please make sure your netCDF files are ordered in time. First pair of non-ordered files: {dataFiles[id_not_ordered]}, {dataFiles[id_not_ordered + 1]}') + raise AssertionError( + f"Please make sure your netCDF files are ordered in time. First pair of non-ordered files: {dataFiles[id_not_ordered]}, {dataFiles[id_not_ordered + 1]}" + ) return time, time_origin, timeslices, dataFiles @classmethod - def from_netcdf(cls, filenames, variable, dimensions, indices=None, grid=None, - mesh='spherical', timestamps=None, allow_time_extrapolation=None, time_periodic=False, - deferred_load=True, **kwargs): + def from_netcdf( + cls, + filenames, + variable, + dimensions, + indices=None, + grid=None, + mesh="spherical", + timestamps=None, + allow_time_extrapolation=None, + time_periodic=False, + deferred_load=True, + **kwargs, + ): """Create field from netCDF file. Parameters @@ -372,116 +420,138 @@ class Field: # Ensure the timestamps array is compatible with the user-provided datafiles. if timestamps is not None: if isinstance(filenames, list): - assert len(filenames) == len(timestamps), 'Outer dimension of timestamps should correspond to number of files.' + assert len(filenames) == len( + timestamps + ), "Outer dimension of timestamps should correspond to number of files." 
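# Illustrative sketch only (not part of this patch): the shape contract that
# the assert above (and the dict branch below) enforces. File names and stamp
# values are hypothetical.
import numpy as np

filenames = ["jan.nc", "feb.nc"]  # two data files
timestamps = [
    np.array([0.0, 86400.0]),     # stamps recorded in jan.nc
    np.array([172800.0]),         # stamps recorded in feb.nc
]
assert len(filenames) == len(timestamps)  # outer dimensions must match
assert all(isinstance(t, (list, np.ndarray)) for t in timestamps)
# End of sketch.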
elif isinstance(filenames, dict): for k in filenames.keys(): - if k not in ['lat', 'lon', 'depth', 'time']: + if k not in ["lat", "lon", "depth", "time"]: if isinstance(filenames[k], list): - assert (len(filenames[k]) == len(timestamps)), 'Outer dimension of timestamps should correspond to number of files.' + assert len(filenames[k]) == len( + timestamps + ), "Outer dimension of timestamps should correspond to number of files." else: - assert len(timestamps) == 1, 'Outer dimension of timestamps should correspond to number of files.' + assert ( + len(timestamps) == 1 + ), "Outer dimension of timestamps should correspond to number of files." for t in timestamps: - assert isinstance(t, (list, np.ndarray)), 'timestamps should be a list for each file' + assert isinstance(t, (list, np.ndarray)), "timestamps should be a list for each file" else: - raise TypeError("Filenames type is inconsistent with manual timestamp provision." - + "Should be dict or list") + raise TypeError( + "Filenames type is inconsistent with manual timestamp provision." + "Should be dict or list" + ) if isinstance(variable, str): # for backward compatibility with Parcels < 2.0.0 variable = (variable, variable) elif isinstance(variable, dict): - assert len(variable) == 1, 'Field.from_netcdf() supports only one variable at a time. Use FieldSet.from_netcdf() for multiple variables.' + assert ( + len(variable) == 1 + ), "Field.from_netcdf() supports only one variable at a time. Use FieldSet.from_netcdf() for multiple variables." variable = tuple(variable.items())[0] - assert len(variable) == 2, 'The variable tuple must have length 2. Use FieldSet.from_netcdf() for multiple variables' + assert ( + len(variable) == 2 + ), "The variable tuple must have length 2. Use FieldSet.from_netcdf() for multiple variables" - data_filenames = cls.get_dim_filenames(filenames, 'data') - lonlat_filename = cls.get_dim_filenames(filenames, 'lon') + data_filenames = cls.get_dim_filenames(filenames, "data") + lonlat_filename = cls.get_dim_filenames(filenames, "lon") if isinstance(filenames, dict): assert len(lonlat_filename) == 1 - if lonlat_filename != cls.get_dim_filenames(filenames, 'lat'): - raise NotImplementedError('longitude and latitude dimensions are currently processed together from one single file') + if lonlat_filename != cls.get_dim_filenames(filenames, "lat"): + raise NotImplementedError( + "longitude and latitude dimensions are currently processed together from one single file" + ) lonlat_filename = lonlat_filename[0] - if 'depth' in dimensions: - depth_filename = cls.get_dim_filenames(filenames, 'depth') + if "depth" in dimensions: + depth_filename = cls.get_dim_filenames(filenames, "depth") if isinstance(filenames, dict) and len(depth_filename) != 1: - raise NotImplementedError('Vertically adaptive meshes not implemented for from_netcdf()') + raise NotImplementedError("Vertically adaptive meshes not implemented for from_netcdf()") depth_filename = depth_filename[0] - netcdf_engine = kwargs.pop('netcdf_engine', 'netcdf4') - netcdf_decodewarning = kwargs.pop('netcdf_decodewarning', True) + netcdf_engine = kwargs.pop("netcdf_engine", "netcdf4") + netcdf_decodewarning = kwargs.pop("netcdf_decodewarning", True) indices = {} if indices is None else indices.copy() for ind in indices: if len(indices[ind]) == 0: - raise RuntimeError(f'Indices for {ind} can not be empty') - assert np.min(indices[ind]) >= 0, \ - ('Negative indices are currently not allowed in Parcels. 
' - + 'This is related to the non-increasing dimension it could generate ' - + 'if the domain goes from lon[-4] to lon[6] for example. ' - + 'Please raise an issue on https://github.com/OceanParcels/parcels/issues ' - + 'if you would need such feature implemented.') - - interp_method = kwargs.pop('interp_method', 'linear') + raise RuntimeError(f"Indices for {ind} can not be empty") + assert np.min(indices[ind]) >= 0, ( + "Negative indices are currently not allowed in Parcels. " + + "This is related to the non-increasing dimension it could generate " + + "if the domain goes from lon[-4] to lon[6] for example. " + + "Please raise an issue on https://github.com/OceanParcels/parcels/issues " + + "if you would need such feature implemented." + ) + + interp_method = kwargs.pop("interp_method", "linear") if type(interp_method) is dict: if variable[0] in interp_method: interp_method = interp_method[variable[0]] else: - raise RuntimeError(f'interp_method is a dictionary but {variable[0]} is not in it') + raise RuntimeError(f"interp_method is a dictionary but {variable[0]} is not in it") _grid_fb_class = NetcdfFileBuffer - with _grid_fb_class(lonlat_filename, dimensions, indices, netcdf_engine, - netcdf_decodewarning=netcdf_decodewarning) as filebuffer: + with _grid_fb_class( + lonlat_filename, dimensions, indices, netcdf_engine, netcdf_decodewarning=netcdf_decodewarning + ) as filebuffer: lon, lat = filebuffer.lonlat indices = filebuffer.indices # Check if parcels_mesh has been explicitly set in file - if 'parcels_mesh' in filebuffer.dataset.attrs: - mesh = filebuffer.dataset.attrs['parcels_mesh'] - - if 'depth' in dimensions: - with _grid_fb_class(depth_filename, dimensions, indices, netcdf_engine, interp_method=interp_method, - netcdf_decodewarning=netcdf_decodewarning) as filebuffer: + if "parcels_mesh" in filebuffer.dataset.attrs: + mesh = filebuffer.dataset.attrs["parcels_mesh"] + + if "depth" in dimensions: + with _grid_fb_class( + depth_filename, + dimensions, + indices, + netcdf_engine, + interp_method=interp_method, + netcdf_decodewarning=netcdf_decodewarning, + ) as filebuffer: filebuffer.name = filebuffer.parse_name(variable[1]) - if dimensions['depth'] == 'not_yet_set': + if dimensions["depth"] == "not_yet_set": depth = filebuffer.depth_dimensions - kwargs['depth_field'] = 'not_yet_set' + kwargs["depth_field"] = "not_yet_set" else: depth = filebuffer.depth data_full_zdim = filebuffer.data_full_zdim else: - indices['depth'] = [0] + indices["depth"] = [0] depth = np.zeros(1) data_full_zdim = 1 - kwargs['data_full_zdim'] = data_full_zdim + kwargs["data_full_zdim"] = data_full_zdim - if len(data_filenames) > 1 and 'time' not in dimensions and timestamps is None: - raise RuntimeError('Multiple files given but no time dimension specified') + if len(data_filenames) > 1 and "time" not in dimensions and timestamps is None: + raise RuntimeError("Multiple files given but no time dimension specified") if grid is None: # Concatenate time variable to determine overall dimension # across multiple files - time, time_origin, timeslices, dataFiles = cls.collect_timeslices(timestamps, data_filenames, - _grid_fb_class, dimensions, - indices, netcdf_engine, netcdf_decodewarning) + time, time_origin, timeslices, dataFiles = cls.collect_timeslices( + timestamps, data_filenames, _grid_fb_class, dimensions, indices, netcdf_engine, netcdf_decodewarning + ) grid = Grid.create_grid(lon, lat, depth, time, time_origin=time_origin, mesh=mesh) grid.timeslices = timeslices - kwargs['dataFiles'] = dataFiles - elif grid 
is not None and ('dataFiles' not in kwargs or kwargs['dataFiles'] is None): + kwargs["dataFiles"] = dataFiles + elif grid is not None and ("dataFiles" not in kwargs or kwargs["dataFiles"] is None): # ==== means: the field has a shared grid, but may have different data files, so we need to collect the # ==== correct file time series again. - _, _, _, dataFiles = cls.collect_timeslices(timestamps, data_filenames, _grid_fb_class, - dimensions, indices, netcdf_engine, netcdf_decodewarning) - kwargs['dataFiles'] = dataFiles + _, _, _, dataFiles = cls.collect_timeslices( + timestamps, data_filenames, _grid_fb_class, dimensions, indices, netcdf_engine, netcdf_decodewarning + ) + kwargs["dataFiles"] = dataFiles - chunksize = kwargs.get('chunksize', None) + chunksize = kwargs.get("chunksize", None) grid.chunksize = chunksize - if 'time' in indices: - logger.warning_once('time dimension in indices is not necessary anymore. It is then ignored.') + if "time" in indices: + logger.warning_once("time dimension in indices is not necessary anymore. It is then ignored.") - if 'full_load' in kwargs: # for backward compatibility with Parcels < v2.0.0 - deferred_load = not kwargs['full_load'] + if "full_load" in kwargs: # for backward compatibility with Parcels < v2.0.0 + deferred_load = not kwargs["full_load"] if grid.time.size <= 2 or deferred_load is False: deferred_load = False @@ -495,24 +565,33 @@ class Field: _field_fb_class = DeferredNetcdfFileBuffer else: _field_fb_class = NetcdfFileBuffer - kwargs['FieldFileBuffer'] = _field_fb_class + kwargs["FieldFileBuffer"] = _field_fb_class if not deferred_load: # Pre-allocate data before reading files into buffer data_list = [] ti = 0 for tslice, fname in zip(grid.timeslices, data_filenames): - with _field_fb_class(fname, dimensions, indices, netcdf_engine, - interp_method=interp_method, data_full_zdim=data_full_zdim, - chunksize=chunksize, netcdf_decodewarning=netcdf_decodewarning) as filebuffer: + with _field_fb_class( + fname, + dimensions, + indices, + netcdf_engine, + interp_method=interp_method, + data_full_zdim=data_full_zdim, + chunksize=chunksize, + netcdf_decodewarning=netcdf_decodewarning, + ) as filebuffer: # If Field.from_netcdf is called directly, it may not have a 'data' dimension # In that case, assume that 'name' is the data dimension filebuffer.name = filebuffer.parse_name(variable[1]) buffer_data = filebuffer.data if len(buffer_data.shape) == 4: - errormessage = (f'Field {filebuffer.name} expecting a data shape of [tdim={grid.tdim}, zdim={grid.zdim}, ' - f'ydim={grid.ydim - 2 * grid.meridional_halo}, xdim={grid.xdim - 2 * grid.zonal_halo}] ' - f'but got shape {buffer_data.shape}. Flag transpose=True could help to reorder the data.') + errormessage = ( + f"Field {filebuffer.name} expecting a data shape of [tdim={grid.tdim}, zdim={grid.zdim}, " + f"ydim={grid.ydim - 2 * grid.meridional_halo}, xdim={grid.xdim - 2 * grid.zonal_halo}] " + f"but got shape {buffer_data.shape}. Flag transpose=True could help to reorder the data." 
+ ) assert buffer_data.shape[0] == grid.tdim, errormessage assert buffer_data.shape[2] == grid.ydim - 2 * grid.meridional_halo, errormessage assert buffer_data.shape[3] == grid.xdim - 2 * grid.zonal_halo, errormessage @@ -520,7 +599,7 @@ class Field: if len(buffer_data.shape) == 2: data_list.append(buffer_data.reshape(sum(((len(tslice), 1), buffer_data.shape), ()))) elif len(buffer_data.shape) == 3: - if len(filebuffer.indices['depth']) > 1: + if len(filebuffer.indices["depth"]) > 1: data_list.append(buffer_data.reshape(sum(((1,), buffer_data.shape), ()))) else: if type(tslice) not in [list, np.ndarray, da.Array, xr.DataArray]: @@ -540,20 +619,28 @@ class Field: data.compute_shape(grid.xdim, grid.ydim, grid.zdim, grid.tdim, len(grid.timeslices)) if allow_time_extrapolation is None: - allow_time_extrapolation = False if 'time' in dimensions else True - - kwargs['dimensions'] = dimensions.copy() - kwargs['indices'] = indices - kwargs['time_periodic'] = time_periodic - kwargs['netcdf_engine'] = netcdf_engine - kwargs['netcdf_decodewarning'] = netcdf_decodewarning - - return cls(variable, data, grid=grid, timestamps=timestamps, - allow_time_extrapolation=allow_time_extrapolation, interp_method=interp_method, **kwargs) + allow_time_extrapolation = False if "time" in dimensions else True + + kwargs["dimensions"] = dimensions.copy() + kwargs["indices"] = indices + kwargs["time_periodic"] = time_periodic + kwargs["netcdf_engine"] = netcdf_engine + kwargs["netcdf_decodewarning"] = netcdf_decodewarning + + return cls( + variable, + data, + grid=grid, + timestamps=timestamps, + allow_time_extrapolation=allow_time_extrapolation, + interp_method=interp_method, + **kwargs, + ) @classmethod - def from_xarray(cls, da, name, dimensions, mesh='spherical', allow_time_extrapolation=None, - time_periodic=False, **kwargs): + def from_xarray( + cls, da, name, dimensions, mesh="spherical", allow_time_extrapolation=None, time_periodic=False, **kwargs + ): """Create field from xarray Variable. Parameters @@ -582,20 +669,26 @@ class Field: Keyword arguments passed to the :class:`Field` constructor. 
""" data = da.data - interp_method = kwargs.pop('interp_method', 'linear') + interp_method = kwargs.pop("interp_method", "linear") - time = da[dimensions['time']].values if 'time' in dimensions else np.array([0.]) - depth = da[dimensions['depth']].values if 'depth' in dimensions else np.array([0]) - lon = da[dimensions['lon']].values - lat = da[dimensions['lat']].values + time = da[dimensions["time"]].values if "time" in dimensions else np.array([0.0]) + depth = da[dimensions["depth"]].values if "depth" in dimensions else np.array([0]) + lon = da[dimensions["lon"]].values + lat = da[dimensions["lat"]].values time_origin = TimeConverter(time[0]) time = time_origin.reltime(time) grid = Grid.create_grid(lon, lat, depth, time, time_origin=time_origin, mesh=mesh) - kwargs['time_periodic'] = time_periodic - return cls(name, data, grid=grid, allow_time_extrapolation=allow_time_extrapolation, - interp_method=interp_method, **kwargs) + kwargs["time_periodic"] = time_periodic + return cls( + name, + data, + grid=grid, + allow_time_extrapolation=allow_time_extrapolation, + interp_method=interp_method, + **kwargs, + ) def reshape(self, data, transpose=False): # Ensure that field data is the right data type @@ -615,11 +708,15 @@ class Field: data = lib.squeeze(data) # First remove all length-1 dimensions in data, so that we can add them below if self.grid.xdim == 1 and len(data.shape) < 4: if lib == da: - raise NotImplementedError('Length-one dimensions with field chunking not implemented, as dask does not have an `expand_dims` method. Use chunksize=None') + raise NotImplementedError( + "Length-one dimensions with field chunking not implemented, as dask does not have an `expand_dims` method. Use chunksize=None" + ) data = lib.expand_dims(data, axis=-1) if self.grid.ydim == 1 and len(data.shape) < 4: if lib == da: - raise NotImplementedError('Length-one dimensions with field chunking not implemented, as dask does not have an `expand_dims` method. Use chunksize=None') + raise NotImplementedError( + "Length-one dimensions with field chunking not implemented, as dask does not have an `expand_dims` method. Use chunksize=None" + ) data = lib.expand_dims(data, axis=-2) if self.grid.tdim == 1: if len(data.shape) < 4: @@ -628,23 +725,33 @@ class Field: if len(data.shape) == 4: data = data.reshape(sum(((data.shape[0],), data.shape[2:]), ())) if len(data.shape) == 4: - errormessage = (f'Field {self.name} expecting a data shape of [tdim, zdim, ydim, xdim]. ' - 'Flag transpose=True could help to reorder the data.') + errormessage = ( + f"Field {self.name} expecting a data shape of [tdim, zdim, ydim, xdim]. " + "Flag transpose=True could help to reorder the data." + ) assert data.shape[0] == self.grid.tdim, errormessage assert data.shape[2] == self.grid.ydim - 2 * self.grid.meridional_halo, errormessage assert data.shape[3] == self.grid.xdim - 2 * self.grid.zonal_halo, errormessage - if self.gridindexingtype == 'pop': - assert data.shape[1] == self.grid.zdim or data.shape[1] == self.grid.zdim-1, errormessage + if self.gridindexingtype == "pop": + assert data.shape[1] == self.grid.zdim or data.shape[1] == self.grid.zdim - 1, errormessage else: assert data.shape[1] == self.grid.zdim, errormessage else: - assert (data.shape == (self.grid.tdim, - self.grid.ydim - 2 * self.grid.meridional_halo, - self.grid.xdim - 2 * self.grid.zonal_halo)), \ - (f'Field {self.name} expecting a data shape of [tdim, ydim, xdim]. 
' - 'Flag transpose=True could help to reorder the data.') + assert data.shape == ( + self.grid.tdim, + self.grid.ydim - 2 * self.grid.meridional_halo, + self.grid.xdim - 2 * self.grid.zonal_halo, + ), ( + f"Field {self.name} expecting a data shape of [tdim, ydim, xdim]. " + "Flag transpose=True could help to reorder the data." + ) if self.grid.meridional_halo > 0 or self.grid.zonal_halo > 0: - data = self.add_periodic_halo(zonal=self.grid.zonal_halo > 0, meridional=self.grid.meridional_halo > 0, halosize=max(self.grid.meridional_halo, self.grid.zonal_halo), data=data) + data = self.add_periodic_halo( + zonal=self.grid.zonal_halo > 0, + meridional=self.grid.meridional_halo > 0, + halosize=max(self.grid.meridional_halo, self.grid.zonal_halo), + data=data, + ) return data def set_scaling_factor(self, factor): @@ -663,7 +770,7 @@ class Field: * `Unit converters <../examples/tutorial_unitconverters.ipynb>`__ """ if self._scaling_factor: - raise NotImplementedError(f'Scaling factor for field {self.name} already defined.') + raise NotImplementedError(f"Scaling factor for field {self.name} already defined.") self._scaling_factor = factor if not self.grid.defer_load: self.data *= factor @@ -688,20 +795,26 @@ class Field: """ if not self.grid.cell_edge_sizes: if self.grid.gtype in (GridType.RectilinearZGrid, GridType.RectilinearSGrid): - self.grid.cell_edge_sizes['x'] = np.zeros((self.grid.ydim, self.grid.xdim), dtype=np.float32) - self.grid.cell_edge_sizes['y'] = np.zeros((self.grid.ydim, self.grid.xdim), dtype=np.float32) + self.grid.cell_edge_sizes["x"] = np.zeros((self.grid.ydim, self.grid.xdim), dtype=np.float32) + self.grid.cell_edge_sizes["y"] = np.zeros((self.grid.ydim, self.grid.xdim), dtype=np.float32) - x_conv = GeographicPolar() if self.grid.mesh == 'spherical' else UnitConverter() - y_conv = Geographic() if self.grid.mesh == 'spherical' else UnitConverter() + x_conv = GeographicPolar() if self.grid.mesh == "spherical" else UnitConverter() + y_conv = Geographic() if self.grid.mesh == "spherical" else UnitConverter() for y, (lat, dy) in enumerate(zip(self.grid.lat, np.gradient(self.grid.lat))): for x, (lon, dx) in enumerate(zip(self.grid.lon, np.gradient(self.grid.lon))): - self.grid.cell_edge_sizes['x'][y, x] = x_conv.to_source(dx, lon, lat, self.grid.depth[0]) - self.grid.cell_edge_sizes['y'][y, x] = y_conv.to_source(dy, lon, lat, self.grid.depth[0]) + self.grid.cell_edge_sizes["x"][y, x] = x_conv.to_source(dx, lon, lat, self.grid.depth[0]) + self.grid.cell_edge_sizes["y"][y, x] = y_conv.to_source(dy, lon, lat, self.grid.depth[0]) self.cell_edge_sizes = self.grid.cell_edge_sizes else: - logger.error(('Field.cell_edge_sizes() not implemented for ', self.grid.gtype, 'grids.', - 'You can provide Field.grid.cell_edge_sizes yourself', - 'by in e.g. NEMO using the e1u fields etc from the mesh_mask.nc file')) + logger.error( + ( + "Field.cell_edge_sizes() not implemented for ", + self.grid.gtype, + "grids.", + "You can provide Field.grid.cell_edge_sizes yourself", + "by in e.g. 
NEMO using the e1u fields etc from the mesh_mask.nc file", + ) + ) exit(-1) def cell_areas(self): @@ -711,7 +824,7 @@ class Field: """ if not self.grid.cell_edge_sizes: self.calc_cell_edge_sizes() - return self.grid.cell_edge_sizes['x'] * self.grid.cell_edge_sizes['y'] + return self.grid.cell_edge_sizes["x"] * self.grid.cell_edge_sizes["y"] def search_indices_vertical_z(self, z): grid = self.grid @@ -719,7 +832,7 @@ class Field: if grid.depth[-1] > grid.depth[0]: if z < grid.depth[0]: # Since MOM5 is indexed at cell bottom, allow z at depth[0] - dz where dz = (depth[1] - depth[0]) - if self.gridindexingtype == "mom5" and z > 2*grid.depth[0] - grid.depth[1]: + if self.gridindexingtype == "mom5" and z > 2 * grid.depth[0] - grid.depth[1]: return (-1, z / grid.depth[0]) else: raise FieldOutOfBoundSurfaceError(0, 0, z, field=self) @@ -740,35 +853,41 @@ class Field: zi = len(grid.depth) - 2 else: zi = depth_indices.argmin() - 1 if z <= grid.depth[0] else 0 - zeta = (z-grid.depth[zi]) / (grid.depth[zi+1]-grid.depth[zi]) + zeta = (z - grid.depth[zi]) / (grid.depth[zi + 1] - grid.depth[zi]) return (zi, zeta) def search_indices_vertical_s(self, x, y, z, xi, yi, xsi, eta, ti, time): grid = self.grid - if self.interp_method in ['bgrid_velocity', 'bgrid_w_velocity', 'bgrid_tracer']: + if self.interp_method in ["bgrid_velocity", "bgrid_w_velocity", "bgrid_tracer"]: xsi = 1 eta = 1 if time < grid.time[ti]: ti -= 1 if grid.z4d: - if ti == len(grid.time)-1: - depth_vector = (1-xsi)*(1-eta) * grid.depth[-1, :, yi, xi] + \ - xsi*(1-eta) * grid.depth[-1, :, yi, xi+1] + \ - xsi*eta * grid.depth[-1, :, yi+1, xi+1] + \ - (1-xsi)*eta * grid.depth[-1, :, yi+1, xi] + if ti == len(grid.time) - 1: + depth_vector = ( + (1 - xsi) * (1 - eta) * grid.depth[-1, :, yi, xi] + + xsi * (1 - eta) * grid.depth[-1, :, yi, xi + 1] + + xsi * eta * grid.depth[-1, :, yi + 1, xi + 1] + + (1 - xsi) * eta * grid.depth[-1, :, yi + 1, xi] + ) else: - dv2 = (1-xsi)*(1-eta) * grid.depth[ti:ti+2, :, yi, xi] + \ - xsi*(1-eta) * grid.depth[ti:ti+2, :, yi, xi+1] + \ - xsi*eta * grid.depth[ti:ti+2, :, yi+1, xi+1] + \ - (1-xsi)*eta * grid.depth[ti:ti+2, :, yi+1, xi] - tt = (time-grid.time[ti]) / (grid.time[ti+1]-grid.time[ti]) - assert tt >= 0 and tt <= 1, 'Vertical s grid is being wrongly interpolated in time' - depth_vector = dv2[0, :] * (1-tt) + dv2[1, :] * tt + dv2 = ( + (1 - xsi) * (1 - eta) * grid.depth[ti : ti + 2, :, yi, xi] + + xsi * (1 - eta) * grid.depth[ti : ti + 2, :, yi, xi + 1] + + xsi * eta * grid.depth[ti : ti + 2, :, yi + 1, xi + 1] + + (1 - xsi) * eta * grid.depth[ti : ti + 2, :, yi + 1, xi] + ) + tt = (time - grid.time[ti]) / (grid.time[ti + 1] - grid.time[ti]) + assert tt >= 0 and tt <= 1, "Vertical s grid is being wrongly interpolated in time" + depth_vector = dv2[0, :] * (1 - tt) + dv2[1, :] * tt else: - depth_vector = (1-xsi)*(1-eta) * grid.depth[:, yi, xi] + \ - xsi*(1-eta) * grid.depth[:, yi, xi+1] + \ - xsi*eta * grid.depth[:, yi+1, xi+1] + \ - (1-xsi)*eta * grid.depth[:, yi+1, xi] + depth_vector = ( + (1 - xsi) * (1 - eta) * grid.depth[:, yi, xi] + + xsi * (1 - eta) * grid.depth[:, yi, xi + 1] + + xsi * eta * grid.depth[:, yi + 1, xi + 1] + + (1 - xsi) * eta * grid.depth[:, yi + 1, xi] + ) z = np.float32(z) if depth_vector[-1] > depth_vector[0]: @@ -779,7 +898,7 @@ class Field: zi = depth_indices.argmin() - 1 if z >= depth_vector[0] else 0 if z < depth_vector[zi]: raise FieldOutOfBoundSurfaceError(0, 0, z, field=self) - elif z > depth_vector[zi+1]: + elif z > depth_vector[zi + 1]: raise FieldOutOfBoundError(x, y, z, 
field=self) else: depth_indices = depth_vector >= z @@ -789,26 +908,26 @@ class Field: zi = depth_indices.argmin() - 1 if z <= depth_vector[0] else 0 if z > depth_vector[zi]: raise FieldOutOfBoundSurfaceError(0, 0, z, field=self) - elif z < depth_vector[zi+1]: + elif z < depth_vector[zi + 1]: raise FieldOutOfBoundError(x, y, z, field=self) - zeta = (z - depth_vector[zi]) / (depth_vector[zi+1]-depth_vector[zi]) + zeta = (z - depth_vector[zi]) / (depth_vector[zi + 1] - depth_vector[zi]) return (zi, zeta) def reconnect_bnd_indices(self, xi, yi, xdim, ydim, sphere_mesh): if xi < 0: if sphere_mesh: - xi = xdim-2 + xi = xdim - 2 else: xi = 0 - if xi > xdim-2: + if xi > xdim - 2: if sphere_mesh: xi = 0 else: - xi = xdim-2 + xi = xdim - 2 if yi < 0: yi = 0 - if yi > ydim-2: - yi = ydim-2 + if yi > ydim - 2: + yi = ydim - 2 if sphere_mesh: xi = xdim - xi return xi, yi @@ -823,24 +942,24 @@ class Field: raise FieldOutOfBoundError(x, y, z, field=self) if grid.xdim > 1: - if grid.mesh != 'spherical': + if grid.mesh != "spherical": lon_index = grid.lon < x if lon_index.all(): xi = len(grid.lon) - 2 else: xi = lon_index.argmin() - 1 if lon_index.any() else 0 - xsi = (x-grid.lon[xi]) / (grid.lon[xi+1]-grid.lon[xi]) + xsi = (x - grid.lon[xi]) / (grid.lon[xi + 1] - grid.lon[xi]) if xsi < 0: xi -= 1 - xsi = (x-grid.lon[xi]) / (grid.lon[xi+1]-grid.lon[xi]) + xsi = (x - grid.lon[xi]) / (grid.lon[xi + 1] - grid.lon[xi]) elif xsi > 1: xi += 1 - xsi = (x-grid.lon[xi]) / (grid.lon[xi+1]-grid.lon[xi]) + xsi = (x - grid.lon[xi]) / (grid.lon[xi + 1] - grid.lon[xi]) else: lon_fixed = grid.lon.copy() indices = lon_fixed >= lon_fixed[0] if not indices.all(): - lon_fixed[indices.argmin():] += 360 + lon_fixed[indices.argmin() :] += 360 if x < lon_fixed[0]: lon_fixed -= 360 @@ -849,13 +968,13 @@ class Field: xi = len(lon_fixed) - 2 else: xi = lon_index.argmin() - 1 if lon_index.any() else 0 - xsi = (x-lon_fixed[xi]) / (lon_fixed[xi+1]-lon_fixed[xi]) + xsi = (x - lon_fixed[xi]) / (lon_fixed[xi + 1] - lon_fixed[xi]) if xsi < 0: xi -= 1 - xsi = (x-lon_fixed[xi]) / (lon_fixed[xi+1]-lon_fixed[xi]) + xsi = (x - lon_fixed[xi]) / (lon_fixed[xi + 1] - lon_fixed[xi]) elif xsi > 1: xi += 1 - xsi = (x-lon_fixed[xi]) / (lon_fixed[xi+1]-lon_fixed[xi]) + xsi = (x - lon_fixed[xi]) / (lon_fixed[xi + 1] - lon_fixed[xi]) else: xi, xsi = -1, 0 @@ -866,13 +985,13 @@ class Field: else: yi = lat_index.argmin() - 1 if lat_index.any() else 0 - eta = (y-grid.lat[yi]) / (grid.lat[yi+1]-grid.lat[yi]) + eta = (y - grid.lat[yi]) / (grid.lat[yi + 1] - grid.lat[yi]) if eta < 0: yi -= 1 - eta = (y-grid.lat[yi]) / (grid.lat[yi+1]-grid.lat[yi]) + eta = (y - grid.lat[yi]) / (grid.lat[yi + 1] - grid.lat[yi]) elif eta > 1: yi += 1 - eta = (y-grid.lat[yi]) / (grid.lat[yi+1]-grid.lat[yi]) + eta = (y - grid.lat[yi]) / (grid.lat[yi + 1] - grid.lat[yi]) else: yi, eta = -1, 0 @@ -909,13 +1028,10 @@ class Field: yi = int(self.grid.ydim / 2) - 1 xsi = eta = -1 grid = self.grid - invA = np.array([[1, 0, 0, 0], - [-1, 1, 0, 0], - [-1, 0, 0, 1], - [1, -1, 1, -1]]) + invA = np.array([[1, 0, 0, 0], [-1, 1, 0, 0], [-1, 0, 0, 1], [1, -1, 1, -1]]) maxIterSearch = 1e6 it = 0 - tol = 1.e-10 + tol = 1.0e-10 if not grid.zonal_periodic: if x < grid.lonlat_minmax[0] or x > grid.lonlat_minmax[1]: if grid.lon[0, 0] < grid.lon[0, -1]: @@ -925,52 +1041,52 @@ class Field: if y < grid.lonlat_minmax[2] or y > grid.lonlat_minmax[3]: raise FieldOutOfBoundError(x, y, z, field=self) - while xsi < -tol or xsi > 1+tol or eta < -tol or eta > 1+tol: - px = np.array([grid.lon[yi, xi], 
grid.lon[yi, xi+1], grid.lon[yi+1, xi+1], grid.lon[yi+1, xi]]) - if grid.mesh == 'spherical': - px[0] = px[0]+360 if px[0] < x-225 else px[0] - px[0] = px[0]-360 if px[0] > x+225 else px[0] - px[1:] = np.where(px[1:] - px[0] > 180, px[1:]-360, px[1:]) - px[1:] = np.where(-px[1:] + px[0] > 180, px[1:]+360, px[1:]) - py = np.array([grid.lat[yi, xi], grid.lat[yi, xi+1], grid.lat[yi+1, xi+1], grid.lat[yi+1, xi]]) + while xsi < -tol or xsi > 1 + tol or eta < -tol or eta > 1 + tol: + px = np.array([grid.lon[yi, xi], grid.lon[yi, xi + 1], grid.lon[yi + 1, xi + 1], grid.lon[yi + 1, xi]]) + if grid.mesh == "spherical": + px[0] = px[0] + 360 if px[0] < x - 225 else px[0] + px[0] = px[0] - 360 if px[0] > x + 225 else px[0] + px[1:] = np.where(px[1:] - px[0] > 180, px[1:] - 360, px[1:]) + px[1:] = np.where(-px[1:] + px[0] > 180, px[1:] + 360, px[1:]) + py = np.array([grid.lat[yi, xi], grid.lat[yi, xi + 1], grid.lat[yi + 1, xi + 1], grid.lat[yi + 1, xi]]) a = np.dot(invA, px) b = np.dot(invA, py) - aa = a[3]*b[2] - a[2]*b[3] - bb = a[3]*b[0] - a[0]*b[3] + a[1]*b[2] - a[2]*b[1] + x*b[3] - y*a[3] - cc = a[1]*b[0] - a[0]*b[1] + x*b[1] - y*a[1] + aa = a[3] * b[2] - a[2] * b[3] + bb = a[3] * b[0] - a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + x * b[3] - y * a[3] + cc = a[1] * b[0] - a[0] * b[1] + x * b[1] - y * a[1] if abs(aa) < 1e-12: # Rectilinear cell, or quasi eta = -cc / bb else: - det2 = bb*bb-4*aa*cc + det2 = bb * bb - 4 * aa * cc if det2 > 0: # so, if det is nan we keep the xsi, eta from previous iter det = np.sqrt(det2) - eta = (-bb+det)/(2*aa) - if abs(a[1]+a[3]*eta) < 1e-12: # this happens when recti cell rotated of 90deg - xsi = ((y-py[0])/(py[1]-py[0]) + (y-py[3])/(py[2]-py[3])) * .5 + eta = (-bb + det) / (2 * aa) + if abs(a[1] + a[3] * eta) < 1e-12: # this happens when recti cell rotated of 90deg + xsi = ((y - py[0]) / (py[1] - py[0]) + (y - py[3]) / (py[2] - py[3])) * 0.5 else: - xsi = (x-a[0]-a[2]*eta) / (a[1]+a[3]*eta) + xsi = (x - a[0] - a[2] * eta) / (a[1] + a[3] * eta) if xsi < 0 and eta < 0 and xi == 0 and yi == 0: raise FieldOutOfBoundError(x, y, 0, field=self) - if xsi > 1 and eta > 1 and xi == grid.xdim-1 and yi == grid.ydim-1: + if xsi > 1 and eta > 1 and xi == grid.xdim - 1 and yi == grid.ydim - 1: raise FieldOutOfBoundError(x, y, 0, field=self) if xsi < -tol: xi -= 1 - elif xsi > 1+tol: + elif xsi > 1 + tol: xi += 1 if eta < -tol: yi -= 1 - elif eta > 1+tol: + elif eta > 1 + tol: yi += 1 (xi, yi) = self.reconnect_bnd_indices(xi, yi, grid.xdim, grid.ydim, grid.mesh) it += 1 if it > maxIterSearch: - print('Correct cell not found after %d iterations' % maxIterSearch) + print("Correct cell not found after %d iterations" % maxIterSearch) raise FieldOutOfBoundError(x, y, 0, field=self) - xsi = max(0., xsi) - eta = max(0., eta) - xsi = min(1., xsi) - eta = min(1., eta) + xsi = max(0.0, xsi) + eta = max(0.0, eta) + xsi = min(1.0, xsi) + eta = min(1.0, eta) if grid.zdim > 1 and not search2D: if grid.gtype == GridType.CurvilinearZGrid: @@ -1002,18 +1118,20 @@ class Field: def interpolator2D(self, ti, z, y, x, particle=None): (xsi, eta, _, xi, yi, _) = self.search_indices(x, y, z, particle=particle) - if self.interp_method == 'nearest': - xii = xi if xsi <= .5 else xi+1 - yii = yi if eta <= .5 else yi+1 + if self.interp_method == "nearest": + xii = xi if xsi <= 0.5 else xi + 1 + yii = yi if eta <= 0.5 else yi + 1 return self.data[ti, yii, xii] - elif self.interp_method in ['linear', 'bgrid_velocity', 'partialslip', 'freeslip']: - val = (1-xsi)*(1-eta) * self.data[ti, yi, xi] + \ - xsi*(1-eta) * 
self.data[ti, yi, xi+1] + \ - xsi*eta * self.data[ti, yi+1, xi+1] + \ - (1-xsi)*eta * self.data[ti, yi+1, xi] + elif self.interp_method in ["linear", "bgrid_velocity", "partialslip", "freeslip"]: + val = ( + (1 - xsi) * (1 - eta) * self.data[ti, yi, xi] + + xsi * (1 - eta) * self.data[ti, yi, xi + 1] + + xsi * eta * self.data[ti, yi + 1, xi + 1] + + (1 - xsi) * eta * self.data[ti, yi + 1, xi] + ) return val - elif self.interp_method == 'linear_invdist_land_tracer': - land = np.isclose(self.data[ti, yi:yi+2, xi:xi+2], 0.) + elif self.interp_method == "linear_invdist_land_tracer": + land = np.isclose(self.data[ti, yi : yi + 2, xi : xi + 2], 0.0) nb_land = np.sum(land) if nb_land == 4: return 0 @@ -1027,42 +1145,46 @@ class Field: if land[j][i] == 1: # index search led us directly onto land return 0 else: - return self.data[ti, yi+j, xi+i] + return self.data[ti, yi + j, xi + i] elif land[j][i] == 0: - val += self.data[ti, yi+j, xi+i] / distance + val += self.data[ti, yi + j, xi + i] / distance w_sum += 1 / distance return val / w_sum else: - val = (1 - xsi) * (1 - eta) * self.data[ti, yi, xi] + \ - xsi * (1 - eta) * self.data[ti, yi, xi + 1] + \ - xsi * eta * self.data[ti, yi + 1, xi + 1] + \ - (1 - xsi) * eta * self.data[ti, yi + 1, xi] + val = ( + (1 - xsi) * (1 - eta) * self.data[ti, yi, xi] + + xsi * (1 - eta) * self.data[ti, yi, xi + 1] + + xsi * eta * self.data[ti, yi + 1, xi + 1] + + (1 - xsi) * eta * self.data[ti, yi + 1, xi] + ) return val - elif self.interp_method in ['cgrid_tracer', 'bgrid_tracer']: - return self.data[ti, yi+1, xi+1] - elif self.interp_method == 'cgrid_velocity': - raise RuntimeError(f"{self.name} is a scalar field. cgrid_velocity interpolation method should be used for vector fields (e.g. FieldSet.UV)") + elif self.interp_method in ["cgrid_tracer", "bgrid_tracer"]: + return self.data[ti, yi + 1, xi + 1] + elif self.interp_method == "cgrid_velocity": + raise RuntimeError( + f"{self.name} is a scalar field. cgrid_velocity interpolation method should be used for vector fields (e.g. FieldSet.UV)" + ) else: - raise RuntimeError(self.interp_method+" is not implemented for 2D grids") + raise RuntimeError(self.interp_method + " is not implemented for 2D grids") def interpolator3D(self, ti, z, y, x, time, particle=None): (xsi, eta, zeta, xi, yi, zi) = self.search_indices(x, y, z, ti, time, particle=particle) - if self.interp_method == 'nearest': - xii = xi if xsi <= .5 else xi+1 - yii = yi if eta <= .5 else yi+1 - zii = zi if zeta <= .5 else zi+1 + if self.interp_method == "nearest": + xii = xi if xsi <= 0.5 else xi + 1 + yii = yi if eta <= 0.5 else yi + 1 + zii = zi if zeta <= 0.5 else zi + 1 return self.data[ti, zii, yii, xii] - elif self.interp_method == 'cgrid_velocity': + elif self.interp_method == "cgrid_velocity": # evaluating W velocity in c_grid - if self.gridindexingtype == 'nemo': - f0 = self.data[ti, zi, yi+1, xi+1] - f1 = self.data[ti, zi+1, yi+1, xi+1] - elif self.gridindexingtype == 'mitgcm': + if self.gridindexingtype == "nemo": + f0 = self.data[ti, zi, yi + 1, xi + 1] + f1 = self.data[ti, zi + 1, yi + 1, xi + 1] + elif self.gridindexingtype == "mitgcm": f0 = self.data[ti, zi, yi, xi] - f1 = self.data[ti, zi+1, yi, xi] - return (1-zeta) * f0 + zeta * f1 - elif self.interp_method == 'linear_invdist_land_tracer': - land = np.isclose(self.data[ti, zi:zi+2, yi:yi+2, xi:xi+2], 0.) 
+ f1 = self.data[ti, zi + 1, yi, xi] + return (1 - zeta) * f0 + zeta * f1 + elif self.interp_method == "linear_invdist_land_tracer": + land = np.isclose(self.data[ti, zi : zi + 2, yi : yi + 2, xi : xi + 2], 0.0) nb_land = np.sum(land) if nb_land == 8: return 0 @@ -1077,54 +1199,62 @@ class Field: if land[k][j][i] == 1: # index search led us directly onto land return 0 else: - return self.data[ti, zi+k, yi+j, xi+i] + return self.data[ti, zi + k, yi + j, xi + i] elif land[k][j][i] == 0: - val += self.data[ti, zi+k, yi+j, xi+i] / distance + val += self.data[ti, zi + k, yi + j, xi + i] / distance w_sum += 1 / distance return val / w_sum else: data = self.data[ti, zi, :, :] - f0 = (1 - xsi) * (1 - eta) * data[yi, xi] + \ - xsi * (1 - eta) * data[yi, xi + 1] + \ - xsi * eta * data[yi + 1, xi + 1] + \ - (1 - xsi) * eta * data[yi + 1, xi] + f0 = ( + (1 - xsi) * (1 - eta) * data[yi, xi] + + xsi * (1 - eta) * data[yi, xi + 1] + + xsi * eta * data[yi + 1, xi + 1] + + (1 - xsi) * eta * data[yi + 1, xi] + ) data = self.data[ti, zi + 1, :, :] - f1 = (1 - xsi) * (1 - eta) * data[yi, xi] + \ - xsi * (1 - eta) * data[yi, xi + 1] + \ - xsi * eta * data[yi + 1, xi + 1] + \ - (1 - xsi) * eta * data[yi + 1, xi] + f1 = ( + (1 - xsi) * (1 - eta) * data[yi, xi] + + xsi * (1 - eta) * data[yi, xi + 1] + + xsi * eta * data[yi + 1, xi + 1] + + (1 - xsi) * eta * data[yi + 1, xi] + ) return (1 - zeta) * f0 + zeta * f1 - elif self.interp_method in ['linear', 'bgrid_velocity', 'bgrid_w_velocity', 'partialslip', 'freeslip']: - if self.interp_method == 'bgrid_velocity': - if self.gridindexingtype == 'mom5': - zeta = 1. + elif self.interp_method in ["linear", "bgrid_velocity", "bgrid_w_velocity", "partialslip", "freeslip"]: + if self.interp_method == "bgrid_velocity": + if self.gridindexingtype == "mom5": + zeta = 1.0 else: - zeta = 0. - elif self.interp_method == 'bgrid_w_velocity': - eta = 1. - xsi = 1. 
+ zeta = 0.0 + elif self.interp_method == "bgrid_w_velocity": + eta = 1.0 + xsi = 1.0 data = self.data[ti, zi, :, :] - f0 = (1-xsi)*(1-eta) * data[yi, xi] + \ - xsi*(1-eta) * data[yi, xi+1] + \ - xsi*eta * data[yi+1, xi+1] + \ - (1-xsi)*eta * data[yi+1, xi] - if self.gridindexingtype == 'pop' and zi >= self.grid.zdim-2: + f0 = ( + (1 - xsi) * (1 - eta) * data[yi, xi] + + xsi * (1 - eta) * data[yi, xi + 1] + + xsi * eta * data[yi + 1, xi + 1] + + (1 - xsi) * eta * data[yi + 1, xi] + ) + if self.gridindexingtype == "pop" and zi >= self.grid.zdim - 2: # Since POP is indexed at cell top, allow linear interpolation of W to zero in lowest cell - return (1-zeta) * f0 - data = self.data[ti, zi+1, :, :] - f1 = (1-xsi)*(1-eta) * data[yi, xi] + \ - xsi*(1-eta) * data[yi, xi+1] + \ - xsi*eta * data[yi+1, xi+1] + \ - (1-xsi)*eta * data[yi+1, xi] - if self.interp_method == 'bgrid_w_velocity' and self.gridindexingtype == 'mom5' and zi == -1: + return (1 - zeta) * f0 + data = self.data[ti, zi + 1, :, :] + f1 = ( + (1 - xsi) * (1 - eta) * data[yi, xi] + + xsi * (1 - eta) * data[yi, xi + 1] + + xsi * eta * data[yi + 1, xi + 1] + + (1 - xsi) * eta * data[yi + 1, xi] + ) + if self.interp_method == "bgrid_w_velocity" and self.gridindexingtype == "mom5" and zi == -1: # Since MOM5 is indexed at cell bottom, allow linear interpolation of W to zero in uppermost cell return zeta * f1 else: - return (1-zeta) * f0 + zeta * f1 - elif self.interp_method in ['cgrid_tracer', 'bgrid_tracer']: - return self.data[ti, zi, yi+1, xi+1] + return (1 - zeta) * f0 + zeta * f1 + elif self.interp_method in ["cgrid_tracer", "bgrid_tracer"]: + return self.data[ti, zi, yi + 1, xi + 1] else: - raise RuntimeError(self.interp_method+" is not implemented for 3D grids") + raise RuntimeError(self.interp_method + " is not implemented for 3D grids") def temporal_interpolate_fullfield(self, ti, time): """Calculate the data of a field between two snapshots using linear interpolation. @@ -1139,12 +1269,12 @@ class Field: t0 = self.grid.time[ti] if time == t0: return self.data[ti, :] - elif ti+1 >= len(self.grid.time): - raise TimeExtrapolationError(time, field=self, msg='show_time') + elif ti + 1 >= len(self.grid.time): + raise TimeExtrapolationError(time, field=self, msg="show_time") else: - t1 = self.grid.time[ti+1] + t1 = self.grid.time[ti + 1] f0 = self.data[ti, :] - f1 = self.data[ti+1, :] + f1 = self.data[ti + 1, :] return f0 + (f1 - f0) * ((time - t0) / (t1 - t0)) def spatial_interpolation(self, ti, z, y, x, time, particle=None): @@ -1167,17 +1297,23 @@ class Field: Note that we normalize to either the first or the last index if the sampled value is outside the time value range. 
""" - if not self.time_periodic and not self.allow_time_extrapolation and (time < self.grid.time[0] or time > self.grid.time[-1]): + if ( + not self.time_periodic + and not self.allow_time_extrapolation + and (time < self.grid.time[0] or time > self.grid.time[-1]) + ): raise TimeExtrapolationError(time, field=self) time_index = self.grid.time <= time if self.time_periodic: if time_index.all() or np.logical_not(time_index).all(): - periods = int(math.floor((time-self.grid.time_full[0])/(self.grid.time_full[-1]-self.grid.time_full[0]))) + periods = int( + math.floor((time - self.grid.time_full[0]) / (self.grid.time_full[-1] - self.grid.time_full[0])) + ) if isinstance(self.grid.periods, c_int): self.grid.periods.value = periods else: self.grid.periods = periods - time -= periods*(self.grid.time_full[-1]-self.grid.time_full[0]) + time -= periods * (self.grid.time_full[-1] - self.grid.time_full[0]) time_index = self.grid.time <= time ti = time_index.argmin() - 1 if time_index.any() else 0 return (ti, periods) @@ -1194,8 +1330,10 @@ class Field: return (time_index.argmin() - 1 if time_index.any() else 0, 0) def _check_velocitysampling(self): - if self.name in ['U', 'V', 'W']: - logger.warning_once("Sampling of velocities should normally be done using fieldset.UV or fieldset.UVW object; tread carefully") + if self.name in ["U", "V", "W"]: + logger.warning_once( + "Sampling of velocities should normally be done using fieldset.UV or fieldset.UVW object; tread carefully" + ) def __getitem__(self, key): self._check_velocitysampling() @@ -1215,8 +1353,8 @@ class Field: scipy.interpolate to perform spatial interpolation. """ (ti, periods) = self.time_index(time) - time -= periods*(self.grid.time_full[-1]-self.grid.time_full[0]) - if ti < self.grid.tdim-1 and time > self.grid.time[ti]: + time -= periods * (self.grid.time_full[-1] - self.grid.time_full[0]) + if ti < self.grid.tdim - 1 and time > self.grid.time[ti]: f0 = self.spatial_interpolation(ti, z, y, x, time, particle=particle) f1 = self.spatial_interpolation(ti + 1, z, y, x, time, particle=particle) t0 = self.grid.time[ti] @@ -1266,10 +1404,14 @@ class Field: self.data_chunks = [None] * npartitions self.c_data_chunks = [None] * npartitions - self.grid.load_chunk = np.zeros(npartitions, dtype=c_int, order='C') + self.grid.load_chunk = np.zeros(npartitions, dtype=c_int, order="C") # self.grid.chunk_info format: number of dimensions (without tdim); number of chunks per dimensions; # chunksizes (the 0th dim sizes for all chunk of dim[0], then so on for next dims - self.grid.chunk_info = [[len(self.nchunks)-1], list(self.nchunks[1:]), sum(list(list(ci) for ci in chunks[1:]), [])] # noqa: RUF017 # TODO: Perhaps avoid quadratic list summation here + self.grid.chunk_info = [ + [len(self.nchunks) - 1], + list(self.nchunks[1:]), + sum(list(list(ci) for ci in chunks[1:]), []), + ] # noqa: RUF017 # TODO: Perhaps avoid quadratic list summation here self.grid.chunk_info = sum(self.grid.chunk_info, []) # noqa: RUF017 self.chunk_set = True @@ -1279,10 +1421,13 @@ class Field: g = self.grid if isinstance(self.data, da.core.Array): for block_id in range(len(self.grid.load_chunk)): - if g.load_chunk[block_id] == g.chunk_loading_requested \ - or g.load_chunk[block_id] in g.chunk_loaded and self.data_chunks[block_id] is None: + if ( + g.load_chunk[block_id] == g.chunk_loading_requested + or g.load_chunk[block_id] in g.chunk_loaded + and self.data_chunks[block_id] is None + ): block = self.get_block(block_id) - self.data_chunks[block_id] = 
np.array(self.data.blocks[(slice(self.grid.tdim),) + block], order='C') + self.data_chunks[block_id] = np.array(self.data.blocks[(slice(self.grid.tdim),) + block], order="C") elif g.load_chunk[block_id] == g.chunk_not_loaded: if isinstance(self.data_chunks, list): self.data_chunks[block_id] = None @@ -1296,37 +1441,52 @@ class Field: self.data_chunks[0, :] = None self.c_data_chunks[0] = None self.grid.load_chunk[0] = g.chunk_loaded_touched - self.data_chunks[0] = np.array(self.data, order='C') + self.data_chunks[0] = np.array(self.data, order="C") @property def ctypes_struct(self): """Returns a ctypes struct object containing all relevant pointers and sizes for this field.""" + # Ctypes struct corresponding to the type definition in parcels.h class CField(Structure): - _fields_ = [('xdim', c_int), ('ydim', c_int), ('zdim', c_int), - ('tdim', c_int), ('igrid', c_int), - ('allow_time_extrapolation', c_int), - ('time_periodic', c_int), - ('data_chunks', POINTER(POINTER(POINTER(c_float)))), - ('grid', POINTER(CGrid))] + _fields_ = [ + ("xdim", c_int), + ("ydim", c_int), + ("zdim", c_int), + ("tdim", c_int), + ("igrid", c_int), + ("allow_time_extrapolation", c_int), + ("time_periodic", c_int), + ("data_chunks", POINTER(POINTER(POINTER(c_float)))), + ("grid", POINTER(CGrid)), + ] # Create and populate the c-struct object allow_time_extrapolation = 1 if self.allow_time_extrapolation else 0 time_periodic = 1 if self.time_periodic else 0 for i in range(len(self.grid.load_chunk)): if self.grid.load_chunk[i] == self.grid.chunk_loading_requested: - raise ValueError('data_chunks should have been loaded by now if requested. grid.load_chunk[bid] cannot be 1') + raise ValueError( + "data_chunks should have been loaded by now if requested. grid.load_chunk[bid] cannot be 1" + ) if self.grid.load_chunk[i] in self.grid.chunk_loaded: - if not self.data_chunks[i].flags['C_CONTIGUOUS']: - self.data_chunks[i] = np.array(self.data_chunks[i], order='C') + if not self.data_chunks[i].flags["C_CONTIGUOUS"]: + self.data_chunks[i] = np.array(self.data_chunks[i], order="C") self.c_data_chunks[i] = self.data_chunks[i].ctypes.data_as(POINTER(POINTER(c_float))) else: self.c_data_chunks[i] = None - cstruct = CField(self.grid.xdim, self.grid.ydim, self.grid.zdim, - self.grid.tdim, self.igrid, allow_time_extrapolation, time_periodic, - (POINTER(POINTER(c_float)) * len(self.c_data_chunks))(*self.c_data_chunks), - pointer(self.grid.ctypes_struct)) + cstruct = CField( + self.grid.xdim, + self.grid.ydim, + self.grid.zdim, + self.grid.tdim, + self.igrid, + allow_time_extrapolation, + time_periodic, + (POINTER(POINTER(c_float)) * len(self.c_data_chunks))(*self.c_data_chunks), + pointer(self.grid.ctypes_struct), + ) return cstruct def add_periodic_halo(self, zonal, meridional, halosize=5, data=None): @@ -1357,23 +1517,23 @@ class Field: lib = np if isinstance(data, np.ndarray) else da if zonal: if len(data.shape) == 3: - data = lib.concatenate((data[:, :, -halosize:], data, - data[:, :, 0:halosize]), axis=len(data.shape)-1) + data = lib.concatenate((data[:, :, -halosize:], data, data[:, :, 0:halosize]), axis=len(data.shape) - 1) assert data.shape[2] == self.grid.xdim, "Third dim must be x." else: - data = lib.concatenate((data[:, :, :, -halosize:], data, - data[:, :, :, 0:halosize]), axis=len(data.shape) - 1) + data = lib.concatenate( + (data[:, :, :, -halosize:], data, data[:, :, :, 0:halosize]), axis=len(data.shape) - 1 + ) assert data.shape[3] == self.grid.xdim, "Fourth dim must be x." 
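
Aside: the zonal branch above wraps the field in x by prepending the last `halosize` columns and appending the first ones. A minimal standalone sketch of that operation, assuming a plain 3D (time, y, x) NumPy array — the shape and halo width here are illustrative, not taken from the diff:

    import numpy as np

    def add_zonal_halo(data, halosize=5):
        # Wrap in x: prepend the last `halosize` columns, append the first ones.
        return np.concatenate((data[..., -halosize:], data, data[..., :halosize]), axis=-1)

    field = np.arange(24, dtype=np.float32).reshape(1, 4, 6)  # (time, y, x)
    halo = add_zonal_halo(field, halosize=2)
    assert halo.shape == (1, 4, 10)  # x grows by 2 * halosize
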
self.lon = self.grid.lon self.lat = self.grid.lat if meridional: if len(data.shape) == 3: - data = lib.concatenate((data[:, -halosize:, :], data, - data[:, 0:halosize, :]), axis=len(data.shape)-2) + data = lib.concatenate((data[:, -halosize:, :], data, data[:, 0:halosize, :]), axis=len(data.shape) - 2) assert data.shape[1] == self.grid.ydim, "Second dim must be y." else: - data = lib.concatenate((data[:, :, -halosize:, :], data, - data[:, :, 0:halosize, :]), axis=len(data.shape) - 2) + data = lib.concatenate( + (data[:, :, -halosize:, :], data, data[:, :, 0:halosize, :]), axis=len(data.shape) - 2 + ) assert data.shape[2] == self.grid.ydim, "Third dim must be y." self.lat = self.grid.lat if dataNone: @@ -1391,39 +1551,42 @@ class Field: varname : str Name of the field, to be appended to the filename. (Default value = None) """ - filepath = str(Path(f'{filename}{self.name}.nc')) + filepath = str(Path(f"{filename}{self.name}.nc")) if varname is None: varname = self.name # Derive name of 'depth' variable for NEMO convention - vname_depth = 'depth%s' % self.name.lower() + vname_depth = "depth%s" % self.name.lower() # Create DataArray objects for file I/O if self.grid.gtype == GridType.RectilinearZGrid: - nav_lon = xr.DataArray(self.grid.lon + np.zeros((self.grid.ydim, self.grid.xdim), dtype=np.float32), - coords=[('y', self.grid.lat), ('x', self.grid.lon)]) - nav_lat = xr.DataArray(self.grid.lat.reshape(self.grid.ydim, 1) + np.zeros(self.grid.xdim, dtype=np.float32), - coords=[('y', self.grid.lat), ('x', self.grid.lon)]) + nav_lon = xr.DataArray( + self.grid.lon + np.zeros((self.grid.ydim, self.grid.xdim), dtype=np.float32), + coords=[("y", self.grid.lat), ("x", self.grid.lon)], + ) + nav_lat = xr.DataArray( + self.grid.lat.reshape(self.grid.ydim, 1) + np.zeros(self.grid.xdim, dtype=np.float32), + coords=[("y", self.grid.lat), ("x", self.grid.lon)], + ) elif self.grid.gtype == GridType.CurvilinearZGrid: - nav_lon = xr.DataArray(self.grid.lon, coords=[('y', range(self.grid.ydim)), - ('x', range(self.grid.xdim))]) - nav_lat = xr.DataArray(self.grid.lat, coords=[('y', range(self.grid.ydim)), - ('x', range(self.grid.xdim))]) + nav_lon = xr.DataArray(self.grid.lon, coords=[("y", range(self.grid.ydim)), ("x", range(self.grid.xdim))]) + nav_lat = xr.DataArray(self.grid.lat, coords=[("y", range(self.grid.ydim)), ("x", range(self.grid.xdim))]) else: - raise NotImplementedError('Field.write only implemented for RectilinearZGrid and CurvilinearZGrid') - - attrs = {'units': 'seconds since ' + str(self.grid.time_origin)} if self.grid.time_origin.calendar else {} - time_counter = xr.DataArray(self.grid.time, - dims=['time_counter'], - attrs=attrs) - vardata = xr.DataArray(self.data.reshape((self.grid.tdim, self.grid.zdim, self.grid.ydim, self.grid.xdim)), - dims=['time_counter', vname_depth, 'y', 'x']) + raise NotImplementedError("Field.write only implemented for RectilinearZGrid and CurvilinearZGrid") + + attrs = {"units": "seconds since " + str(self.grid.time_origin)} if self.grid.time_origin.calendar else {} + time_counter = xr.DataArray(self.grid.time, dims=["time_counter"], attrs=attrs) + vardata = xr.DataArray( + self.data.reshape((self.grid.tdim, self.grid.zdim, self.grid.ydim, self.grid.xdim)), + dims=["time_counter", vname_depth, "y", "x"], + ) # Create xarray Dataset and output to netCDF format - attrs = {'parcels_mesh': self.grid.mesh} - dset = xr.Dataset({varname: vardata}, coords={'nav_lon': nav_lon, - 'nav_lat': nav_lat, - 'time_counter': time_counter, - vname_depth: self.grid.depth}, 
attrs=attrs) - dset.to_netcdf(filepath, unlimited_dims='time_counter') + attrs = {"parcels_mesh": self.grid.mesh} + dset = xr.Dataset( + {varname: vardata}, + coords={"nav_lon": nav_lon, "nav_lat": nav_lat, "time_counter": time_counter, vname_depth: self.grid.depth}, + attrs=attrs, + ) + dset.to_netcdf(filepath, unlimited_dims="time_counter") def rescale_and_set_minmax(self, data): data[np.isnan(data)] = 0 @@ -1443,7 +1606,7 @@ class Field: del data[tindex] lib = np if isinstance(data, np.ndarray) else da if tindex == 0: - data = lib.concatenate([data_to_concat, data[tindex+1:, :]], axis=0) + data = lib.concatenate([data_to_concat, data[tindex + 1 :, :]], axis=0) elif tindex == 1: data = lib.concatenate([data[:tindex, :], data_to_concat], axis=0) else: @@ -1462,29 +1625,46 @@ class Field: timestamp = self.timestamps[np.where(ti < summedlen)[0][0]] rechunk_callback_fields = self.chunk_setup if isinstance(tindex, list) else None - filebuffer = self._field_fb_class(self.dataFiles[g.ti + tindex], self.dimensions, self.indices, - netcdf_engine=self.netcdf_engine, timestamp=timestamp, - interp_method=self.interp_method, - data_full_zdim=self.data_full_zdim, - chunksize=self.chunksize, - cast_data_dtype=self.cast_data_dtype, - rechunk_callback_fields=rechunk_callback_fields, - chunkdims_name_map=self.netcdf_chunkdims_name_map, - netcdf_decodewarning=self.netcdf_decodewarning) + filebuffer = self._field_fb_class( + self.dataFiles[g.ti + tindex], + self.dimensions, + self.indices, + netcdf_engine=self.netcdf_engine, + timestamp=timestamp, + interp_method=self.interp_method, + data_full_zdim=self.data_full_zdim, + chunksize=self.chunksize, + cast_data_dtype=self.cast_data_dtype, + rechunk_callback_fields=rechunk_callback_fields, + chunkdims_name_map=self.netcdf_chunkdims_name_map, + netcdf_decodewarning=self.netcdf_decodewarning, + ) filebuffer.__enter__() time_data = filebuffer.time time_data = g.time_origin.reltime(time_data) filebuffer.ti = (time_data <= g.time[tindex]).argmin() - 1 - if self.netcdf_engine != 'xarray': + if self.netcdf_engine != "xarray": filebuffer.name = filebuffer.parse_name(self.filebuffername) buffer_data = filebuffer.data lib = np if isinstance(buffer_data, np.ndarray) else da if len(buffer_data.shape) == 2: buffer_data = lib.reshape(buffer_data, sum(((1, 1), buffer_data.shape), ())) elif len(buffer_data.shape) == 3 and g.zdim > 1: - buffer_data = lib.reshape(buffer_data, sum(((1, ), buffer_data.shape), ())) + buffer_data = lib.reshape(buffer_data, sum(((1,), buffer_data.shape), ())) elif len(buffer_data.shape) == 3: - buffer_data = lib.reshape(buffer_data, sum(((buffer_data.shape[0], 1, ), buffer_data.shape[1:]), ())) + buffer_data = lib.reshape( + buffer_data, + sum( + ( + ( + buffer_data.shape[0], + 1, + ), + buffer_data.shape[1:], + ), + (), + ), + ) data = self.data_concatenate(data, buffer_data, tindex) self.filebuffers[tindex] = filebuffer return data @@ -1511,41 +1691,41 @@ class VectorField: self.U = U self.V = V self.W = W - self.vector_type = '3D' if W else '2D' + self.vector_type = "3D" if W else "2D" self.gridindexingtype = U.gridindexingtype - if self.U.interp_method == 'cgrid_velocity': - assert self.V.interp_method == 'cgrid_velocity', ( - 'Interpolation methods of U and V are not the same.') - assert self._check_grid_dimensions(U.grid, V.grid), ( - 'Dimensions of U and V are not the same.') - if self.vector_type == '3D': - assert self.W.interp_method == 'cgrid_velocity', ( - 'Interpolation methods of U and W are not the same.') - assert 
self._check_grid_dimensions(U.grid, W.grid), ( - 'Dimensions of U and W are not the same.') + if self.U.interp_method == "cgrid_velocity": + assert self.V.interp_method == "cgrid_velocity", "Interpolation methods of U and V are not the same." + assert self._check_grid_dimensions(U.grid, V.grid), "Dimensions of U and V are not the same." + if self.vector_type == "3D": + assert self.W.interp_method == "cgrid_velocity", "Interpolation methods of U and W are not the same." + assert self._check_grid_dimensions(U.grid, W.grid), "Dimensions of U and W are not the same." @staticmethod def _check_grid_dimensions(grid1, grid2): - return (np.allclose(grid1.lon, grid2.lon) and np.allclose(grid1.lat, grid2.lat) - and np.allclose(grid1.depth, grid2.depth) and np.allclose(grid1.time_full, grid2.time_full)) + return ( + np.allclose(grid1.lon, grid2.lon) + and np.allclose(grid1.lat, grid2.lat) + and np.allclose(grid1.depth, grid2.depth) + and np.allclose(grid1.time_full, grid2.time_full) + ) def dist(self, lon1, lon2, lat1, lat2, mesh, lat): - if mesh == 'spherical': - rad = np.pi/180. - deg2m = 1852 * 60. - return np.sqrt(((lon2-lon1)*deg2m*math.cos(rad * lat))**2 + ((lat2-lat1)*deg2m)**2) + if mesh == "spherical": + rad = np.pi / 180.0 + deg2m = 1852 * 60.0 + return np.sqrt(((lon2 - lon1) * deg2m * math.cos(rad * lat)) ** 2 + ((lat2 - lat1) * deg2m) ** 2) else: - return np.sqrt((lon2-lon1)**2 + (lat2-lat1)**2) + return np.sqrt((lon2 - lon1) ** 2 + (lat2 - lat1) ** 2) def jacobian(self, xsi, eta, px, py): - dphidxsi = [eta-1, 1-eta, eta, -eta] - dphideta = [xsi-1, -xsi, xsi, 1-xsi] + dphidxsi = [eta - 1, 1 - eta, eta, -eta] + dphideta = [xsi - 1, -xsi, xsi, 1 - xsi] dxdxsi = np.dot(px, dphidxsi) dxdeta = np.dot(px, dphideta) dydxsi = np.dot(py, dphidxsi) dydeta = np.dot(py, dphideta) - jac = dxdxsi*dydeta - dxdeta*dydxsi + jac = dxdxsi * dydeta - dxdeta * dydxsi return jac def spatial_c_grid_interpolation2D(self, ti, z, y, x, time, particle=None, applyConversion=True): @@ -1553,64 +1733,68 @@ class VectorField: (xsi, eta, zeta, xi, yi, zi) = self.U.search_indices(x, y, z, ti, time, particle=particle) if grid.gtype in [GridType.RectilinearSGrid, GridType.RectilinearZGrid]: - px = np.array([grid.lon[xi], grid.lon[xi+1], grid.lon[xi+1], grid.lon[xi]]) - py = np.array([grid.lat[yi], grid.lat[yi], grid.lat[yi+1], grid.lat[yi+1]]) + px = np.array([grid.lon[xi], grid.lon[xi + 1], grid.lon[xi + 1], grid.lon[xi]]) + py = np.array([grid.lat[yi], grid.lat[yi], grid.lat[yi + 1], grid.lat[yi + 1]]) else: - px = np.array([grid.lon[yi, xi], grid.lon[yi, xi+1], grid.lon[yi+1, xi+1], grid.lon[yi+1, xi]]) - py = np.array([grid.lat[yi, xi], grid.lat[yi, xi+1], grid.lat[yi+1, xi+1], grid.lat[yi+1, xi]]) - - if grid.mesh == 'spherical': - px[0] = px[0]+360 if px[0] < x-225 else px[0] - px[0] = px[0]-360 if px[0] > x+225 else px[0] - px[1:] = np.where(px[1:] - px[0] > 180, px[1:]-360, px[1:]) - px[1:] = np.where(-px[1:] + px[0] > 180, px[1:]+360, px[1:]) - xx = (1-xsi)*(1-eta) * px[0] + xsi*(1-eta) * px[1] + xsi*eta * px[2] + (1-xsi)*eta * px[3] - assert abs(xx-x) < 1e-4 - c1 = self.dist(px[0], px[1], py[0], py[1], grid.mesh, np.dot(i_u.phi2D_lin(xsi, 0.), py)) - c2 = self.dist(px[1], px[2], py[1], py[2], grid.mesh, np.dot(i_u.phi2D_lin(1., eta), py)) - c3 = self.dist(px[2], px[3], py[2], py[3], grid.mesh, np.dot(i_u.phi2D_lin(xsi, 1.), py)) - c4 = self.dist(px[3], px[0], py[3], py[0], grid.mesh, np.dot(i_u.phi2D_lin(0., eta), py)) + px = np.array([grid.lon[yi, xi], grid.lon[yi, xi + 1], grid.lon[yi + 1, xi + 1], grid.lon[yi + 
1, xi]]) + py = np.array([grid.lat[yi, xi], grid.lat[yi, xi + 1], grid.lat[yi + 1, xi + 1], grid.lat[yi + 1, xi]]) + + if grid.mesh == "spherical": + px[0] = px[0] + 360 if px[0] < x - 225 else px[0] + px[0] = px[0] - 360 if px[0] > x + 225 else px[0] + px[1:] = np.where(px[1:] - px[0] > 180, px[1:] - 360, px[1:]) + px[1:] = np.where(-px[1:] + px[0] > 180, px[1:] + 360, px[1:]) + xx = (1 - xsi) * (1 - eta) * px[0] + xsi * (1 - eta) * px[1] + xsi * eta * px[2] + (1 - xsi) * eta * px[3] + assert abs(xx - x) < 1e-4 + c1 = self.dist(px[0], px[1], py[0], py[1], grid.mesh, np.dot(i_u.phi2D_lin(xsi, 0.0), py)) + c2 = self.dist(px[1], px[2], py[1], py[2], grid.mesh, np.dot(i_u.phi2D_lin(1.0, eta), py)) + c3 = self.dist(px[2], px[3], py[2], py[3], grid.mesh, np.dot(i_u.phi2D_lin(xsi, 1.0), py)) + c4 = self.dist(px[3], px[0], py[3], py[0], grid.mesh, np.dot(i_u.phi2D_lin(0.0, eta), py)) if grid.zdim == 1: - if self.gridindexingtype == 'nemo': - U0 = self.U.data[ti, yi+1, xi] * c4 - U1 = self.U.data[ti, yi+1, xi+1] * c2 - V0 = self.V.data[ti, yi, xi+1] * c1 - V1 = self.V.data[ti, yi+1, xi+1] * c3 - elif self.gridindexingtype == 'mitgcm': + if self.gridindexingtype == "nemo": + U0 = self.U.data[ti, yi + 1, xi] * c4 + U1 = self.U.data[ti, yi + 1, xi + 1] * c2 + V0 = self.V.data[ti, yi, xi + 1] * c1 + V1 = self.V.data[ti, yi + 1, xi + 1] * c3 + elif self.gridindexingtype == "mitgcm": U0 = self.U.data[ti, yi, xi] * c4 U1 = self.U.data[ti, yi, xi + 1] * c2 V0 = self.V.data[ti, yi, xi] * c1 V1 = self.V.data[ti, yi + 1, xi] * c3 else: - if self.gridindexingtype == 'nemo': - U0 = self.U.data[ti, zi, yi+1, xi] * c4 - U1 = self.U.data[ti, zi, yi+1, xi+1] * c2 - V0 = self.V.data[ti, zi, yi, xi+1] * c1 - V1 = self.V.data[ti, zi, yi+1, xi+1] * c3 - elif self.gridindexingtype == 'mitgcm': + if self.gridindexingtype == "nemo": + U0 = self.U.data[ti, zi, yi + 1, xi] * c4 + U1 = self.U.data[ti, zi, yi + 1, xi + 1] * c2 + V0 = self.V.data[ti, zi, yi, xi + 1] * c1 + V1 = self.V.data[ti, zi, yi + 1, xi + 1] * c3 + elif self.gridindexingtype == "mitgcm": U0 = self.U.data[ti, zi, yi, xi] * c4 U1 = self.U.data[ti, zi, yi, xi + 1] * c2 V0 = self.V.data[ti, zi, yi, xi] * c1 V1 = self.V.data[ti, zi, yi + 1, xi] * c3 - U = (1-xsi) * U0 + xsi * U1 - V = (1-eta) * V0 + eta * V1 - rad = np.pi/180. - deg2m = 1852 * 60. 
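
For context, `deg2m = 1852 * 60.0` used above is the metres-per-degree conversion (1852 m per nautical mile, i.e. per arc-minute, and 60 arc-minutes per degree); zonal distances are additionally scaled by cos(latitude). A minimal standalone sketch of the spherical `dist` computation, not part of the diff:

    import math

    def dist_spherical(lon1, lon2, lat1, lat2, lat):
        deg2m = 1852 * 60.0  # metres per degree of latitude
        rad = math.pi / 180.0
        dx = (lon2 - lon1) * deg2m * math.cos(rad * lat)  # zonal spacing shrinks with latitude
        dy = (lat2 - lat1) * deg2m
        return math.sqrt(dx**2 + dy**2)

    # One degree of longitude at 60N spans about half the distance it does at the equator:
    print(dist_spherical(0.0, 1.0, 60.0, 60.0, lat=60.0))  # ~55560 m
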
+ U = (1 - xsi) * U0 + xsi * U1 + V = (1 - eta) * V0 + eta * V1 + rad = np.pi / 180.0 + deg2m = 1852 * 60.0 if applyConversion: - meshJac = (deg2m * deg2m * math.cos(rad * y)) if grid.mesh == 'spherical' else 1 + meshJac = (deg2m * deg2m * math.cos(rad * y)) if grid.mesh == "spherical" else 1 else: - meshJac = deg2m if grid.mesh == 'spherical' else 1 + meshJac = deg2m if grid.mesh == "spherical" else 1 jac = self.jacobian(xsi, eta, px, py) * meshJac - u = ((-(1-eta) * U - (1-xsi) * V) * px[0] - + ((1-eta) * U - xsi * V) * px[1] - + (eta * U + xsi * V) * px[2] - + (-eta * U + (1-xsi) * V) * px[3]) / jac - v = ((-(1-eta) * U - (1-xsi) * V) * py[0] - + ((1-eta) * U - xsi * V) * py[1] - + (eta * U + xsi * V) * py[2] - + (-eta * U + (1-xsi) * V) * py[3]) / jac + u = ( + (-(1 - eta) * U - (1 - xsi) * V) * px[0] + + ((1 - eta) * U - xsi * V) * px[1] + + (eta * U + xsi * V) * px[2] + + (-eta * U + (1 - xsi) * V) * px[3] + ) / jac + v = ( + (-(1 - eta) * U - (1 - xsi) * V) * py[0] + + ((1 - eta) * U - xsi * V) * py[1] + + (eta * U + xsi * V) * py[2] + + (-eta * U + (1 - xsi) * V) * py[3] + ) / jac if isinstance(u, da.core.Array): u = u.compute() v = v.compute() @@ -1621,86 +1805,187 @@ class VectorField: (xsi, eta, zet, xi, yi, zi) = self.U.search_indices(x, y, z, ti, time, particle=particle) if grid.gtype in [GridType.RectilinearSGrid, GridType.RectilinearZGrid]: - px = np.array([grid.lon[xi], grid.lon[xi+1], grid.lon[xi+1], grid.lon[xi]]) - py = np.array([grid.lat[yi], grid.lat[yi], grid.lat[yi+1], grid.lat[yi+1]]) + px = np.array([grid.lon[xi], grid.lon[xi + 1], grid.lon[xi + 1], grid.lon[xi]]) + py = np.array([grid.lat[yi], grid.lat[yi], grid.lat[yi + 1], grid.lat[yi + 1]]) else: - px = np.array([grid.lon[yi, xi], grid.lon[yi, xi+1], grid.lon[yi+1, xi+1], grid.lon[yi+1, xi]]) - py = np.array([grid.lat[yi, xi], grid.lat[yi, xi+1], grid.lat[yi+1, xi+1], grid.lat[yi+1, xi]]) + px = np.array([grid.lon[yi, xi], grid.lon[yi, xi + 1], grid.lon[yi + 1, xi + 1], grid.lon[yi + 1, xi]]) + py = np.array([grid.lat[yi, xi], grid.lat[yi, xi + 1], grid.lat[yi + 1, xi + 1], grid.lat[yi + 1, xi]]) - if grid.mesh == 'spherical': - px[0] = px[0]+360 if px[0] < x-225 else px[0] - px[0] = px[0]-360 if px[0] > x+225 else px[0] - px[1:] = np.where(px[1:] - px[0] > 180, px[1:]-360, px[1:]) - px[1:] = np.where(-px[1:] + px[0] > 180, px[1:]+360, px[1:]) - xx = (1-xsi)*(1-eta) * px[0] + xsi*(1-eta) * px[1] + xsi*eta * px[2] + (1-xsi)*eta * px[3] - assert abs(xx-x) < 1e-4 + if grid.mesh == "spherical": + px[0] = px[0] + 360 if px[0] < x - 225 else px[0] + px[0] = px[0] - 360 if px[0] > x + 225 else px[0] + px[1:] = np.where(px[1:] - px[0] > 180, px[1:] - 360, px[1:]) + px[1:] = np.where(-px[1:] + px[0] > 180, px[1:] + 360, px[1:]) + xx = (1 - xsi) * (1 - eta) * px[0] + xsi * (1 - eta) * px[1] + xsi * eta * px[2] + (1 - xsi) * eta * px[3] + assert abs(xx - x) < 1e-4 px = np.concatenate((px, px)) py = np.concatenate((py, py)) if grid.z4d: - pz = np.array([grid.depth[0, zi, yi, xi], grid.depth[0, zi, yi, xi+1], grid.depth[0, zi, yi+1, xi+1], grid.depth[0, zi, yi+1, xi], - grid.depth[0, zi+1, yi, xi], grid.depth[0, zi+1, yi, xi+1], grid.depth[0, zi+1, yi+1, xi+1], grid.depth[0, zi+1, yi+1, xi]]) + pz = np.array( + [ + grid.depth[0, zi, yi, xi], + grid.depth[0, zi, yi, xi + 1], + grid.depth[0, zi, yi + 1, xi + 1], + grid.depth[0, zi, yi + 1, xi], + grid.depth[0, zi + 1, yi, xi], + grid.depth[0, zi + 1, yi, xi + 1], + grid.depth[0, zi + 1, yi + 1, xi + 1], + grid.depth[0, zi + 1, yi + 1, xi], + ] + ) else: - pz = 
np.array([grid.depth[zi, yi, xi], grid.depth[zi, yi, xi+1], grid.depth[zi, yi+1, xi+1], grid.depth[zi, yi+1, xi], - grid.depth[zi+1, yi, xi], grid.depth[zi+1, yi, xi+1], grid.depth[zi+1, yi+1, xi+1], grid.depth[zi+1, yi+1, xi]]) - - u0 = self.U.data[ti, zi, yi+1, xi] - u1 = self.U.data[ti, zi, yi+1, xi+1] - v0 = self.V.data[ti, zi, yi, xi+1] - v1 = self.V.data[ti, zi, yi+1, xi+1] - w0 = self.W.data[ti, zi, yi+1, xi+1] - w1 = self.W.data[ti, zi+1, yi+1, xi+1] - - U0 = u0 * i_u.jacobian3D_lin_face(px, py, pz, 0, eta, zet, 'zonal', grid.mesh) - U1 = u1 * i_u.jacobian3D_lin_face(px, py, pz, 1, eta, zet, 'zonal', grid.mesh) - V0 = v0 * i_u.jacobian3D_lin_face(px, py, pz, xsi, 0, zet, 'meridional', grid.mesh) - V1 = v1 * i_u.jacobian3D_lin_face(px, py, pz, xsi, 1, zet, 'meridional', grid.mesh) - W0 = w0 * i_u.jacobian3D_lin_face(px, py, pz, xsi, eta, 0, 'vertical', grid.mesh) - W1 = w1 * i_u.jacobian3D_lin_face(px, py, pz, xsi, eta, 1, 'vertical', grid.mesh) + pz = np.array( + [ + grid.depth[zi, yi, xi], + grid.depth[zi, yi, xi + 1], + grid.depth[zi, yi + 1, xi + 1], + grid.depth[zi, yi + 1, xi], + grid.depth[zi + 1, yi, xi], + grid.depth[zi + 1, yi, xi + 1], + grid.depth[zi + 1, yi + 1, xi + 1], + grid.depth[zi + 1, yi + 1, xi], + ] + ) + + u0 = self.U.data[ti, zi, yi + 1, xi] + u1 = self.U.data[ti, zi, yi + 1, xi + 1] + v0 = self.V.data[ti, zi, yi, xi + 1] + v1 = self.V.data[ti, zi, yi + 1, xi + 1] + w0 = self.W.data[ti, zi, yi + 1, xi + 1] + w1 = self.W.data[ti, zi + 1, yi + 1, xi + 1] + + U0 = u0 * i_u.jacobian3D_lin_face(px, py, pz, 0, eta, zet, "zonal", grid.mesh) + U1 = u1 * i_u.jacobian3D_lin_face(px, py, pz, 1, eta, zet, "zonal", grid.mesh) + V0 = v0 * i_u.jacobian3D_lin_face(px, py, pz, xsi, 0, zet, "meridional", grid.mesh) + V1 = v1 * i_u.jacobian3D_lin_face(px, py, pz, xsi, 1, zet, "meridional", grid.mesh) + W0 = w0 * i_u.jacobian3D_lin_face(px, py, pz, xsi, eta, 0, "vertical", grid.mesh) + W1 = w1 * i_u.jacobian3D_lin_face(px, py, pz, xsi, eta, 1, "vertical", grid.mesh) # Computing fluxes in half left hexahedron -> flux_u05 - xx = [px[0], (px[0]+px[1])/2, (px[2]+px[3])/2, px[3], px[4], (px[4]+px[5])/2, (px[6]+px[7])/2, px[7]] - yy = [py[0], (py[0]+py[1])/2, (py[2]+py[3])/2, py[3], py[4], (py[4]+py[5])/2, (py[6]+py[7])/2, py[7]] - zz = [pz[0], (pz[0]+pz[1])/2, (pz[2]+pz[3])/2, pz[3], pz[4], (pz[4]+pz[5])/2, (pz[6]+pz[7])/2, pz[7]] - flux_u0 = u0 * i_u.jacobian3D_lin_face(xx, yy, zz, 0, .5, .5, 'zonal', grid.mesh) - flux_v0_halfx = v0 * i_u.jacobian3D_lin_face(xx, yy, zz, .5, 0, .5, 'meridional', grid.mesh) - flux_v1_halfx = v1 * i_u.jacobian3D_lin_face(xx, yy, zz, .5, 1, .5, 'meridional', grid.mesh) - flux_w0_halfx = w0 * i_u.jacobian3D_lin_face(xx, yy, zz, .5, .5, 0, 'vertical', grid.mesh) - flux_w1_halfx = w1 * i_u.jacobian3D_lin_face(xx, yy, zz, .5, .5, 1, 'vertical', grid.mesh) + xx = [ + px[0], + (px[0] + px[1]) / 2, + (px[2] + px[3]) / 2, + px[3], + px[4], + (px[4] + px[5]) / 2, + (px[6] + px[7]) / 2, + px[7], + ] + yy = [ + py[0], + (py[0] + py[1]) / 2, + (py[2] + py[3]) / 2, + py[3], + py[4], + (py[4] + py[5]) / 2, + (py[6] + py[7]) / 2, + py[7], + ] + zz = [ + pz[0], + (pz[0] + pz[1]) / 2, + (pz[2] + pz[3]) / 2, + pz[3], + pz[4], + (pz[4] + pz[5]) / 2, + (pz[6] + pz[7]) / 2, + pz[7], + ] + flux_u0 = u0 * i_u.jacobian3D_lin_face(xx, yy, zz, 0, 0.5, 0.5, "zonal", grid.mesh) + flux_v0_halfx = v0 * i_u.jacobian3D_lin_face(xx, yy, zz, 0.5, 0, 0.5, "meridional", grid.mesh) + flux_v1_halfx = v1 * i_u.jacobian3D_lin_face(xx, yy, zz, 0.5, 1, 0.5, "meridional", grid.mesh) + 
flux_w0_halfx = w0 * i_u.jacobian3D_lin_face(xx, yy, zz, 0.5, 0.5, 0, "vertical", grid.mesh) + flux_w1_halfx = w1 * i_u.jacobian3D_lin_face(xx, yy, zz, 0.5, 0.5, 1, "vertical", grid.mesh) flux_u05 = flux_u0 + flux_v0_halfx - flux_v1_halfx + flux_w0_halfx - flux_w1_halfx # Computing fluxes in half front hexahedron -> flux_v05 - xx = [px[0], px[1], (px[1]+px[2])/2, (px[0]+px[3])/2, px[4], px[5], (px[5]+px[6])/2, (px[4]+px[7])/2] - yy = [py[0], py[1], (py[1]+py[2])/2, (py[0]+py[3])/2, py[4], py[5], (py[5]+py[6])/2, (py[4]+py[7])/2] - zz = [pz[0], pz[1], (pz[1]+pz[2])/2, (pz[0]+pz[3])/2, pz[4], pz[5], (pz[5]+pz[6])/2, (pz[4]+pz[7])/2] - flux_u0_halfy = u0 * i_u.jacobian3D_lin_face(xx, yy, zz, 0, .5, .5, 'zonal', grid.mesh) - flux_u1_halfy = u1 * i_u.jacobian3D_lin_face(xx, yy, zz, 1, .5, .5, 'zonal', grid.mesh) - flux_v0 = v0 * i_u.jacobian3D_lin_face(xx, yy, zz, .5, 0, .5, 'meridional', grid.mesh) - flux_w0_halfy = w0 * i_u.jacobian3D_lin_face(xx, yy, zz, .5, .5, 0, 'vertical', grid.mesh) - flux_w1_halfy = w1 * i_u.jacobian3D_lin_face(xx, yy, zz, .5, .5, 1, 'vertical', grid.mesh) + xx = [ + px[0], + px[1], + (px[1] + px[2]) / 2, + (px[0] + px[3]) / 2, + px[4], + px[5], + (px[5] + px[6]) / 2, + (px[4] + px[7]) / 2, + ] + yy = [ + py[0], + py[1], + (py[1] + py[2]) / 2, + (py[0] + py[3]) / 2, + py[4], + py[5], + (py[5] + py[6]) / 2, + (py[4] + py[7]) / 2, + ] + zz = [ + pz[0], + pz[1], + (pz[1] + pz[2]) / 2, + (pz[0] + pz[3]) / 2, + pz[4], + pz[5], + (pz[5] + pz[6]) / 2, + (pz[4] + pz[7]) / 2, + ] + flux_u0_halfy = u0 * i_u.jacobian3D_lin_face(xx, yy, zz, 0, 0.5, 0.5, "zonal", grid.mesh) + flux_u1_halfy = u1 * i_u.jacobian3D_lin_face(xx, yy, zz, 1, 0.5, 0.5, "zonal", grid.mesh) + flux_v0 = v0 * i_u.jacobian3D_lin_face(xx, yy, zz, 0.5, 0, 0.5, "meridional", grid.mesh) + flux_w0_halfy = w0 * i_u.jacobian3D_lin_face(xx, yy, zz, 0.5, 0.5, 0, "vertical", grid.mesh) + flux_w1_halfy = w1 * i_u.jacobian3D_lin_face(xx, yy, zz, 0.5, 0.5, 1, "vertical", grid.mesh) flux_v05 = flux_u0_halfy - flux_u1_halfy + flux_v0 + flux_w0_halfy - flux_w1_halfy # Computing fluxes in half lower hexahedron -> flux_w05 - xx = [px[0], px[1], px[2], px[3], (px[0]+px[4])/2, (px[1]+px[5])/2, (px[2]+px[6])/2, (px[3]+px[7])/2] - yy = [py[0], py[1], py[2], py[3], (py[0]+py[4])/2, (py[1]+py[5])/2, (py[2]+py[6])/2, (py[3]+py[7])/2] - zz = [pz[0], pz[1], pz[2], pz[3], (pz[0]+pz[4])/2, (pz[1]+pz[5])/2, (pz[2]+pz[6])/2, (pz[3]+pz[7])/2] - flux_u0_halfz = u0 * i_u.jacobian3D_lin_face(xx, yy, zz, 0, .5, .5, 'zonal', grid.mesh) - flux_u1_halfz = u1 * i_u.jacobian3D_lin_face(xx, yy, zz, 1, .5, .5, 'zonal', grid.mesh) - flux_v0_halfz = v0 * i_u.jacobian3D_lin_face(xx, yy, zz, .5, 0, .5, 'meridional', grid.mesh) - flux_v1_halfz = v1 * i_u.jacobian3D_lin_face(xx, yy, zz, .5, 1, .5, 'meridional', grid.mesh) - flux_w0 = w0 * i_u.jacobian3D_lin_face(xx, yy, zz, .5, .5, 0, 'vertical', grid.mesh) + xx = [ + px[0], + px[1], + px[2], + px[3], + (px[0] + px[4]) / 2, + (px[1] + px[5]) / 2, + (px[2] + px[6]) / 2, + (px[3] + px[7]) / 2, + ] + yy = [ + py[0], + py[1], + py[2], + py[3], + (py[0] + py[4]) / 2, + (py[1] + py[5]) / 2, + (py[2] + py[6]) / 2, + (py[3] + py[7]) / 2, + ] + zz = [ + pz[0], + pz[1], + pz[2], + pz[3], + (pz[0] + pz[4]) / 2, + (pz[1] + pz[5]) / 2, + (pz[2] + pz[6]) / 2, + (pz[3] + pz[7]) / 2, + ] + flux_u0_halfz = u0 * i_u.jacobian3D_lin_face(xx, yy, zz, 0, 0.5, 0.5, "zonal", grid.mesh) + flux_u1_halfz = u1 * i_u.jacobian3D_lin_face(xx, yy, zz, 1, 0.5, 0.5, "zonal", grid.mesh) + flux_v0_halfz = v0 * i_u.jacobian3D_lin_face(xx, yy, 
zz, 0.5, 0, 0.5, "meridional", grid.mesh) + flux_v1_halfz = v1 * i_u.jacobian3D_lin_face(xx, yy, zz, 0.5, 1, 0.5, "meridional", grid.mesh) + flux_w0 = w0 * i_u.jacobian3D_lin_face(xx, yy, zz, 0.5, 0.5, 0, "vertical", grid.mesh) flux_w05 = flux_u0_halfz - flux_u1_halfz + flux_v0_halfz - flux_v1_halfz + flux_w0 - surf_u05 = i_u.jacobian3D_lin_face(px, py, pz, .5, .5, .5, 'zonal', grid.mesh) - jac_u05 = i_u.jacobian3D_lin_face(px, py, pz, .5, eta, zet, 'zonal', grid.mesh) + surf_u05 = i_u.jacobian3D_lin_face(px, py, pz, 0.5, 0.5, 0.5, "zonal", grid.mesh) + jac_u05 = i_u.jacobian3D_lin_face(px, py, pz, 0.5, eta, zet, "zonal", grid.mesh) U05 = flux_u05 / surf_u05 * jac_u05 - surf_v05 = i_u.jacobian3D_lin_face(px, py, pz, .5, .5, .5, 'meridional', grid.mesh) - jac_v05 = i_u.jacobian3D_lin_face(px, py, pz, xsi, .5, zet, 'meridional', grid.mesh) + surf_v05 = i_u.jacobian3D_lin_face(px, py, pz, 0.5, 0.5, 0.5, "meridional", grid.mesh) + jac_v05 = i_u.jacobian3D_lin_face(px, py, pz, xsi, 0.5, zet, "meridional", grid.mesh) V05 = flux_v05 / surf_v05 * jac_v05 - surf_w05 = i_u.jacobian3D_lin_face(px, py, pz, .5, .5, .5, 'vertical', grid.mesh) - jac_w05 = i_u.jacobian3D_lin_face(px, py, pz, xsi, eta, .5, 'vertical', grid.mesh) + surf_w05 = i_u.jacobian3D_lin_face(px, py, pz, 0.5, 0.5, 0.5, "vertical", grid.mesh) + jac_w05 = i_u.jacobian3D_lin_face(px, py, pz, xsi, eta, 0.5, "vertical", grid.mesh) W05 = flux_w05 / surf_w05 * jac_w05 jac = i_u.jacobian3D_lin(px, py, pz, xsi, eta, zet, grid.mesh) @@ -1748,12 +2033,12 @@ class VectorField: def _is_land2D(self, di, yi, xi): if self.U.data.ndim == 3: if di < np.shape(self.U.data)[0]: - return np.isclose(self.U.data[di, yi, xi], 0.) and np.isclose(self.V.data[di, yi, xi], 0.) + return np.isclose(self.U.data[di, yi, xi], 0.0) and np.isclose(self.V.data[di, yi, xi], 0.0) else: return True else: if di < self.U.grid.zdim and yi < np.shape(self.U.data)[-2] and xi < np.shape(self.U.data)[-1]: - return np.isclose(self.U.data[0, di, yi, xi], 0.) and np.isclose(self.V.data[0, di, yi, xi], 0.) 
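
`_is_land2D` above treats a grid point as land when both velocity components are numerically zero there; the slip-interpolation code below uses this to detect land-adjacent cell faces. A standalone sketch of the test, with made-up arrays for illustration:

    import numpy as np

    def is_land(u, v, yi, xi):
        # A point counts as land if both velocity components vanish there.
        return bool(np.isclose(u[yi, xi], 0.0) and np.isclose(v[yi, xi], 0.0))

    u = np.array([[0.0, 0.3], [0.0, 0.1]])
    v = np.array([[0.0, 0.2], [0.0, 0.0]])
    print(is_land(u, v, 0, 0), is_land(u, v, 1, 1))  # True False
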
+ return np.isclose(self.U.data[0, di, yi, xi], 0.0) and np.isclose(self.V.data[0, di, yi, xi], 0.0) else: return True @@ -1762,80 +2047,110 @@ class VectorField: di = ti if self.U.grid.zdim == 1 else zi # general third dimension f_u, f_v, f_w = 1, 1, 1 - if self._is_land2D(di, yi, xi) and self._is_land2D(di, yi, xi+1) and self._is_land2D(di+1, yi, xi) \ - and self._is_land2D(di+1, yi, xi+1) and eta > 0: - if self.U.interp_method == 'partialslip': - f_u = f_u * (.5 + .5 * eta) / eta - if self.vector_type == '3D': - f_w = f_w * (.5 + .5 * eta) / eta - elif self.U.interp_method == 'freeslip': + if ( + self._is_land2D(di, yi, xi) + and self._is_land2D(di, yi, xi + 1) + and self._is_land2D(di + 1, yi, xi) + and self._is_land2D(di + 1, yi, xi + 1) + and eta > 0 + ): + if self.U.interp_method == "partialslip": + f_u = f_u * (0.5 + 0.5 * eta) / eta + if self.vector_type == "3D": + f_w = f_w * (0.5 + 0.5 * eta) / eta + elif self.U.interp_method == "freeslip": f_u = f_u / eta - if self.vector_type == '3D': + if self.vector_type == "3D": f_w = f_w / eta - if self._is_land2D(di, yi+1, xi) and self._is_land2D(di, yi+1, xi+1) and self._is_land2D(di+1, yi+1, xi) \ - and self._is_land2D(di+1, yi+1, xi+1) and eta < 1: - if self.U.interp_method == 'partialslip': - f_u = f_u * (1 - .5 * eta) / (1 - eta) - if self.vector_type == '3D': - f_w = f_w * (1 - .5 * eta) / (1 - eta) - elif self.U.interp_method == 'freeslip': + if ( + self._is_land2D(di, yi + 1, xi) + and self._is_land2D(di, yi + 1, xi + 1) + and self._is_land2D(di + 1, yi + 1, xi) + and self._is_land2D(di + 1, yi + 1, xi + 1) + and eta < 1 + ): + if self.U.interp_method == "partialslip": + f_u = f_u * (1 - 0.5 * eta) / (1 - eta) + if self.vector_type == "3D": + f_w = f_w * (1 - 0.5 * eta) / (1 - eta) + elif self.U.interp_method == "freeslip": f_u = f_u / (1 - eta) - if self.vector_type == '3D': + if self.vector_type == "3D": f_w = f_w / (1 - eta) - if self._is_land2D(di, yi, xi) and self._is_land2D(di, yi+1, xi) and self._is_land2D(di+1, yi, xi) \ - and self._is_land2D(di+1, yi+1, xi) and xsi > 0: - if self.U.interp_method == 'partialslip': - f_v = f_v * (.5 + .5 * xsi) / xsi - if self.vector_type == '3D': - f_w = f_w * (.5 + .5 * xsi) / xsi - elif self.U.interp_method == 'freeslip': + if ( + self._is_land2D(di, yi, xi) + and self._is_land2D(di, yi + 1, xi) + and self._is_land2D(di + 1, yi, xi) + and self._is_land2D(di + 1, yi + 1, xi) + and xsi > 0 + ): + if self.U.interp_method == "partialslip": + f_v = f_v * (0.5 + 0.5 * xsi) / xsi + if self.vector_type == "3D": + f_w = f_w * (0.5 + 0.5 * xsi) / xsi + elif self.U.interp_method == "freeslip": f_v = f_v / xsi - if self.vector_type == '3D': + if self.vector_type == "3D": f_w = f_w / xsi - if self._is_land2D(di, yi, xi+1) and self._is_land2D(di, yi+1, xi+1) and self._is_land2D(di+1, yi, xi+1) \ - and self._is_land2D(di+1, yi+1, xi+1) and xsi < 1: - if self.U.interp_method == 'partialslip': - f_v = f_v * (1 - .5 * xsi) / (1 - xsi) - if self.vector_type == '3D': - f_w = f_w * (1 - .5 * xsi) / (1 - xsi) - elif self.U.interp_method == 'freeslip': + if ( + self._is_land2D(di, yi, xi + 1) + and self._is_land2D(di, yi + 1, xi + 1) + and self._is_land2D(di + 1, yi, xi + 1) + and self._is_land2D(di + 1, yi + 1, xi + 1) + and xsi < 1 + ): + if self.U.interp_method == "partialslip": + f_v = f_v * (1 - 0.5 * xsi) / (1 - xsi) + if self.vector_type == "3D": + f_w = f_w * (1 - 0.5 * xsi) / (1 - xsi) + elif self.U.interp_method == "freeslip": f_v = f_v / (1 - xsi) - if self.vector_type == '3D': + if self.vector_type 
== "3D": f_w = f_w / (1 - xsi) if self.U.grid.zdim > 1: - if self._is_land2D(di, yi, xi) and self._is_land2D(di, yi, xi+1) and self._is_land2D(di, yi+1, xi) \ - and self._is_land2D(di, yi+1, xi+1) and zeta > 0: - if self.U.interp_method == 'partialslip': - f_u = f_u * (.5 + .5 * zeta) / zeta - f_v = f_v * (.5 + .5 * zeta) / zeta - elif self.U.interp_method == 'freeslip': + if ( + self._is_land2D(di, yi, xi) + and self._is_land2D(di, yi, xi + 1) + and self._is_land2D(di, yi + 1, xi) + and self._is_land2D(di, yi + 1, xi + 1) + and zeta > 0 + ): + if self.U.interp_method == "partialslip": + f_u = f_u * (0.5 + 0.5 * zeta) / zeta + f_v = f_v * (0.5 + 0.5 * zeta) / zeta + elif self.U.interp_method == "freeslip": f_u = f_u / zeta f_v = f_v / zeta - if self._is_land2D(di+1, yi, xi) and self._is_land2D(di+1, yi, xi+1) and self._is_land2D(di+1, yi+1, xi) \ - and self._is_land2D(di+1, yi+1, xi+1) and zeta < 1: - if self.U.interp_method == 'partialslip': - f_u = f_u * (1 - .5 * zeta) / (1 - zeta) - f_v = f_v * (1 - .5 * zeta) / (1 - zeta) - elif self.U.interp_method == 'freeslip': + if ( + self._is_land2D(di + 1, yi, xi) + and self._is_land2D(di + 1, yi, xi + 1) + and self._is_land2D(di + 1, yi + 1, xi) + and self._is_land2D(di + 1, yi + 1, xi + 1) + and zeta < 1 + ): + if self.U.interp_method == "partialslip": + f_u = f_u * (1 - 0.5 * zeta) / (1 - zeta) + f_v = f_v * (1 - 0.5 * zeta) / (1 - zeta) + elif self.U.interp_method == "freeslip": f_u = f_u / (1 - zeta) f_v = f_v / (1 - zeta) u = f_u * self.U.eval(time, z, y, x, particle, applyConversion=applyConversion) v = f_v * self.V.eval(time, z, y, x, particle, applyConversion=applyConversion) - if self.vector_type == '3D': + if self.vector_type == "3D": w = f_w * self.W.eval(time, z, y, x, particle, applyConversion=applyConversion) return u, v, w else: return u, v def eval(self, time, z, y, x, particle=None, applyConversion=True): - if self.U.interp_method not in ['cgrid_velocity', 'partialslip', 'freeslip']: + if self.U.interp_method not in ["cgrid_velocity", "partialslip", "freeslip"]: u = self.U.eval(time, z, y, x, particle=particle, applyConversion=False) v = self.V.eval(time, z, y, x, particle=particle, applyConversion=False) if applyConversion: u = self.U.units.to_target(u, x, y, z) v = self.V.units.to_target(v, x, y, z) - if self.vector_type == '3D': + if self.vector_type == "3D": w = self.W.eval(time, z, y, x, particle=particle, applyConversion=False) if applyConversion: w = self.W.units.to_target(w, x, y, z) @@ -1843,25 +2158,38 @@ class VectorField: else: return (u, v) else: - interp = {'cgrid_velocity': {'2D': self.spatial_c_grid_interpolation2D, '3D': self.spatial_c_grid_interpolation3D}, - 'partialslip': {'2D': self.spatial_slip_interpolation, '3D': self.spatial_slip_interpolation}, - 'freeslip': {'2D': self.spatial_slip_interpolation, '3D': self.spatial_slip_interpolation}} + interp = { + "cgrid_velocity": { + "2D": self.spatial_c_grid_interpolation2D, + "3D": self.spatial_c_grid_interpolation3D, + }, + "partialslip": {"2D": self.spatial_slip_interpolation, "3D": self.spatial_slip_interpolation}, + "freeslip": {"2D": self.spatial_slip_interpolation, "3D": self.spatial_slip_interpolation}, + } grid = self.U.grid (ti, periods) = self.U.time_index(time) - time -= periods*(grid.time_full[-1]-grid.time_full[0]) - if ti < grid.tdim-1 and time > grid.time[ti]: + time -= periods * (grid.time_full[-1] - grid.time_full[0]) + if ti < grid.tdim - 1 and time > grid.time[ti]: t0 = grid.time[ti] t1 = grid.time[ti + 1] - if self.vector_type == '3D': - 
(u0, v0, w0) = interp[self.U.interp_method]['3D'](ti, z, y, x, time, particle=particle, applyConversion=applyConversion) - (u1, v1, w1) = interp[self.U.interp_method]['3D'](ti + 1, z, y, x, time, particle=particle, applyConversion=applyConversion) + if self.vector_type == "3D": + (u0, v0, w0) = interp[self.U.interp_method]["3D"]( + ti, z, y, x, time, particle=particle, applyConversion=applyConversion + ) + (u1, v1, w1) = interp[self.U.interp_method]["3D"]( + ti + 1, z, y, x, time, particle=particle, applyConversion=applyConversion + ) w = w0 + (w1 - w0) * ((time - t0) / (t1 - t0)) else: - (u0, v0) = interp[self.U.interp_method]['2D'](ti, z, y, x, time, particle=particle, applyConversion=applyConversion) - (u1, v1) = interp[self.U.interp_method]['2D'](ti + 1, z, y, x, time, particle=particle, applyConversion=applyConversion) + (u0, v0) = interp[self.U.interp_method]["2D"]( + ti, z, y, x, time, particle=particle, applyConversion=applyConversion + ) + (u1, v1) = interp[self.U.interp_method]["2D"]( + ti + 1, z, y, x, time, particle=particle, applyConversion=applyConversion + ) u = u0 + (u1 - u0) * ((time - t0) / (t1 - t0)) v = v0 + (v1 - v0) * ((time - t0) / (t1 - t0)) - if self.vector_type == '3D': + if self.vector_type == "3D": return (u, v, w) else: return (u, v) @@ -1869,10 +2197,14 @@ class VectorField: # Skip temporal interpolation if time is outside # of the defined time range or if we have hit an # exact value in the time array. - if self.vector_type == '3D': - return interp[self.U.interp_method]['3D'](ti, z, y, x, grid.time[ti], particle=particle, applyConversion=applyConversion) + if self.vector_type == "3D": + return interp[self.U.interp_method]["3D"]( + ti, z, y, x, grid.time[ti], particle=particle, applyConversion=applyConversion + ) else: - return interp[self.U.interp_method]['2D'](ti, z, y, x, grid.time[ti], particle=particle, applyConversion=applyConversion) + return interp[self.U.interp_method]["2D"]( + ti, z, y, x, grid.time[ti], particle=particle, applyConversion=applyConversion + ) def __getitem__(self, key): try: @@ -1885,18 +2217,22 @@ class VectorField: def ccode_eval(self, varU, varV, varW, U, V, W, t, z, y, x): ccode_str = "" - if self.vector_type == '3D': - ccode_str = f"temporal_interpolationUVW({x}, {y}, {z}, {t}, {U.ccode_name}, {V.ccode_name}, {W.ccode_name}, " + \ - "&particles->xi[pnum*ngrid], &particles->yi[pnum*ngrid], &particles->zi[pnum*ngrid], &particles->ti[pnum*ngrid]," + \ - f"&{varU}, &{varV}, &{varW}, {U.interp_method.upper()}, {U.gridindexingtype.upper()})" + if self.vector_type == "3D": + ccode_str = ( + f"temporal_interpolationUVW({x}, {y}, {z}, {t}, {U.ccode_name}, {V.ccode_name}, {W.ccode_name}, " + + "&particles->xi[pnum*ngrid], &particles->yi[pnum*ngrid], &particles->zi[pnum*ngrid], &particles->ti[pnum*ngrid]," + + f"&{varU}, &{varV}, &{varW}, {U.interp_method.upper()}, {U.gridindexingtype.upper()})" + ) else: - ccode_str = f"temporal_interpolationUV({x}, {y}, {z}, {t}, {U.ccode_name}, {V.ccode_name}, " + \ - "&particles->xi[pnum*ngrid], &particles->yi[pnum*ngrid], &particles->zi[pnum*ngrid], &particles->ti[pnum*ngrid]," + \ - f" &{varU}, &{varV}, {U.interp_method.upper()}, {U.gridindexingtype.upper()})" + ccode_str = ( + f"temporal_interpolationUV({x}, {y}, {z}, {t}, {U.ccode_name}, {V.ccode_name}, " + + "&particles->xi[pnum*ngrid], &particles->yi[pnum*ngrid], &particles->zi[pnum*ngrid], &particles->ti[pnum*ngrid]," + + f" &{varU}, &{varV}, {U.interp_method.upper()}, {U.gridindexingtype.upper()})" + ) return ccode_str -class DeferredArray(): 
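
The `eval` method above blends the two time snapshots bracketing `time` linearly, the same `f0 + (f1 - f0) * ((t - t0) / (t1 - t0))` pattern used throughout this file. A minimal standalone sketch, with illustrative names:

    def interp_in_time(f0, f1, t0, t1, t):
        # Linear blend between the field sampled at t0 (f0) and at t1 (f1).
        return f0 + (f1 - f0) * ((t - t0) / (t1 - t0))

    print(interp_in_time(f0=1.0, f1=3.0, t0=0.0, t1=10.0, t=2.5))  # 1.5
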
+class DeferredArray: """Class used for throwing error when Field.data is not read in deferred loading mode.""" data_shape = () @@ -1917,7 +2253,9 @@ class DeferredArray(): return self.data_shape def __getitem__(self, key): - raise RuntimeError("Field is in deferred_load mode, so can't be accessed. Use .computeTimeChunk() method to force loading of data") + raise RuntimeError( + "Field is in deferred_load mode, so can't be accessed. Use .computeTimeChunk() method to force loading of data" + ) class NestedField(list): @@ -1954,18 +2292,22 @@ class NestedField(list): if isinstance(F[0], VectorField): vector_type = F[0].vector_type for Fi in F: - assert isinstance(Fi, Field) or (isinstance(Fi, VectorField) and Fi.vector_type == vector_type), 'Components of a NestedField must be Field or VectorField' + assert isinstance(Fi, Field) or ( + isinstance(Fi, VectorField) and Fi.vector_type == vector_type + ), "Components of a NestedField must be Field or VectorField" self.append(Fi) elif W is None: - for (i, Fi, Vi) in zip(range(len(F)), F, V): - assert isinstance(Fi, Field) and isinstance(Vi, Field), \ - 'F, and V components of a NestedField must be Field' - self.append(VectorField(name+'_%d' % i, Fi, Vi)) + for i, Fi, Vi in zip(range(len(F)), F, V): + assert isinstance(Fi, Field) and isinstance( + Vi, Field + ), "F, and V components of a NestedField must be Field" + self.append(VectorField(name + "_%d" % i, Fi, Vi)) else: - for (i, Fi, Vi, Wi) in zip(range(len(F)), F, V, W): - assert isinstance(Fi, Field) and isinstance(Vi, Field) and isinstance(Wi, Field), \ - 'F, V and W components of a NestedField must be Field' - self.append(VectorField(name+'_%d' % i, Fi, Vi, Wi)) + for i, Fi, Vi, Wi in zip(range(len(F)), F, V, W): + assert ( + isinstance(Fi, Field) and isinstance(Vi, Field) and isinstance(Wi, Field) + ), "F, V and W components of a NestedField must be Field" + self.append(VectorField(name + "_%d" % i, Fi, Vi, Wi)) self.name = name def __getitem__(self, key): @@ -1980,7 +2322,7 @@ class NestedField(list): val = list.__getitem__(self, iField).eval(*key) break except tuple(AllParcelsErrorCodes.keys()) as error: - if iField == len(self)-1: + if iField == len(self) - 1: vector_type = self[iField].vector_type if isinstance(self[iField], VectorField) else None return _deal_with_errors(error, key, vector_type=vector_type) else: diff --git a/parcels/fieldfilebuffer.py b/parcels/fieldfilebuffer.py index 9568de30..dd702270 100644 --- a/parcels/fieldfilebuffer.py +++ b/parcels/fieldfilebuffer.py @@ -15,18 +15,19 @@ from parcels.tools.statuscodes import DaskChunkingError class _FileBuffer: - def __init__(self, filename, dimensions, indices, timestamp=None, - interp_method='linear', data_full_zdim=None, **kwargs): + def __init__( + self, filename, dimensions, indices, timestamp=None, interp_method="linear", data_full_zdim=None, **kwargs + ): self.filename = filename self.dimensions = dimensions # Dict with dimension keys for file data self.indices = indices self.dataset = None self.timestamp = timestamp - self.cast_data_dtype = kwargs.pop('cast_data_dtype', np.float32) + self.cast_data_dtype = kwargs.pop("cast_data_dtype", np.float32) self.ti = None self.interp_method = interp_method self.data_full_zdim = data_full_zdim - if ('lon' in self.indices) or ('lat' in self.indices): + if ("lon" in self.indices) or ("lat" in self.indices): self.nolonlatindices = False else: self.nolonlatindices = True @@ -35,8 +36,8 @@ class _FileBuffer: class NetcdfFileBuffer(_FileBuffer): def __init__(self, *args, **kwargs): 
self.lib = np - self.netcdf_engine = kwargs.pop('netcdf_engine', 'netcdf4') - self.netcdf_decodewarning = kwargs.pop('netcdf_decodewarning', True) + self.netcdf_engine = kwargs.pop("netcdf_engine", "netcdf4") + self.netcdf_decodewarning = kwargs.pop("netcdf_decodewarning", True) super().__init__(*args, **kwargs) def __enter__(self): @@ -45,17 +46,19 @@ class NetcdfFileBuffer(_FileBuffer): # (which we would rather want to have being auto-managed). # If 'lock' is not specified, the Lock-object is auto-created and managed by xarray internally. self.dataset = xr.open_dataset(str(self.filename), decode_cf=True, engine=self.netcdf_engine) - self.dataset['decoded'] = True + self.dataset["decoded"] = True except: if self.netcdf_decodewarning: - logger.warning_once(f"File {self.filename} could not be decoded properly by xarray (version {xr.__version__}). " - "It will be opened with no decoding. Filling values might be wrongly parsed.") + logger.warning_once( + f"File {self.filename} could not be decoded properly by xarray (version {xr.__version__}). " + "It will be opened with no decoding. Filling values might be wrongly parsed." + ) self.dataset = xr.open_dataset(str(self.filename), decode_cf=False, engine=self.netcdf_engine) - self.dataset['decoded'] = False + self.dataset["decoded"] = False for inds in self.indices.values(): if type(inds) not in [list, range]: - raise RuntimeError('Indices for field subsetting need to be a list') + raise RuntimeError("Indices for field subsetting need to be a list") return self def __exit__(self, type, value, traceback): @@ -73,13 +76,13 @@ class NetcdfFileBuffer(_FileBuffer): name = nm break if isinstance(name, list): - raise OSError('None of variables in list found in file') + raise OSError("None of variables in list found in file") return name @property def lonlat(self): - lon = self.dataset[self.dimensions['lon']] - lat = self.dataset[self.dimensions['lat']] + lon = self.dataset[self.dimensions["lon"]] + lat = self.dataset[self.dimensions["lat"]] if self.nolonlatindices: if len(lon.shape) < 3: lon_subset = np.array(lon) @@ -93,20 +96,20 @@ class NetcdfFileBuffer(_FileBuffer): else: xdim = lon.size if len(lon.shape) == 1 else lon.shape[-1] ydim = lat.size if len(lat.shape) == 1 else lat.shape[-2] - self.indices['lon'] = self.indices['lon'] if 'lon' in self.indices else range(xdim) - self.indices['lat'] = self.indices['lat'] if 'lat' in self.indices else range(ydim) + self.indices["lon"] = self.indices["lon"] if "lon" in self.indices else range(xdim) + self.indices["lat"] = self.indices["lat"] if "lat" in self.indices else range(ydim) if len(lon.shape) == 1: - lon_subset = np.array(lon[self.indices['lon']]) - lat_subset = np.array(lat[self.indices['lat']]) + lon_subset = np.array(lon[self.indices["lon"]]) + lat_subset = np.array(lat[self.indices["lat"]]) elif len(lon.shape) == 2: - lon_subset = np.array(lon[self.indices['lat'], self.indices['lon']]) - lat_subset = np.array(lat[self.indices['lat'], self.indices['lon']]) + lon_subset = np.array(lon[self.indices["lat"], self.indices["lon"]]) + lat_subset = np.array(lat[self.indices["lat"], self.indices["lon"]]) elif len(lon.shape) == 3: # some lon, lat have a time dimension 1 - lon_subset = np.array(lon[0, self.indices['lat'], self.indices['lon']]) - lat_subset = np.array(lat[0, self.indices['lat'], self.indices['lon']]) + lon_subset = np.array(lon[0, self.indices["lat"], self.indices["lon"]]) + lat_subset = np.array(lat[0, self.indices["lat"], self.indices["lon"]]) elif len(lon.shape) == 4: # some lon, lat have 
a time and depth dimension 1 - lon_subset = np.array(lon[0, 0, self.indices['lat'], self.indices['lon']]) - lat_subset = np.array(lat[0, 0, self.indices['lat'], self.indices['lon']]) + lon_subset = np.array(lon[0, 0, self.indices["lat"], self.indices["lon"]]) + lat_subset = np.array(lat[0, 0, self.indices["lat"], self.indices["lon"]]) if len(lon.shape) > 1: # Tests if lon, lat are rectilinear but were stored in arrays rectilinear = True @@ -127,77 +130,79 @@ class NetcdfFileBuffer(_FileBuffer): @property def depth(self): - if 'depth' in self.dimensions: - depth = self.dataset[self.dimensions['depth']] + if "depth" in self.dimensions: + depth = self.dataset[self.dimensions["depth"]] depthsize = depth.size if len(depth.shape) == 1 else depth.shape[-3] self.data_full_zdim = depthsize - self.indices['depth'] = self.indices['depth'] if 'depth' in self.indices else range(depthsize) + self.indices["depth"] = self.indices["depth"] if "depth" in self.indices else range(depthsize) if len(depth.shape) == 1: - return np.array(depth[self.indices['depth']]) + return np.array(depth[self.indices["depth"]]) elif len(depth.shape) == 3: if self.nolonlatindices: - return np.array(depth[self.indices['depth'], :, :]) + return np.array(depth[self.indices["depth"], :, :]) else: - return np.array(depth[self.indices['depth'], self.indices['lat'], self.indices['lon']]) + return np.array(depth[self.indices["depth"], self.indices["lat"], self.indices["lon"]]) elif len(depth.shape) == 4: if self.nolonlatindices: - return np.array(depth[:, self.indices['depth'], :, :]) + return np.array(depth[:, self.indices["depth"], :, :]) else: - return np.array(depth[:, self.indices['depth'], self.indices['lat'], self.indices['lon']]) + return np.array(depth[:, self.indices["depth"], self.indices["lat"], self.indices["lon"]]) else: - self.indices['depth'] = [0] + self.indices["depth"] = [0] return np.zeros(1) @property def depth_dimensions(self): - if 'depth' in self.dimensions: + if "depth" in self.dimensions: data = self.dataset[self.name] depthsize = data.shape[-3] self.data_full_zdim = depthsize - self.indices['depth'] = self.indices['depth'] if 'depth' in self.indices else range(depthsize) + self.indices["depth"] = self.indices["depth"] if "depth" in self.indices else range(depthsize) if self.nolonlatindices: - return np.empty((0, len(self.indices['depth'])) + data.shape[-2:]) + return np.empty((0, len(self.indices["depth"])) + data.shape[-2:]) else: - return np.empty((0, len(self.indices['depth']), len(self.indices['lat']), len(self.indices['lon']))) + return np.empty((0, len(self.indices["depth"]), len(self.indices["lat"]), len(self.indices["lon"]))) def _check_extend_depth(self, data, di): - return (self.indices['depth'][-1] == self.data_full_zdim-1 - and data.shape[di] == self.data_full_zdim-1 - and self.interp_method in ['bgrid_velocity', 'bgrid_w_velocity', 'bgrid_tracer']) + return ( + self.indices["depth"][-1] == self.data_full_zdim - 1 + and data.shape[di] == self.data_full_zdim - 1 + and self.interp_method in ["bgrid_velocity", "bgrid_w_velocity", "bgrid_tracer"] + ) def _apply_indices(self, data, ti): if len(data.shape) == 2: if self.nolonlatindices: pass else: - data = data[self.indices['lat'], self.indices['lon']] + data = data[self.indices["lat"], self.indices["lon"]] elif len(data.shape) == 3: if self._check_extend_depth(data, 0): if self.nolonlatindices: - data = data[self.indices['depth'][:-1], :, :] + data = data[self.indices["depth"][:-1], :, :] else: - data = data[self.indices['depth'][:-1], 
self.indices['lat'], self.indices['lon']] - elif len(self.indices['depth']) > 1: + data = data[self.indices["depth"][:-1], self.indices["lat"], self.indices["lon"]] + elif len(self.indices["depth"]) > 1: if self.nolonlatindices: - data = data[self.indices['depth'], :, :] + data = data[self.indices["depth"], :, :] else: - data = data[self.indices['depth'], self.indices['lat'], self.indices['lon']] + data = data[self.indices["depth"], self.indices["lat"], self.indices["lon"]] else: if self.nolonlatindices: data = data[ti, :, :] else: - data = data[ti, self.indices['lat'], self.indices['lon']] + data = data[ti, self.indices["lat"], self.indices["lon"]] else: if self._check_extend_depth(data, 1): if self.nolonlatindices: - data = data[ti, self.indices['depth'][:-1], :, :] + data = data[ti, self.indices["depth"][:-1], :, :] else: - data = data[ti, self.indices['depth'][:-1], self.indices['lat'], self.indices['lon']] + data = data[ti, self.indices["depth"][:-1], self.indices["lat"], self.indices["lon"]] else: if self.nolonlatindices: - data = data[ti, self.indices['depth'], :, :] + data = data[ti, self.indices["depth"], :, :] else: - data = data[ti, self.indices['depth'], self.indices['lat'], self.indices['lon']] + data = data[ti, self.indices["depth"], self.indices["lat"], self.indices["lon"]] return data @property @@ -218,14 +223,20 @@ class NetcdfFileBuffer(_FileBuffer): if self.timestamp is not None: return self.timestamp - if 'time' not in self.dimensions: + if "time" not in self.dimensions: return np.array([None]) - time_da = self.dataset[self.dimensions['time']] - convert_xarray_time_units(time_da, self.dimensions['time']) - time = np.array([time_da[self.dimensions['time']].data]) if len(time_da.shape) == 0 else np.array(time_da[self.dimensions['time']]) + time_da = self.dataset[self.dimensions["time"]] + convert_xarray_time_units(time_da, self.dimensions["time"]) + time = ( + np.array([time_da[self.dimensions["time"]].data]) + if len(time_da.shape) == 0 + else np.array(time_da[self.dimensions["time"]]) + ) if isinstance(time[0], datetime.datetime): - raise NotImplementedError('Parcels currently only parses dates ranging from 1678 AD to 2262 AD, which are stored by xarray as np.datetime64. If you need a wider date range, please open an Issue on the parcels github page.') + raise NotImplementedError( + "Parcels currently only parses dates ranging from 1678 AD to 2262 AD, which are stored by xarray as np.datetime64. If you need a wider date range, please open an Issue on the parcels github page." 
+ ) return time @@ -235,15 +246,39 @@ class DeferredNetcdfFileBuffer(NetcdfFileBuffer): class DaskFileBuffer(NetcdfFileBuffer): - _static_name_maps = {'time': ['time', 'time_count', 'time_counter', 'timer_count', 't'], - 'depth': ['depth', 'depthu', 'depthv', 'depthw', 'depths', 'deptht', 'depthx', 'depthy', - 'depthz', 'z', 'z_u', 'z_v', 'z_w', 'd', 'k', 'w_dep', 'w_deps', 'Z', 'Zp1', - 'Zl', 'Zu', 'level'], - 'lat': ['lat', 'nav_lat', 'y', 'latitude', 'la', 'lt', 'j', 'YC', 'YG'], - 'lon': ['lon', 'nav_lon', 'x', 'longitude', 'lo', 'ln', 'i', 'XC', 'XG']} + _static_name_maps = { + "time": ["time", "time_count", "time_counter", "timer_count", "t"], + "depth": [ + "depth", + "depthu", + "depthv", + "depthw", + "depths", + "deptht", + "depthx", + "depthy", + "depthz", + "z", + "z_u", + "z_v", + "z_w", + "d", + "k", + "w_dep", + "w_deps", + "Z", + "Zp1", + "Zl", + "Zu", + "level", + ], + "lat": ["lat", "nav_lat", "y", "latitude", "la", "lt", "j", "YC", "YG"], + "lon": ["lon", "nav_lon", "x", "longitude", "lo", "ln", "i", "XC", "XG"], + } _min_dim_chunksize = 16 """ Class that encapsulates and manages deferred access to file data. """ + def __init__(self, *args, **kwargs): """ Initializes this specific filebuffer type. As a result of using dask, the internal library is set to 'da'. @@ -251,10 +286,10 @@ class DaskFileBuffer(NetcdfFileBuffer): rechunk callback function. Also chunking-related variables are initialized. """ self.lib = da - self.chunksize = kwargs.pop('chunksize', 'auto') - self.lock_file = kwargs.pop('lock_file', True) + self.chunksize = kwargs.pop("chunksize", "auto") + self.lock_file = kwargs.pop("lock_file", True) self.chunk_mapping = None - self.rechunk_callback_fields = kwargs.pop('rechunk_callback_fields', None) + self.rechunk_callback_fields = kwargs.pop("rechunk_callback_fields", None) self.chunking_finalized = False self.autochunkingfailed = False super().__init__(*args, **kwargs) @@ -269,8 +304,10 @@ class DaskFileBuffer(NetcdfFileBuffer): where - due to the chunking, the file is 'locked', meaning that it cannot be simultaneously accessed by another process. This is significant in a cluster setup. """ - if self.chunksize not in [False, None, 'auto'] and type(self.chunksize) is not dict: - raise AttributeError("'chunksize' is of wrong type. Parameter is expected to be a dict per data dimension, or be False, None or 'auto'.") + if self.chunksize not in [False, None, "auto"] and type(self.chunksize) is not dict: + raise AttributeError( + "'chunksize' is of wrong type. Parameter is expected to be a dict per data dimension, or be False, None or 'auto'." + ) if isinstance(self.chunksize, list): self.chunksize = tuple(self.chunksize) @@ -282,21 +319,31 @@ class DaskFileBuffer(NetcdfFileBuffer): # (which we would rather want to have being auto-managed). # If 'lock' is not specified, the Lock-object is auto-created and managed by xarray internally. 
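
The `__enter__` below opens the dataset lazily: passing `chunks` makes xarray hand the variables to dask rather than reading them eagerly, and `lock=False` opts out of the per-file lock for the cluster setups mentioned in the comment above. A minimal sketch of that kind of deferred open — "fields.nc", the variable name "U" and the chunk sizes are placeholders, not from the diff:

    import xarray as xr

    # Data stays on disk; dask loads 128x128 horizontal tiles on demand.
    ds = xr.open_dataset("fields.nc", decode_cf=True, chunks={"lat": 128, "lon": 128})
    print(ds["U"].chunks)  # dask chunk layout; values are only read on .compute()
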
if self.lock_file: - self.dataset = xr.open_dataset(str(self.filename), decode_cf=True, engine=self.netcdf_engine, chunks=init_chunk_dict) + self.dataset = xr.open_dataset( + str(self.filename), decode_cf=True, engine=self.netcdf_engine, chunks=init_chunk_dict + ) else: - self.dataset = xr.open_dataset(str(self.filename), decode_cf=True, engine=self.netcdf_engine, chunks=init_chunk_dict, lock=False) - self.dataset['decoded'] = True + self.dataset = xr.open_dataset( + str(self.filename), decode_cf=True, engine=self.netcdf_engine, chunks=init_chunk_dict, lock=False + ) + self.dataset["decoded"] = True except: - logger.warning_once(f"File {self.filename} could not be decoded properly by xarray (version {xr.__version__}). It will be opened with no decoding. Filling values might be wrongly parsed.") + logger.warning_once( + f"File {self.filename} could not be decoded properly by xarray (version {xr.__version__}). It will be opened with no decoding. Filling values might be wrongly parsed." + ) if self.lock_file: - self.dataset = xr.open_dataset(str(self.filename), decode_cf=False, engine=self.netcdf_engine, chunks=init_chunk_dict) + self.dataset = xr.open_dataset( + str(self.filename), decode_cf=False, engine=self.netcdf_engine, chunks=init_chunk_dict + ) else: - self.dataset = xr.open_dataset(str(self.filename), decode_cf=False, engine=self.netcdf_engine, chunks=init_chunk_dict, lock=False) - self.dataset['decoded'] = False + self.dataset = xr.open_dataset( + str(self.filename), decode_cf=False, engine=self.netcdf_engine, chunks=init_chunk_dict, lock=False + ) + self.dataset["decoded"] = False for inds in self.indices.values(): if type(inds) not in [list, range]: - raise RuntimeError('Indices for field subsetting need to be a list') + raise RuntimeError("Indices for field subsetting need to be a list") return self def __exit__(self, type, value, traceback): @@ -368,18 +415,29 @@ class DaskFileBuffer(NetcdfFileBuffer): result = {} neg_offset = 0 tpl_offset = 0 - for name in ['time', 'depth', 'lat', 'lon']: + for name in ["time", "depth", "lat", "lon"]: i = list(self._static_name_maps.keys()).index(name) - if (name not in self.dimensions): + if name not in self.dimensions: result[name] = None tpl_offset += 1 neg_offset += 1 - elif ((type(self.chunksize) is dict) and (name not in self.chunksize or (type(self.chunksize[name]) is tuple and len(self.chunksize[name]) == 2 and self.chunksize[name][1] <= 1))) or \ - ((type(self.chunksize) is tuple) and name in self.dimensions and (self.chunksize[i-tpl_offset] <= 1)): + elif ( + (type(self.chunksize) is dict) + and ( + name not in self.chunksize + or ( + type(self.chunksize[name]) is tuple + and len(self.chunksize[name]) == 2 + and self.chunksize[name][1] <= 1 + ) + ) + ) or ( + (type(self.chunksize) is tuple) and name in self.dimensions and (self.chunksize[i - tpl_offset] <= 1) + ): result[name] = None neg_offset += 1 else: - result[name] = i-neg_offset + result[name] = i - neg_offset return result def _get_available_dims_indices_by_namemap(self): @@ -389,7 +447,7 @@ class DaskFileBuffer(NetcdfFileBuffer): Example: {'time': 0, 'depth': 1, 'lat': 2, 'lon': 3} """ result = {} - for name in ['time', 'depth', 'lat', 'lon']: + for name in ["time", "depth", "lat", "lon"]: result[name] = list(self._static_name_maps.keys()).index(name) return result @@ -410,7 +468,7 @@ class DaskFileBuffer(NetcdfFileBuffer): if self.dataset is None: raise OSError("Trying to parse NetCDF header information before opening the file.") result = {} - for pcls_dimname in ['time', 
'depth', 'lat', 'lon']: + for pcls_dimname in ["time", "depth", "lat", "lon"]: for nc_dimname in self._static_name_maps[pcls_dimname]: if nc_dimname not in self.dataset.sizes.keys(): continue @@ -433,10 +491,14 @@ class DaskFileBuffer(NetcdfFileBuffer): dimensions, the NetCDF file dataset, and is also required to be chunked according to the requested chunksize dictionary. If any of the two conditions is not met, if returns 'False'. """ - if self.dimensions is None or self.dataset is None or self.chunksize in [None, False, 'auto']: + if self.dimensions is None or self.dataset is None or self.chunksize in [None, False, "auto"]: return False dim_chunked = False - dim_chunked = True if (not dim_chunked and type(self.chunksize) is dict and dimension_name in self.chunksize.keys()) else False + dim_chunked = ( + True + if (not dim_chunked and type(self.chunksize) is dict and dimension_name in self.chunksize.keys()) + else False + ) dim_chunked = True if (not dim_chunked and type(self.chunksize) in [None, False]) else False return (dimension_name in self.dimensions) and dim_chunked @@ -450,7 +512,7 @@ class DaskFileBuffer(NetcdfFileBuffer): """ if self.dataset is None: raise OSError("Trying to parse NetCDF header information before opening the file.") - k, dname, dvalue = (-1, '', 0) + k, dname, dvalue = (-1, "", 0) dimension_name = parcels_dimension_name.lower() dim_indices = self._get_available_dims_indices_by_request() i = dim_indices[dimension_name] @@ -474,7 +536,7 @@ class DaskFileBuffer(NetcdfFileBuffer): of tuples of parcels dimensions and their chunk mapping (i.e. dict(parcels_dim_name => (netcdf_dim_name, chunksize)). It requires as input the name of the related parcels dimension (i.e. one of ['time', 'depth', 'lat', 'lon']. """ - k, dname, dvalue = (-1, '', 0) + k, dname, dvalue = (-1, "", 0) if self.dimensions is None or self.dataset is None: return k, dname, dvalue parcels_dimension_name = parcels_dimension_name.lower() @@ -500,32 +562,32 @@ class DaskFileBuffer(NetcdfFileBuffer): return self.chunksize = {} chunk_map = self.chunk_mapping - timei, timename, timevalue = self._is_dimension_in_dataset('time') - depthi, depthname, depthvalue = self._is_dimension_in_dataset('depth') - lati, latname, latvalue = self._is_dimension_in_dataset('lat') - loni, lonname, lonvalue = self._is_dimension_in_dataset('lon') + timei, timename, timevalue = self._is_dimension_in_dataset("time") + depthi, depthname, depthvalue = self._is_dimension_in_dataset("depth") + lati, latname, latvalue = self._is_dimension_in_dataset("lat") + loni, lonname, lonvalue = self._is_dimension_in_dataset("lon") if len(chunk_map) == 2: - self.chunksize['lon'] = (latname, chunk_map[0]) - self.chunksize['lat'] = (lonname, chunk_map[1]) + self.chunksize["lon"] = (latname, chunk_map[0]) + self.chunksize["lat"] = (lonname, chunk_map[1]) elif len(chunk_map) == 3: chunk_dim_index = 0 - if depthi is not None and depthi >= 0 and depthvalue > 1 and self._is_dimension_available('depth'): - self.chunksize['depth'] = (depthname, chunk_map[chunk_dim_index]) + if depthi is not None and depthi >= 0 and depthvalue > 1 and self._is_dimension_available("depth"): + self.chunksize["depth"] = (depthname, chunk_map[chunk_dim_index]) chunk_dim_index += 1 - elif timei is not None and timei >= 0 and timevalue > 1 and self._is_dimension_available('time'): - self.chunksize['time'] = (timename, chunk_map[chunk_dim_index]) + elif timei is not None and timei >= 0 and timevalue > 1 and self._is_dimension_available("time"): + self.chunksize["time"] = 
(timename, chunk_map[chunk_dim_index]) chunk_dim_index += 1 - self.chunksize['lat'] = (latname, chunk_map[chunk_dim_index]) + self.chunksize["lat"] = (latname, chunk_map[chunk_dim_index]) chunk_dim_index += 1 - self.chunksize['lon'] = (lonname, chunk_map[chunk_dim_index]) + self.chunksize["lon"] = (lonname, chunk_map[chunk_dim_index]) elif len(chunk_map) >= 4: - self.chunksize['time'] = (timename, chunk_map[0]) - self.chunksize['depth'] = (depthname, chunk_map[1]) - self.chunksize['lat'] = (latname, chunk_map[2]) - self.chunksize['lon'] = (lonname, chunk_map[3]) + self.chunksize["time"] = (timename, chunk_map[0]) + self.chunksize["depth"] = (depthname, chunk_map[1]) + self.chunksize["lat"] = (latname, chunk_map[2]) + self.chunksize["lon"] = (lonname, chunk_map[3]) dim_index = 4 for dim_name in self.dimensions: - if dim_name not in ['time', 'depth', 'lat', 'lon']: + if dim_name not in ["time", "depth", "lat", "lon"]: self.chunksize[dim_name] = (self.dimensions[dim_name], chunk_map[dim_index]) dim_index += 1 @@ -542,42 +604,50 @@ class DaskFileBuffer(NetcdfFileBuffer): chunk_dict = {} chunk_index_map = {} neg_offset = 0 - if 'time' in self.chunksize.keys(): - timei, timename, timesize = self._is_dimension_in_dataset(parcels_dimension_name='time', netcdf_dimension_name=self.chunksize['time'][0]) - timevalue = self.chunksize['time'][1] + if "time" in self.chunksize.keys(): + timei, timename, timesize = self._is_dimension_in_dataset( + parcels_dimension_name="time", netcdf_dimension_name=self.chunksize["time"][0] + ) + timevalue = self.chunksize["time"][1] if timei is not None and timei >= 0 and timevalue > 1: timevalue = min(timesize, timevalue) chunk_dict[timename] = timevalue - chunk_index_map[timei-neg_offset] = timevalue + chunk_index_map[timei - neg_offset] = timevalue else: - self.chunksize.pop('time') - if 'depth' in self.chunksize.keys(): - depthi, depthname, depthsize = self._is_dimension_in_dataset(parcels_dimension_name='depth', netcdf_dimension_name=self.chunksize['depth'][0]) - depthvalue = self.chunksize['depth'][1] + self.chunksize.pop("time") + if "depth" in self.chunksize.keys(): + depthi, depthname, depthsize = self._is_dimension_in_dataset( + parcels_dimension_name="depth", netcdf_dimension_name=self.chunksize["depth"][0] + ) + depthvalue = self.chunksize["depth"][1] if depthi is not None and depthi >= 0 and depthvalue > 1: depthvalue = min(depthsize, depthvalue) chunk_dict[depthname] = depthvalue - chunk_index_map[depthi-neg_offset] = depthvalue + chunk_index_map[depthi - neg_offset] = depthvalue else: - self.chunksize.pop('depth') - if 'lat' in self.chunksize.keys(): - lati, latname, latsize = self._is_dimension_in_dataset(parcels_dimension_name='lat', netcdf_dimension_name=self.chunksize['lat'][0]) - latvalue = self.chunksize['lat'][1] + self.chunksize.pop("depth") + if "lat" in self.chunksize.keys(): + lati, latname, latsize = self._is_dimension_in_dataset( + parcels_dimension_name="lat", netcdf_dimension_name=self.chunksize["lat"][0] + ) + latvalue = self.chunksize["lat"][1] if lati is not None and lati >= 0 and latvalue > 1: latvalue = min(latsize, latvalue) chunk_dict[latname] = latvalue - chunk_index_map[lati-neg_offset] = latvalue + chunk_index_map[lati - neg_offset] = latvalue else: - self.chunksize.pop('lat') - if 'lon' in self.chunksize.keys(): - loni, lonname, lonsize = self._is_dimension_in_dataset(parcels_dimension_name='lon', netcdf_dimension_name=self.chunksize['lon'][0]) - lonvalue = self.chunksize['lon'][1] + self.chunksize.pop("lat") + if "lon" in 
self.chunksize.keys():
-            loni, lonname, lonsize = self._is_dimension_in_dataset(parcels_dimension_name='lon', netcdf_dimension_name=self.chunksize['lon'][0])
-            lonvalue = self.chunksize['lon'][1]
+                self.chunksize.pop("lat")
+        if "lon" in self.chunksize.keys():
+            loni, lonname, lonsize = self._is_dimension_in_dataset(
+                parcels_dimension_name="lon", netcdf_dimension_name=self.chunksize["lon"][0]
+            )
+            lonvalue = self.chunksize["lon"][1]
             if loni is not None and loni >= 0 and lonvalue > 1:
                 lonvalue = min(lonsize, lonvalue)
                 chunk_dict[lonname] = lonvalue
-                chunk_index_map[loni-neg_offset] = lonvalue
+                chunk_index_map[loni - neg_offset] = lonvalue
             else:
-                self.chunksize.pop('lon')
+                self.chunksize.pop("lon")
         return chunk_dict, chunk_index_map

     def _failsafe_parse_(self):
@@ -617,7 +687,12 @@ class DaskFileBuffer(NetcdfFileBuffer):
                     nc_dimsize = self.dataset.dimensions[nc_dname].size
                 if pcls_dname in self.chunksize.keys():
                     pcls_dim_chunksize = self.chunksize[pcls_dname][1]
-            if pcls_dname is not None and nc_dname is not None and nc_dimsize is not None and pcls_dim_chunksize is not None:
+            if (
+                pcls_dname is not None
+                and nc_dname is not None
+                and nc_dimsize is not None
+                and pcls_dim_chunksize is not None
+            ):
                 init_chunk_dict[nc_dname] = pcls_dim_chunksize

         # ==== because in this case it has shown that the requested chunksize setup cannot be used, ==== #
@@ -638,38 +713,42 @@ class DaskFileBuffer(NetcdfFileBuffer):
         """
         # ==== check-opening requested dataset to access metadata ==== #
         # ==== file-opening and dimension-reading does not require a decode or lock ==== #
-        self.dataset = xr.open_dataset(str(self.filename), decode_cf=False, engine=self.netcdf_engine, chunks={}, lock=False)
-        self.dataset['decoded'] = False
+        self.dataset = xr.open_dataset(
+            str(self.filename), decode_cf=False, engine=self.netcdf_engine, chunks={}, lock=False
+        )
+        self.dataset["decoded"] = False
         # ==== self.dataset temporarily available ==== #
         init_chunk_dict = {}
         init_chunk_map = {}
         if isinstance(self.chunksize, dict):
             init_chunk_dict, init_chunk_map = self._get_initial_chunk_dictionary_by_dict_()
-        elif self.chunksize == 'auto':
+        elif self.chunksize == "auto":
             av_mem = psutil.virtual_memory().available
-            chunk_cap = av_mem * (1/8) * (1/3)
-            if 'array.chunk-size' in da_conf.config.keys():
-                chunk_cap = da_utils.parse_bytes(da_conf.config.get('array.chunk-size'))
+            chunk_cap = av_mem * (1 / 8) * (1 / 3)
+            if "array.chunk-size" in da_conf.config.keys():
+                chunk_cap = da_utils.parse_bytes(da_conf.config.get("array.chunk-size"))
             else:
-                predefined_cap = da_conf.get('array.chunk-size')
+                predefined_cap = da_conf.get("array.chunk-size")
                 if predefined_cap is not None:
                     chunk_cap = da_utils.parse_bytes(predefined_cap)
                 else:
-                    logger.info_once("Unable to locate chunking hints from dask, thus estimating the max. chunk size heuristically."
-                                     "Please consider defining the 'chunk-size' for 'array' in your local dask configuration file (see https://docs.oceanparcels.org/en/latest/examples/documentation_MPI.html#Chunking-the-FieldSet-with-dask and https://docs.dask.org).")
+                    logger.info_once(
+                        "Unable to locate chunking hints from dask, thus estimating the max. chunk size heuristically. "
+                        "Please consider defining the 'chunk-size' for 'array' in your local dask configuration file (see https://docs.oceanparcels.org/en/latest/examples/documentation_MPI.html#Chunking-the-FieldSet-with-dask and https://docs.dask.org)."
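
# The message above points users to dask's own configuration; a minimal sketch of
# setting that hint programmatically instead of via a yaml file (the 128MiB value
# is only an example, not a recommendation from this changeset):

import dask

dask.config.set({"array.chunk-size": "128MiB"})
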
+ ) + loni, lonname, lonvalue = self._is_dimension_in_dataset("lon") + lati, latname, latvalue = self._is_dimension_in_dataset("lat") if lati is not None and loni is not None and lati >= 0 and loni >= 0: - pDim = int(math.floor(math.sqrt(chunk_cap/np.dtype(np.float64).itemsize))) + pDim = int(math.floor(math.sqrt(chunk_cap / np.dtype(np.float64).itemsize))) init_chunk_dict[latname] = min(latvalue, pDim) init_chunk_map[lati] = min(latvalue, pDim) init_chunk_dict[lonname] = min(lonvalue, pDim) init_chunk_map[loni] = min(lonvalue, pDim) - timei, timename, timevalue = self._is_dimension_in_dataset('time') + timei, timename, timevalue = self._is_dimension_in_dataset("time") if timei is not None and timei >= 0: init_chunk_dict[timename] = min(1, timevalue) init_chunk_map[timei] = min(1, timevalue) - depthi, depthname, depthvalue = self._is_dimension_in_dataset('depth') + depthi, depthname, depthvalue = self._is_dimension_in_dataset("depth") if depthi is not None and depthi >= 0: init_chunk_dict[depthname] = max(1, depthvalue) init_chunk_map[depthi] = max(1, depthvalue) @@ -677,17 +756,24 @@ class DaskFileBuffer(NetcdfFileBuffer): self.dataset.close() # ==== check if the chunksize reading is successful. if not, load the file ONCE really into memory and ==== # # ==== deduce the chunking from the array dims. ==== # - if len(init_chunk_dict) == 0 and self.chunksize not in [False, None, 'auto']: + if len(init_chunk_dict) == 0 and self.chunksize not in [False, None, "auto"]: self.autochunkingfailed = True - raise DaskChunkingError(self.__class__.__name__, "No correct mapping found between Parcels- and NetCDF dimensions! Please correct the 'FieldSet(..., chunksize={...})' parameter and try again.") + raise DaskChunkingError( + self.__class__.__name__, + "No correct mapping found between Parcels- and NetCDF dimensions! Please correct the 'FieldSet(..., chunksize={...})' parameter and try again.", + ) else: self.autochunkingfailed = False try: - self.dataset = xr.open_dataset(str(self.filename), decode_cf=True, engine=self.netcdf_engine, chunks=init_chunk_dict, lock=False) + self.dataset = xr.open_dataset( + str(self.filename), decode_cf=True, engine=self.netcdf_engine, chunks=init_chunk_dict, lock=False + ) if isinstance(self.chunksize, dict): self.chunksize = init_chunk_dict except: - logger.warning(f"Chunking with init_chunk_dict = {init_chunk_dict} failed - Executing Dask chunking 'failsafe'...") + logger.warning( + f"Chunking with init_chunk_dict = {init_chunk_dict} failed - Executing Dask chunking 'failsafe'..." + ) self.autochunkingfailed = True if not self.autochunkingfailed: init_chunk_dict = self._failsafe_parse_() @@ -714,7 +800,7 @@ class DaskFileBuffer(NetcdfFileBuffer): if isinstance(data, da.core.Array): if not self.chunking_finalized: - if self.chunksize == 'auto': + if self.chunksize == "auto": # ==== as the chunksize is not initiated, the data is chunked automatically by Dask. ==== # # ==== the resulting chunk dictionary is stored, to be re-used later. This prevents ==== # # ==== the expensive re-calculation and PHYSICAL FILE RECHUNKING on each data access. 
==== # @@ -734,7 +820,7 @@ class DaskFileBuffer(NetcdfFileBuffer): self.chunking_finalized = True else: da_data = da.from_array(data, chunks=self.chunksize) - if self.chunksize == 'auto' and da_data.shape[-2:] == da_data.chunksize[-2:]: + if self.chunksize == "auto" and da_data.shape[-2:] == da_data.chunksize[-2:]: data = np.array(data) else: data = da_data diff --git a/parcels/fieldset.py b/parcels/fieldset.py index 910f9415..03c992ec 100644 --- a/parcels/fieldset.py +++ b/parcels/fieldset.py @@ -20,7 +20,7 @@ except ModuleNotFoundError: MPI = None -__all__ = ['FieldSet'] +__all__ = ["FieldSet"] class FieldSet: @@ -42,10 +42,10 @@ class FieldSet: self.completed = False self.particlefile = None if U: - self.add_field(U, 'U') + self.add_field(U, "U") self.time_origin = self.U.grid.time_origin if isinstance(self.U, Field) else self.U[0].grid.time_origin if V: - self.add_field(V, 'V') + self.add_field(V, "V") # Add additional fields as attributes if fields: @@ -58,12 +58,20 @@ class FieldSet: @staticmethod def checkvaliddimensionsdict(dims): for d in dims: - if d not in ['lon', 'lat', 'depth', 'time']: - raise NameError(f'{d} is not a valid key in the dimensions dictionary') + if d not in ["lon", "lat", "depth", "time"]: + raise NameError(f"{d} is not a valid key in the dimensions dictionary") @classmethod - def from_data(cls, data, dimensions, transpose=False, mesh='spherical', - allow_time_extrapolation=None, time_periodic=False, **kwargs): + def from_data( + cls, + data, + dimensions, + transpose=False, + mesh="spherical", + allow_time_extrapolation=None, + time_periodic=False, + **kwargs, + ): """Initialise FieldSet object from raw data. Parameters @@ -122,26 +130,33 @@ class FieldSet: cls.checkvaliddimensionsdict(dims) if allow_time_extrapolation is None: - allow_time_extrapolation = False if 'time' in dims else True + allow_time_extrapolation = False if "time" in dims else True - lon = dims['lon'] - lat = dims['lat'] - depth = np.zeros(1, dtype=np.float32) if 'depth' not in dims else dims['depth'] - time = np.zeros(1, dtype=np.float64) if 'time' not in dims else dims['time'] + lon = dims["lon"] + lat = dims["lat"] + depth = np.zeros(1, dtype=np.float32) if "depth" not in dims else dims["depth"] + time = np.zeros(1, dtype=np.float64) if "time" not in dims else dims["time"] time = np.array(time) if not isinstance(time, np.ndarray) else time if isinstance(time[0], np.datetime64): time_origin = TimeConverter(time[0]) time = np.array([time_origin.reltime(t) for t in time]) else: - time_origin = kwargs.pop('time_origin', TimeConverter(0)) + time_origin = kwargs.pop("time_origin", TimeConverter(0)) grid = Grid.create_grid(lon, lat, depth, time, time_origin=time_origin, mesh=mesh) - if 'creation_log' not in kwargs.keys(): - kwargs['creation_log'] = 'from_data' - - fields[name] = Field(name, datafld, grid=grid, transpose=transpose, - allow_time_extrapolation=allow_time_extrapolation, time_periodic=time_periodic, **kwargs) - u = fields.pop('U', None) - v = fields.pop('V', None) + if "creation_log" not in kwargs.keys(): + kwargs["creation_log"] = "from_data" + + fields[name] = Field( + name, + datafld, + grid=grid, + transpose=transpose, + allow_time_extrapolation=allow_time_extrapolation, + time_periodic=time_periodic, + **kwargs, + ) + u = fields.pop("U", None) + v = fields.pop("V", None) return cls(u, v, fields=fields) def add_field(self, field, name=None): @@ -165,7 +180,9 @@ class FieldSet: """ if self.completed: - raise RuntimeError("FieldSet has already been completed. 
Are you trying to add a Field after you've created the ParticleSet?") + raise RuntimeError( + "FieldSet has already been completed. Are you trying to add a Field after you've created the ParticleSet?" + ) name = field.name if name is None else name if hasattr(self, name): # check if Field with same name already exists when adding new Field raise RuntimeError(f"FieldSet already has a Field with name '{name}'") @@ -179,7 +196,7 @@ class FieldSet: self.gridset.add_grid(field) field.fieldset = self - def add_constant_field(self, name, value, mesh='flat'): + def add_constant_field(self, name, value, mesh="flat"): """Wrapper function to add a Field that is constant in space, useful e.g. when using constant horizontal diffusivity @@ -217,58 +234,65 @@ class FieldSet: f.fieldset = self def add_UVfield(self): - if not hasattr(self, 'UV') and hasattr(self, 'U') and hasattr(self, 'V'): + if not hasattr(self, "UV") and hasattr(self, "U") and hasattr(self, "V"): if isinstance(self.U, NestedField): - self.add_vector_field(NestedField('UV', self.U, self.V)) + self.add_vector_field(NestedField("UV", self.U, self.V)) else: - self.add_vector_field(VectorField('UV', self.U, self.V)) - if not hasattr(self, 'UVW') and hasattr(self, 'W'): + self.add_vector_field(VectorField("UV", self.U, self.V)) + if not hasattr(self, "UVW") and hasattr(self, "W"): if isinstance(self.U, NestedField): - self.add_vector_field(NestedField('UVW', self.U, self.V, self.W)) + self.add_vector_field(NestedField("UVW", self.U, self.V, self.W)) else: - self.add_vector_field(VectorField('UVW', self.U, self.V, self.W)) + self.add_vector_field(VectorField("UVW", self.U, self.V, self.W)) def check_complete(self): assert self.U, 'FieldSet does not have a Field named "U"' assert self.V, 'FieldSet does not have a Field named "V"' for attr, value in vars(self).items(): if type(value) is Field: - assert value.name == attr, f'Field {value.name}.name ({attr}) is not consistent' + assert value.name == attr, f"Field {value.name}.name ({attr}) is not consistent" def check_velocityfields(U, V, W): - if (U.interp_method == 'cgrid_velocity' and V.interp_method != 'cgrid_velocity') or \ - (U.interp_method != 'cgrid_velocity' and V.interp_method == 'cgrid_velocity'): + if (U.interp_method == "cgrid_velocity" and V.interp_method != "cgrid_velocity") or ( + U.interp_method != "cgrid_velocity" and V.interp_method == "cgrid_velocity" + ): raise ValueError("If one of U,V.interp_method='cgrid_velocity', the other should be too") - if 'linear_invdist_land_tracer' in [U.interp_method, V.interp_method]: - raise NotImplementedError("interp_method='linear_invdist_land_tracer' is not implemented for U and V Fields") + if "linear_invdist_land_tracer" in [U.interp_method, V.interp_method]: + raise NotImplementedError( + "interp_method='linear_invdist_land_tracer' is not implemented for U and V Fields" + ) - if U.interp_method == 'cgrid_velocity': + if U.interp_method == "cgrid_velocity": if U.grid.xdim == 1 or U.grid.ydim == 1 or V.grid.xdim == 1 or V.grid.ydim == 1: - raise NotImplementedError('C-grid velocities require longitude and latitude dimensions at least length 2') + raise NotImplementedError( + "C-grid velocities require longitude and latitude dimensions at least length 2" + ) - if U.gridindexingtype not in ['nemo', 'mitgcm', 'mom5', 'pop']: + if U.gridindexingtype not in ["nemo", "mitgcm", "mom5", "pop"]: raise ValueError("Field.gridindexing has to be one of 'nemo', 'mitgcm', 'mom5' or 'pop'") if V.gridindexingtype != U.gridindexingtype or (W and 
W.gridindexingtype != U.gridindexingtype):
-                raise ValueError('Not all velocity Fields have the same gridindexingtype')
+                raise ValueError("Not all velocity Fields have the same gridindexingtype")

             if U.cast_data_dtype != V.cast_data_dtype or (W and W.cast_data_dtype != U.cast_data_dtype):
-                raise ValueError('Not all velocity Fields have the same dtype')
+                raise ValueError("Not all velocity Fields have the same dtype")

         if isinstance(self.U, NestedField):
-            w = self.W if hasattr(self, 'W') else [None]*len(self.U)
+            w = self.W if hasattr(self, "W") else [None] * len(self.U)
             for U, V, W in zip(self.U, self.V, w):
                 check_velocityfields(U, V, W)
         else:
-            W = self.W if hasattr(self, 'W') else None
+            W = self.W if hasattr(self, "W") else None
             check_velocityfields(self.U, self.V, W)

         for g in self.gridset.grids:
             g.check_zonal_periodic()
             if len(g.time) == 1:
                 continue
-            assert isinstance(g.time_origin.time_origin, type(self.time_origin.time_origin)), 'time origins of different grids must be have the same type'
+            assert isinstance(
+                g.time_origin.time_origin, type(self.time_origin.time_origin)
+            ), "time origins of different grids must have the same type"
             g.time = g.time + self.time_origin.reltime(g.time_origin)
             if g.defer_load:
                 g.time_full = g.time_full + self.time_origin.reltime(g.time_origin)
@@ -289,8 +313,10 @@ class FieldSet:
             if isinstance(f, (VectorField, NestedField)) or f.dataFiles is None:
                 continue
             if f.grid.depth_field is not None:
-                if f.grid.depth_field == 'not_yet_set':
-                    raise ValueError("If depth dimension is set at 'not_yet_set', it must be added later using Field.set_depth_from_field(field)")
+                if f.grid.depth_field == "not_yet_set":
+                    raise ValueError(
+                        "If depth dimension is set at 'not_yet_set', it must be added later using Field.set_depth_from_field(field)"
+                    )
                 if not f.grid.defer_load:
                     depth_data = f.grid.depth_field.data
                     f.grid.depth = depth_data if isinstance(depth_data, np.ndarray) else np.array(depth_data)
@@ -309,9 +335,21 @@ class FieldSet:
         return paths

     @classmethod
-    def from_netcdf(cls, filenames, variables, dimensions, indices=None, fieldtype=None,
-                    mesh='spherical', timestamps=None, allow_time_extrapolation=None, time_periodic=False,
-                    deferred_load=True, chunksize=None, **kwargs):
+    def from_netcdf(
+        cls,
+        filenames,
+        variables,
+        dimensions,
+        indices=None,
+        fieldtype=None,
+        mesh="spherical",
+        timestamps=None,
+        allow_time_extrapolation=None,
+        time_periodic=False,
+        deferred_load=True,
+        chunksize=None,
+        **kwargs,
+    ):
        """Initialises FieldSet object from NetCDF files.

        Parameters
@@ -396,13 +434,13 @@ class FieldSet:
        """
        # Ensure that times are not provided both in netcdf file and in 'timestamps'.
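
# A minimal sketch of the 'timestamps' route handled below, for files whose time
# axis is not recorded in the NetCDF itself. It reuses the hypothetical
# filenames/variables/dimensions from the earlier sketch, assuming the single file
# holds two time slices (one row of timestamps per file), and drops 'time' from
# the dimensions dict since the warning below fires when both are given:

import numpy as np

timestamps = np.expand_dims(np.array([np.datetime64("2000-01-01"), np.datetime64("2000-01-02")]), axis=0)
dims_no_time = {k: v for k, v in dimensions.items() if k != "time"}
fieldset = FieldSet.from_netcdf(filenames, variables, dims_no_time, timestamps=timestamps)
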
- if timestamps is not None and 'time' in dimensions: + if timestamps is not None and "time" in dimensions: logger.warning_once("Time already provided, defaulting to dimensions['time'] over timestamps.") timestamps = None fields = {} - if 'creation_log' not in kwargs.keys(): - kwargs['creation_log'] = 'from_netcdf' + if "creation_log" not in kwargs.keys(): + kwargs["creation_log"] = "from_netcdf" for var, name in variables.items(): # Resolve all matching paths for the current variable paths = filenames[var] if type(filenames) is dict and var in filenames else filenames @@ -417,7 +455,9 @@ class FieldSet: cls.checkvaliddimensionsdict(dims) inds = indices[var] if (indices and var in indices) else indices fieldtype = fieldtype[var] if (fieldtype and var in fieldtype) else fieldtype - varchunksize = chunksize[var] if (chunksize and var in chunksize) else chunksize # -> {: (, ) } + varchunksize = ( + chunksize[var] if (chunksize and var in chunksize) else chunksize + ) # -> {: (, ) } grid = None dFiles = None @@ -436,16 +476,16 @@ class FieldSet: possibly_samegrid &= False if not possibly_samegrid: break - if varchunksize == 'auto': + if varchunksize == "auto": break - if 'depth' in dims and dims['depth'] == 'not_yet_set': + if "depth" in dims and dims["depth"] == "not_yet_set": break processedGrid = False - if ((not isinstance(filenames, dict)) or filenames[procvar] == filenames[var]): + if (not isinstance(filenames, dict)) or filenames[procvar] == filenames[var]: processedGrid = True elif isinstance(filenames[procvar], dict): processedGrid = True - for dim in ['lon', 'lat', 'depth']: + for dim in ["lon", "lat", "depth"]: if dim in dimensions: processedGrid *= filenames[procvar][dim] == filenames[var][dim] if processedGrid: @@ -453,19 +493,41 @@ class FieldSet: if procpaths == nowpaths: dFiles = fields[procvar].dataFiles break - fields[var] = Field.from_netcdf(paths, (var, name), dims, inds, grid=grid, mesh=mesh, timestamps=timestamps, - allow_time_extrapolation=allow_time_extrapolation, - time_periodic=time_periodic, deferred_load=deferred_load, - fieldtype=fieldtype, chunksize=varchunksize, dataFiles=dFiles, **kwargs) - - u = fields.pop('U', None) - v = fields.pop('V', None) + fields[var] = Field.from_netcdf( + paths, + (var, name), + dims, + inds, + grid=grid, + mesh=mesh, + timestamps=timestamps, + allow_time_extrapolation=allow_time_extrapolation, + time_periodic=time_periodic, + deferred_load=deferred_load, + fieldtype=fieldtype, + chunksize=varchunksize, + dataFiles=dFiles, + **kwargs, + ) + + u = fields.pop("U", None) + v = fields.pop("V", None) return cls(u, v, fields=fields) @classmethod - def from_nemo(cls, filenames, variables, dimensions, indices=None, mesh='spherical', - allow_time_extrapolation=None, time_periodic=False, - tracer_interp_method='cgrid_tracer', chunksize=None, **kwargs): + def from_nemo( + cls, + filenames, + variables, + dimensions, + indices=None, + mesh="spherical", + allow_time_extrapolation=None, + time_periodic=False, + tracer_interp_method="cgrid_tracer", + chunksize=None, + **kwargs, + ): """Initialises FieldSet object from NetCDF files of Curvilinear NEMO fields. See `here <../examples/tutorial_nemo_curvilinear.ipynb>`__ @@ -540,21 +602,43 @@ class FieldSet: Keyword arguments passed to the :func:`Fieldset.from_c_grid_dataset` constructor. """ - if 'creation_log' not in kwargs.keys(): - kwargs['creation_log'] = 'from_nemo' - if kwargs.pop('gridindexingtype', 'nemo') != 'nemo': - raise ValueError("gridindexingtype must be 'nemo' in FieldSet.from_nemo(). 
Use FieldSet.from_c_grid_dataset otherwise") - fieldset = cls.from_c_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic, - allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method, - chunksize=chunksize, gridindexingtype='nemo', **kwargs) - if hasattr(fieldset, 'W'): - fieldset.W.set_scaling_factor(-1.) + if "creation_log" not in kwargs.keys(): + kwargs["creation_log"] = "from_nemo" + if kwargs.pop("gridindexingtype", "nemo") != "nemo": + raise ValueError( + "gridindexingtype must be 'nemo' in FieldSet.from_nemo(). Use FieldSet.from_c_grid_dataset otherwise" + ) + fieldset = cls.from_c_grid_dataset( + filenames, + variables, + dimensions, + mesh=mesh, + indices=indices, + time_periodic=time_periodic, + allow_time_extrapolation=allow_time_extrapolation, + tracer_interp_method=tracer_interp_method, + chunksize=chunksize, + gridindexingtype="nemo", + **kwargs, + ) + if hasattr(fieldset, "W"): + fieldset.W.set_scaling_factor(-1.0) return fieldset @classmethod - def from_mitgcm(cls, filenames, variables, dimensions, indices=None, mesh='spherical', - allow_time_extrapolation=None, time_periodic=False, - tracer_interp_method='cgrid_tracer', chunksize=None, **kwargs): + def from_mitgcm( + cls, + filenames, + variables, + dimensions, + indices=None, + mesh="spherical", + allow_time_extrapolation=None, + time_periodic=False, + tracer_interp_method="cgrid_tracer", + chunksize=None, + **kwargs, + ): """Initialises FieldSet object from NetCDF files of MITgcm fields. All parameters and keywords are exactly the same as for FieldSet.from_nemo(), except that gridindexing is set to 'mitgcm' for grids that have the shape:: @@ -570,19 +654,42 @@ class FieldSet: For indexing details: https://mitgcm.readthedocs.io/en/latest/algorithm/algorithm.html#spatial-discretization-of-the-dynamical-equations Note that vertical velocity (W) is assumed positive in the positive z direction (which is upward in MITgcm) """ - if 'creation_log' not in kwargs.keys(): - kwargs['creation_log'] = 'from_mitgcm' - if kwargs.pop('gridindexingtype', 'mitgcm') != 'mitgcm': - raise ValueError("gridindexingtype must be 'mitgcm' in FieldSet.from_mitgcm(). Use FieldSet.from_c_grid_dataset otherwise") - fieldset = cls.from_c_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic, - allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method, - chunksize=chunksize, gridindexingtype='mitgcm', **kwargs) + if "creation_log" not in kwargs.keys(): + kwargs["creation_log"] = "from_mitgcm" + if kwargs.pop("gridindexingtype", "mitgcm") != "mitgcm": + raise ValueError( + "gridindexingtype must be 'mitgcm' in FieldSet.from_mitgcm(). 
Use FieldSet.from_c_grid_dataset otherwise" + ) + fieldset = cls.from_c_grid_dataset( + filenames, + variables, + dimensions, + mesh=mesh, + indices=indices, + time_periodic=time_periodic, + allow_time_extrapolation=allow_time_extrapolation, + tracer_interp_method=tracer_interp_method, + chunksize=chunksize, + gridindexingtype="mitgcm", + **kwargs, + ) return fieldset @classmethod - def from_c_grid_dataset(cls, filenames, variables, dimensions, indices=None, mesh='spherical', - allow_time_extrapolation=None, time_periodic=False, - tracer_interp_method='cgrid_tracer', gridindexingtype='nemo', chunksize=None, **kwargs): + def from_c_grid_dataset( + cls, + filenames, + variables, + dimensions, + indices=None, + mesh="spherical", + allow_time_extrapolation=None, + time_periodic=False, + tracer_interp_method="cgrid_tracer", + gridindexingtype="nemo", + chunksize=None, + **kwargs, + ): """Initialises FieldSet object from NetCDF files of Curvilinear NEMO fields. See `here <../examples/documentation_indexing.ipynb>`__ @@ -654,32 +761,57 @@ class FieldSet: **kwargs : Keyword arguments passed to the :func:`Fieldset.from_netcdf` constructor. """ - if 'U' in dimensions and 'V' in dimensions and dimensions['U'] != dimensions['V']: - raise ValueError("On a C-grid, the dimensions of velocities should be the corners (f-points) of the cells, so the same for U and V. " - "See also ../examples/documentation_indexing.ipynb") - if 'U' in dimensions and 'W' in dimensions and dimensions['U'] != dimensions['W']: - raise ValueError("On a C-grid, the dimensions of velocities should be the corners (f-points) of the cells, so the same for U, V and W. " - "See also ../examples/documentation_indexing.ipynb") - if 'interp_method' in kwargs.keys(): + if "U" in dimensions and "V" in dimensions and dimensions["U"] != dimensions["V"]: + raise ValueError( + "On a C-grid, the dimensions of velocities should be the corners (f-points) of the cells, so the same for U and V. " + "See also ../examples/documentation_indexing.ipynb" + ) + if "U" in dimensions and "W" in dimensions and dimensions["U"] != dimensions["W"]: + raise ValueError( + "On a C-grid, the dimensions of velocities should be the corners (f-points) of the cells, so the same for U, V and W. 
" + "See also ../examples/documentation_indexing.ipynb" + ) + if "interp_method" in kwargs.keys(): raise TypeError("On a C-grid, the interpolation method for velocities should not be overridden") interp_method = {} for v in variables: - if v in ['U', 'V', 'W']: - interp_method[v] = 'cgrid_velocity' + if v in ["U", "V", "W"]: + interp_method[v] = "cgrid_velocity" else: interp_method[v] = tracer_interp_method - if 'creation_log' not in kwargs.keys(): - kwargs['creation_log'] = 'from_c_grid_dataset' - - return cls.from_netcdf(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic, - allow_time_extrapolation=allow_time_extrapolation, interp_method=interp_method, - chunksize=chunksize, gridindexingtype=gridindexingtype, **kwargs) + if "creation_log" not in kwargs.keys(): + kwargs["creation_log"] = "from_c_grid_dataset" + + return cls.from_netcdf( + filenames, + variables, + dimensions, + mesh=mesh, + indices=indices, + time_periodic=time_periodic, + allow_time_extrapolation=allow_time_extrapolation, + interp_method=interp_method, + chunksize=chunksize, + gridindexingtype=gridindexingtype, + **kwargs, + ) @classmethod - def from_pop(cls, filenames, variables, dimensions, indices=None, mesh='spherical', - allow_time_extrapolation=None, time_periodic=False, - tracer_interp_method='bgrid_tracer', chunksize=None, depth_units='m', **kwargs): + def from_pop( + cls, + filenames, + variables, + dimensions, + indices=None, + mesh="spherical", + allow_time_extrapolation=None, + time_periodic=False, + tracer_interp_method="bgrid_tracer", + chunksize=None, + depth_units="m", + **kwargs, + ): """Initialises FieldSet object from NetCDF files of POP fields. It is assumed that the velocities in the POP fields is in cm/s. @@ -755,29 +887,51 @@ class FieldSet: Keyword arguments passed to the :func:`Fieldset.from_b_grid_dataset` constructor. """ - if 'creation_log' not in kwargs.keys(): - kwargs['creation_log'] = 'from_pop' - fieldset = cls.from_b_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic, - allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method, - chunksize=chunksize, gridindexingtype='pop', **kwargs) - if hasattr(fieldset, 'U'): + if "creation_log" not in kwargs.keys(): + kwargs["creation_log"] = "from_pop" + fieldset = cls.from_b_grid_dataset( + filenames, + variables, + dimensions, + mesh=mesh, + indices=indices, + time_periodic=time_periodic, + allow_time_extrapolation=allow_time_extrapolation, + tracer_interp_method=tracer_interp_method, + chunksize=chunksize, + gridindexingtype="pop", + **kwargs, + ) + if hasattr(fieldset, "U"): fieldset.U.set_scaling_factor(0.01) # cm/s to m/s - if hasattr(fieldset, 'V'): + if hasattr(fieldset, "V"): fieldset.V.set_scaling_factor(0.01) # cm/s to m/s - if hasattr(fieldset, 'W'): - if depth_units == 'm': + if hasattr(fieldset, "W"): + if depth_units == "m": fieldset.W.set_scaling_factor(-0.01) # cm/s to m/s and change the W direction - logger.warning_once("Parcels assumes depth in POP output to be in 'm'. Use depth_units='cm' if the output depth is in 'cm'.") - elif depth_units == 'cm': - fieldset.W.set_scaling_factor(-1.) # change the W direction but keep W in cm/s because depth is in cm + logger.warning_once( + "Parcels assumes depth in POP output to be in 'm'. Use depth_units='cm' if the output depth is in 'cm'." 
+ ) + elif depth_units == "cm": + fieldset.W.set_scaling_factor(-1.0) # change the W direction but keep W in cm/s because depth is in cm else: raise SyntaxError("'depth_units' has to be 'm' or 'cm'") return fieldset @classmethod - def from_mom5(cls, filenames, variables, dimensions, indices=None, mesh='spherical', - allow_time_extrapolation=None, time_periodic=False, - tracer_interp_method='bgrid_tracer', chunksize=None, **kwargs): + def from_mom5( + cls, + filenames, + variables, + dimensions, + indices=None, + mesh="spherical", + allow_time_extrapolation=None, + time_periodic=False, + tracer_interp_method="bgrid_tracer", + chunksize=None, + **kwargs, + ): """Initialises FieldSet object from NetCDF files of MOM5 fields. Parameters @@ -846,12 +1000,22 @@ class FieldSet: **kwargs : Keyword arguments passed to the :func:`Fieldset.from_b_grid_dataset` constructor. """ - if 'creation_log' not in kwargs.keys(): - kwargs['creation_log'] = 'from_mom5' - fieldset = cls.from_b_grid_dataset(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic, - allow_time_extrapolation=allow_time_extrapolation, tracer_interp_method=tracer_interp_method, - chunksize=chunksize, gridindexingtype='mom5', **kwargs) - if hasattr(fieldset, 'W'): + if "creation_log" not in kwargs.keys(): + kwargs["creation_log"] = "from_mom5" + fieldset = cls.from_b_grid_dataset( + filenames, + variables, + dimensions, + mesh=mesh, + indices=indices, + time_periodic=time_periodic, + allow_time_extrapolation=allow_time_extrapolation, + tracer_interp_method=tracer_interp_method, + chunksize=chunksize, + gridindexingtype="mom5", + **kwargs, + ) + if hasattr(fieldset, "W"): fieldset.W.set_scaling_factor(-1) return fieldset @@ -879,9 +1043,19 @@ class FieldSet: return cls.from_netcdf(filenames, variables, dimensions, **kwargs) @classmethod - def from_b_grid_dataset(cls, filenames, variables, dimensions, indices=None, mesh='spherical', - allow_time_extrapolation=None, time_periodic=False, - tracer_interp_method='bgrid_tracer', chunksize=None, **kwargs): + def from_b_grid_dataset( + cls, + filenames, + variables, + dimensions, + indices=None, + mesh="spherical", + allow_time_extrapolation=None, + time_periodic=False, + tracer_interp_method="bgrid_tracer", + chunksize=None, + **kwargs, + ): """Initialises FieldSet object from NetCDF files of Bgrid fields. Parameters @@ -949,32 +1123,55 @@ class FieldSet: **kwargs : Keyword arguments passed to the :func:`Fieldset.from_netcdf` constructor. """ - if 'U' in dimensions and 'V' in dimensions and dimensions['U'] != dimensions['V']: - raise ValueError("On a B-grid, the dimensions of velocities should be the (top) corners of the grid cells, so the same for U and V. " - "See also ../examples/documentation_indexing.ipynb") - if 'U' in dimensions and 'W' in dimensions and dimensions['U'] != dimensions['W']: - raise ValueError("On a B-grid, the dimensions of velocities should be the (top) corners of the grid cells, so the same for U, V and W. " - "See also ../examples/documentation_indexing.ipynb") + if "U" in dimensions and "V" in dimensions and dimensions["U"] != dimensions["V"]: + raise ValueError( + "On a B-grid, the dimensions of velocities should be the (top) corners of the grid cells, so the same for U and V. 
" + "See also ../examples/documentation_indexing.ipynb" + ) + if "U" in dimensions and "W" in dimensions and dimensions["U"] != dimensions["W"]: + raise ValueError( + "On a B-grid, the dimensions of velocities should be the (top) corners of the grid cells, so the same for U, V and W. " + "See also ../examples/documentation_indexing.ipynb" + ) interp_method = {} for v in variables: - if v in ['U', 'V']: - interp_method[v] = 'bgrid_velocity' - elif v in ['W']: - interp_method[v] = 'bgrid_w_velocity' + if v in ["U", "V"]: + interp_method[v] = "bgrid_velocity" + elif v in ["W"]: + interp_method[v] = "bgrid_w_velocity" else: interp_method[v] = tracer_interp_method - if 'creation_log' not in kwargs.keys(): - kwargs['creation_log'] = 'from_b_grid_dataset' - - return cls.from_netcdf(filenames, variables, dimensions, mesh=mesh, indices=indices, time_periodic=time_periodic, - allow_time_extrapolation=allow_time_extrapolation, interp_method=interp_method, - chunksize=chunksize, **kwargs) + if "creation_log" not in kwargs.keys(): + kwargs["creation_log"] = "from_b_grid_dataset" + + return cls.from_netcdf( + filenames, + variables, + dimensions, + mesh=mesh, + indices=indices, + time_periodic=time_periodic, + allow_time_extrapolation=allow_time_extrapolation, + interp_method=interp_method, + chunksize=chunksize, + **kwargs, + ) @classmethod - def from_parcels(cls, basename, uvar='vozocrtx', vvar='vomecrty', indices=None, extra_fields=None, - allow_time_extrapolation=None, time_periodic=False, deferred_load=True, - chunksize=None, **kwargs): + def from_parcels( + cls, + basename, + uvar="vozocrtx", + vvar="vomecrty", + indices=None, + extra_fields=None, + allow_time_extrapolation=None, + time_periodic=False, + deferred_load=True, + chunksize=None, + **kwargs, + ): """Initialises FieldSet data from NetCDF files using the Parcels FieldSet.write() conventions. Parameters @@ -1015,25 +1212,32 @@ class FieldSet: """ if extra_fields is None: extra_fields = {} - if 'creation_log' not in kwargs.keys(): - kwargs['creation_log'] = 'from_parcels' + if "creation_log" not in kwargs.keys(): + kwargs["creation_log"] = "from_parcels" dimensions = {} - default_dims = {'lon': 'nav_lon', 'lat': 'nav_lat', - 'depth': 'depth', 'time': 'time_counter'} - extra_fields.update({'U': uvar, 'V': vvar}) + default_dims = {"lon": "nav_lon", "lat": "nav_lat", "depth": "depth", "time": "time_counter"} + extra_fields.update({"U": uvar, "V": vvar}) for vars in extra_fields: dimensions[vars] = deepcopy(default_dims) - dimensions[vars]['depth'] = 'depth%s' % vars.lower() + dimensions[vars]["depth"] = "depth%s" % vars.lower() filenames = {v: str(f"{basename}{v}.nc") for v in extra_fields.keys()} - return cls.from_netcdf(filenames, indices=indices, variables=extra_fields, - dimensions=dimensions, allow_time_extrapolation=allow_time_extrapolation, - time_periodic=time_periodic, deferred_load=deferred_load, - chunksize=chunksize, **kwargs) + return cls.from_netcdf( + filenames, + indices=indices, + variables=extra_fields, + dimensions=dimensions, + allow_time_extrapolation=allow_time_extrapolation, + time_periodic=time_periodic, + deferred_load=deferred_load, + chunksize=chunksize, + **kwargs, + ) @classmethod - def from_xarray_dataset(cls, ds, variables, dimensions, mesh='spherical', allow_time_extrapolation=None, - time_periodic=False, **kwargs): + def from_xarray_dataset( + cls, ds, variables, dimensions, mesh="spherical", allow_time_extrapolation=None, time_periodic=False, **kwargs + ): """Initialises FieldSet data from xarray Datasets. 
Parameters @@ -1070,21 +1274,28 @@ class FieldSet: Keyword arguments passed to the :func:`Field.from_xarray` constructor. """ fields = {} - if 'creation_log' not in kwargs.keys(): - kwargs['creation_log'] = 'from_xarray_dataset' - if 'time' in dimensions: - if 'units' not in ds[dimensions['time']].attrs and 'Unit' in ds[dimensions['time']].attrs: + if "creation_log" not in kwargs.keys(): + kwargs["creation_log"] = "from_xarray_dataset" + if "time" in dimensions: + if "units" not in ds[dimensions["time"]].attrs and "Unit" in ds[dimensions["time"]].attrs: # Fix DataArrays that have time.Unit instead of expected time.units - convert_xarray_time_units(ds, dimensions['time']) + convert_xarray_time_units(ds, dimensions["time"]) for var, name in variables.items(): dims = dimensions[var] if var in dimensions else dimensions cls.checkvaliddimensionsdict(dims) - fields[var] = Field.from_xarray(ds[name], var, dims, mesh=mesh, allow_time_extrapolation=allow_time_extrapolation, - time_periodic=time_periodic, **kwargs) - u = fields.pop('U', None) - v = fields.pop('V', None) + fields[var] = Field.from_xarray( + ds[name], + var, + dims, + mesh=mesh, + allow_time_extrapolation=allow_time_extrapolation, + time_periodic=time_periodic, + **kwargs, + ) + u = fields.pop("U", None) + v = fields.pop("V", None) return cls(u, v, fields=fields) @classmethod @@ -1183,16 +1394,16 @@ class FieldSet: if MPI is None or MPI.COMM_WORLD.Get_rank() == 0: logger.info(f"Generating FieldSet output with basename: {filename}") - if hasattr(self, 'U'): - self.U.write(filename, varname='vozocrtx') - if hasattr(self, 'V'): - self.V.write(filename, varname='vomecrty') + if hasattr(self, "U"): + self.U.write(filename, varname="vozocrtx") + if hasattr(self, "V"): + self.V.write(filename, varname="vomecrty") for v in self.get_fields(): - if isinstance(v, Field) and (v.name != 'U') and (v.name != 'V'): + if isinstance(v, Field) and (v.name != "U") and (v.name != "V"): v.write(filename) - def computeTimeChunk(self, time=0., dt=1): + def computeTimeChunk(self, time=0.0, dt=1): """Load a chunk of three data time steps into the FieldSet. This is used when FieldSet uses data imported from netcdf, with default option deferred_load. 
The loaded time steps are at or immediatly before time @@ -1212,21 +1423,21 @@ class FieldSet: nextTime = np.inf if dt > 0 else -np.inf for g in self.gridset.grids: - g.update_status = 'not_updated' + g.update_status = "not_updated" for f in self.get_fields(): if isinstance(f, (VectorField, NestedField)) or not f.grid.defer_load: continue - if f.grid.update_status == 'not_updated': + if f.grid.update_status == "not_updated": nextTime_loc = f.grid.computeTimeChunk(f, time, signdt) if time == nextTime_loc and signdt != 0: - raise TimeExtrapolationError(time, field=f, msg='In fset.computeTimeChunk') + raise TimeExtrapolationError(time, field=f, msg="In fset.computeTimeChunk") nextTime = min(nextTime, nextTime_loc) if signdt >= 0 else max(nextTime, nextTime_loc) for f in self.get_fields(): if isinstance(f, (VectorField, NestedField)) or not f.grid.defer_load or f.dataFiles is None: continue g = f.grid - if g.update_status == 'first_updated': # First load of data + if g.update_status == "first_updated": # First load of data if f.data is not None and not isinstance(f.data, DeferredArray): if not isinstance(f.data, list): f.data = None @@ -1235,11 +1446,13 @@ class FieldSet: del f.data[i, :] lib = np if f.chunksize in [False, None] else da - if f.gridindexingtype == 'pop' and g.zdim > 1: + if f.gridindexingtype == "pop" and g.zdim > 1: zd = g.zdim - 1 else: zd = g.zdim - data = lib.empty((g.tdim, zd, g.ydim-2*g.meridional_halo, g.xdim-2*g.zonal_halo), dtype=np.float32) + data = lib.empty( + (g.tdim, zd, g.ydim - 2 * g.meridional_halo, g.xdim - 2 * g.zonal_halo), dtype=np.float32 + ) f.loaded_time_indices = range(2) for tind in f.loaded_time_indices: for fb in f.filebuffers: @@ -1249,24 +1462,26 @@ class FieldSet: data = f.computeTimeChunk(data, tind) data = f.rescale_and_set_minmax(data) - if (isinstance(f.data, DeferredArray)): + if isinstance(f.data, DeferredArray): f.data = DeferredArray() f.data = f.reshape(data) if not f.chunk_set: f.chunk_setup() if len(g.load_chunk) > g.chunk_not_loaded: - g.load_chunk = np.where(g.load_chunk == g.chunk_loaded_touched, - g.chunk_loading_requested, g.load_chunk) - g.load_chunk = np.where(g.load_chunk == g.chunk_deprecated, - g.chunk_not_loaded, g.load_chunk) + g.load_chunk = np.where( + g.load_chunk == g.chunk_loaded_touched, g.chunk_loading_requested, g.load_chunk + ) + g.load_chunk = np.where(g.load_chunk == g.chunk_deprecated, g.chunk_not_loaded, g.load_chunk) - elif g.update_status == 'updated': + elif g.update_status == "updated": lib = np if isinstance(f.data, np.ndarray) else da - if f.gridindexingtype == 'pop' and g.zdim > 1: + if f.gridindexingtype == "pop" and g.zdim > 1: zd = g.zdim - 1 else: zd = g.zdim - data = lib.empty((g.tdim, zd, g.ydim-2*g.meridional_halo, g.xdim-2*g.zonal_halo), dtype=np.float32) + data = lib.empty( + (g.tdim, zd, g.ydim - 2 * g.meridional_halo, g.xdim - 2 * g.zonal_halo), dtype=np.float32 + ) if signdt >= 0: f.loaded_time_indices = [1] if f.filebuffers[0] is not None: @@ -1306,10 +1521,8 @@ class FieldSet: f.data[1, :] = None f.data[1, :] = f.data[0, :] f.data[0, :] = data - g.load_chunk = np.where(g.load_chunk == g.chunk_loaded_touched, - g.chunk_loading_requested, g.load_chunk) - g.load_chunk = np.where(g.load_chunk == g.chunk_deprecated, - g.chunk_not_loaded, g.load_chunk) + g.load_chunk = np.where(g.load_chunk == g.chunk_loaded_touched, g.chunk_loading_requested, g.load_chunk) + g.load_chunk = np.where(g.load_chunk == g.chunk_deprecated, g.chunk_not_loaded, g.load_chunk) if isinstance(f.data, da.core.Array) and 
len(g.load_chunk) > 0: if signdt >= 0: for block_id in range(len(g.load_chunk)): @@ -1320,7 +1533,7 @@ class FieldSet: break block = f.get_block(block_id) f.data_chunks[block_id][0] = None - f.data_chunks[block_id][1] = np.array(f.data.blocks[(slice(2),)+block][1]) + f.data_chunks[block_id][1] = np.array(f.data.blocks[(slice(2),) + block][1]) else: for block_id in range(len(g.load_chunk)): if g.load_chunk[block_id] == g.chunk_loaded_touched: @@ -1330,7 +1543,7 @@ class FieldSet: break block = f.get_block(block_id) f.data_chunks[block_id][1] = None - f.data_chunks[block_id][0] = np.array(f.data.blocks[(slice(2),)+block][0]) + f.data_chunks[block_id][0] = np.array(f.data.blocks[(slice(2),) + block][0]) # do user-defined computations on fieldset data if self.compute_on_defer: self.compute_on_defer(self) diff --git a/parcels/grid.py b/parcels/grid.py index d983313a..2ba8073c 100644 --- a/parcels/grid.py +++ b/parcels/grid.py @@ -7,7 +7,16 @@ import numpy as np from parcels.tools.converters import TimeConverter from parcels.tools.loggers import logger -__all__ = ['GridType', 'GridCode', 'RectilinearZGrid', 'RectilinearSGrid', 'CurvilinearZGrid', 'CurvilinearSGrid', 'CGrid', 'Grid'] +__all__ = [ + "GridType", + "GridCode", + "RectilinearZGrid", + "RectilinearSGrid", + "CurvilinearZGrid", + "CurvilinearSGrid", + "CGrid", + "Grid", +] class GridType(IntEnum): @@ -23,8 +32,7 @@ GridCode = GridType class CGrid(Structure): - _fields_ = [('gtype', c_int), - ('grid', c_void_p)] + _fields_ = [("gtype", c_int), ("grid", c_void_p)] class Grid: @@ -36,24 +44,26 @@ class Grid: self.zi = None self.ti = -1 self.lon = lon - if not self.lon.flags['C_CONTIGUOUS']: - self.lon = np.array(self.lon, order='C') + if not self.lon.flags["C_CONTIGUOUS"]: + self.lon = np.array(self.lon, order="C") self.lat = lat - if not self.lat.flags['C_CONTIGUOUS']: - self.lat = np.array(self.lat, order='C') + if not self.lat.flags["C_CONTIGUOUS"]: + self.lat = np.array(self.lat, order="C") self.time = np.zeros(1, dtype=np.float64) if time is None else time - if not self.time.flags['C_CONTIGUOUS']: - self.time = np.array(self.time, order='C') + if not self.time.flags["C_CONTIGUOUS"]: + self.time = np.array(self.time, order="C") if not self.lon.dtype == np.float32: self.lon = self.lon.astype(np.float32) if not self.lat.dtype == np.float32: self.lat = self.lat.astype(np.float32) if not self.time.dtype == np.float64: - assert isinstance(self.time[0], (np.integer, np.floating, float, int)), 'Time vector must be an array of int or floats' + assert isinstance( + self.time[0], (np.integer, np.floating, float, int) + ), "Time vector must be an array of int or floats" self.time = self.time.astype(np.float64) self.time_full = self.time # needed for deferred_loaded Fields self.time_origin = TimeConverter() if time_origin is None else time_origin - assert isinstance(self.time_origin, TimeConverter), 'time_origin needs to be a TimeConverter object' + assert isinstance(self.time_origin, TimeConverter), "time_origin needs to be a TimeConverter object" self.mesh = mesh self.cstruct = None self.cell_edge_sizes = {} @@ -62,7 +72,9 @@ class Grid: self.meridional_halo = 0 self.lat_flipped = False self.defer_load = False - self.lonlat_minmax = np.array([np.nanmin(lon), np.nanmax(lon), np.nanmin(lat), np.nanmax(lat)], dtype=np.float32) + self.lonlat_minmax = np.array( + [np.nanmin(lon), np.nanmax(lon), np.nanmin(lat), np.nanmax(lat)], dtype=np.float32 + ) self.periods = 0 self.load_chunk = [] self.chunk_info = None @@ -101,35 +113,53 @@ class Grid: 
"""Returns a ctypes struct object containing all relevant pointers and sizes for this grid. """ + class CStructuredGrid(Structure): # z4d is only to have same cstruct as RectilinearSGrid - _fields_ = [('xdim', c_int), ('ydim', c_int), ('zdim', c_int), - ('tdim', c_int), ('z4d', c_int), - ('mesh_spherical', c_int), ('zonal_periodic', c_int), - ('chunk_info', POINTER(c_int)), - ('load_chunk', POINTER(c_int)), - ('tfull_min', c_double), ('tfull_max', c_double), ('periods', POINTER(c_int)), - ('lonlat_minmax', POINTER(c_float)), - ('lon', POINTER(c_float)), ('lat', POINTER(c_float)), - ('depth', POINTER(c_float)), ('time', POINTER(c_double)) - ] + _fields_ = [ + ("xdim", c_int), + ("ydim", c_int), + ("zdim", c_int), + ("tdim", c_int), + ("z4d", c_int), + ("mesh_spherical", c_int), + ("zonal_periodic", c_int), + ("chunk_info", POINTER(c_int)), + ("load_chunk", POINTER(c_int)), + ("tfull_min", c_double), + ("tfull_max", c_double), + ("periods", POINTER(c_int)), + ("lonlat_minmax", POINTER(c_float)), + ("lon", POINTER(c_float)), + ("lat", POINTER(c_float)), + ("depth", POINTER(c_float)), + ("time", POINTER(c_double)), + ] # Create and populate the c-struct object if not self.cstruct: # Not to point to the same grid various times if grid in various fields if not isinstance(self.periods, c_int): self.periods = c_int() self.periods.value = 0 - self.cstruct = CStructuredGrid(self.xdim, self.ydim, self.zdim, - self.tdim, self.z4d, - int(self.mesh == 'spherical'), int(self.zonal_periodic), - (c_int * len(self.chunk_info))(*self.chunk_info), - self.load_chunk.ctypes.data_as(POINTER(c_int)), - self.time_full[0], self.time_full[-1], pointer(self.periods), - self.lonlat_minmax.ctypes.data_as(POINTER(c_float)), - self.lon.ctypes.data_as(POINTER(c_float)), - self.lat.ctypes.data_as(POINTER(c_float)), - self.depth.ctypes.data_as(POINTER(c_float)), - self.time.ctypes.data_as(POINTER(c_double))) + self.cstruct = CStructuredGrid( + self.xdim, + self.ydim, + self.zdim, + self.tdim, + self.z4d, + int(self.mesh == "spherical"), + int(self.zonal_periodic), + (c_int * len(self.chunk_info))(*self.chunk_info), + self.load_chunk.ctypes.data_as(POINTER(c_int)), + self.time_full[0], + self.time_full[-1], + pointer(self.periods), + self.lonlat_minmax.ctypes.data_as(POINTER(c_float)), + self.lon.ctypes.data_as(POINTER(c_float)), + self.lat.ctypes.data_as(POINTER(c_float)), + self.depth.ctypes.data_as(POINTER(c_float)), + self.time.ctypes.data_as(POINTER(c_double)), + ) return self.cstruct def lon_grid_to_target(self): @@ -146,79 +176,113 @@ class Grid: return lon def check_zonal_periodic(self): - if self.zonal_periodic or self.mesh == 'flat' or self.lon.size == 1: + if self.zonal_periodic or self.mesh == "flat" or self.lon.size == 1: return dx = (self.lon[1:] - self.lon[:-1]) if len(self.lon.shape) == 1 else self.lon[0, 1:] - self.lon[0, :-1] - dx = np.where(dx < -180, dx+360, dx) - dx = np.where(dx > 180, dx-360, dx) + dx = np.where(dx < -180, dx + 360, dx) + dx = np.where(dx > 180, dx - 360, dx) self.zonal_periodic = sum(dx) > 359.9 def add_Sdepth_periodic_halo(self, zonal, meridional, halosize): if zonal: if len(self.depth.shape) == 3: - self.depth = np.concatenate((self.depth[:, :, -halosize:], self.depth, - self.depth[:, :, 0:halosize]), axis=len(self.depth.shape) - 1) + self.depth = np.concatenate( + (self.depth[:, :, -halosize:], self.depth, self.depth[:, :, 0:halosize]), + axis=len(self.depth.shape) - 1, + ) assert self.depth.shape[2] == self.xdim, "Third dim must be x." 
else: - self.depth = np.concatenate((self.depth[:, :, :, -halosize:], self.depth, - self.depth[:, :, :, 0:halosize]), axis=len(self.depth.shape) - 1) + self.depth = np.concatenate( + (self.depth[:, :, :, -halosize:], self.depth, self.depth[:, :, :, 0:halosize]), + axis=len(self.depth.shape) - 1, + ) assert self.depth.shape[3] == self.xdim, "Fourth dim must be x." if meridional: if len(self.depth.shape) == 3: - self.depth = np.concatenate((self.depth[:, -halosize:, :], self.depth, - self.depth[:, 0:halosize, :]), axis=len(self.depth.shape) - 2) + self.depth = np.concatenate( + (self.depth[:, -halosize:, :], self.depth, self.depth[:, 0:halosize, :]), + axis=len(self.depth.shape) - 2, + ) assert self.depth.shape[1] == self.ydim, "Second dim must be y." else: - self.depth = np.concatenate((self.depth[:, :, -halosize:, :], self.depth, - self.depth[:, :, 0:halosize, :]), axis=len(self.depth.shape) - 2) + self.depth = np.concatenate( + (self.depth[:, :, -halosize:, :], self.depth, self.depth[:, :, 0:halosize, :]), + axis=len(self.depth.shape) - 2, + ) assert self.depth.shape[2] == self.ydim, "Third dim must be y." def computeTimeChunk(self, f, time, signdt): nextTime_loc = np.inf if signdt >= 0 else -np.inf periods = self.periods.value if isinstance(self.periods, c_int) else self.periods prev_time_indices = self.time - if self.update_status == 'not_updated': + if self.update_status == "not_updated": if self.ti >= 0: - if time - periods*(self.time_full[-1]-self.time_full[0]) < self.time[0] or time - periods*(self.time_full[-1]-self.time_full[0]) > self.time[1]: + if ( + time - periods * (self.time_full[-1] - self.time_full[0]) < self.time[0] + or time - periods * (self.time_full[-1] - self.time_full[0]) > self.time[1] + ): self.ti = -1 # reset - elif signdt >= 0 and (time - periods*(self.time_full[-1]-self.time_full[0]) < self.time_full[0] or time - periods*(self.time_full[-1]-self.time_full[0]) >= self.time_full[-1]): + elif signdt >= 0 and ( + time - periods * (self.time_full[-1] - self.time_full[0]) < self.time_full[0] + or time - periods * (self.time_full[-1] - self.time_full[0]) >= self.time_full[-1] + ): self.ti = -1 # reset - elif signdt < 0 and (time - periods*(self.time_full[-1]-self.time_full[0]) <= self.time_full[0] or time - periods*(self.time_full[-1]-self.time_full[0]) > self.time_full[-1]): + elif signdt < 0 and ( + time - periods * (self.time_full[-1] - self.time_full[0]) <= self.time_full[0] + or time - periods * (self.time_full[-1] - self.time_full[0]) > self.time_full[-1] + ): self.ti = -1 # reset - elif signdt >= 0 and time - periods*(self.time_full[-1]-self.time_full[0]) >= self.time[1] and self.ti < len(self.time_full)-2: + elif ( + signdt >= 0 + and time - periods * (self.time_full[-1] - self.time_full[0]) >= self.time[1] + and self.ti < len(self.time_full) - 2 + ): self.ti += 1 - self.time = self.time_full[self.ti:self.ti+2] - self.update_status = 'updated' - elif signdt < 0 and time - periods*(self.time_full[-1]-self.time_full[0]) <= self.time[0] and self.ti > 0: + self.time = self.time_full[self.ti : self.ti + 2] + self.update_status = "updated" + elif ( + signdt < 0 + and time - periods * (self.time_full[-1] - self.time_full[0]) <= self.time[0] + and self.ti > 0 + ): self.ti -= 1 - self.time = self.time_full[self.ti:self.ti+2] - self.update_status = 'updated' + self.time = self.time_full[self.ti : self.ti + 2] + self.update_status = "updated" if self.ti == -1: self.time = self.time_full self.ti, _ = f.time_index(time) periods = self.periods.value if 
isinstance(self.periods, c_int) else self.periods - if signdt == -1 and self.ti == 0 and (time - periods*(self.time_full[-1]-self.time_full[0])) == self.time[0] and f.time_periodic: - self.ti = len(self.time)-1 + if ( + signdt == -1 + and self.ti == 0 + and (time - periods * (self.time_full[-1] - self.time_full[0])) == self.time[0] + and f.time_periodic + ): + self.ti = len(self.time) - 1 periods -= 1 if signdt == -1 and self.ti > 0 and self.time_full[self.ti] == time: self.ti -= 1 if self.ti >= len(self.time_full) - 1: self.ti = len(self.time_full) - 2 - self.time = self.time_full[self.ti:self.ti+2] + self.time = self.time_full[self.ti : self.ti + 2] self.tdim = 2 if prev_time_indices is None or len(prev_time_indices) != 2 or len(prev_time_indices) != len(self.time): - self.update_status = 'first_updated' - elif functools.reduce(lambda i, j: i and j, map(lambda m, k: m == k, self.time, prev_time_indices), True) and len(prev_time_indices) == len(self.time): - self.update_status = 'not_updated' - elif functools.reduce(lambda i, j: i and j, map(lambda m, k: m == k, self.time[:1], prev_time_indices[:1]), True) and len(prev_time_indices) == len(self.time): - self.update_status = 'updated' + self.update_status = "first_updated" + elif functools.reduce( + lambda i, j: i and j, map(lambda m, k: m == k, self.time, prev_time_indices), True + ) and len(prev_time_indices) == len(self.time): + self.update_status = "not_updated" + elif functools.reduce( + lambda i, j: i and j, map(lambda m, k: m == k, self.time[:1], prev_time_indices[:1]), True + ) and len(prev_time_indices) == len(self.time): + self.update_status = "updated" else: - self.update_status = 'first_updated' - if signdt >= 0 and (self.ti < len(self.time_full)-2 or not f.allow_time_extrapolation): - nextTime_loc = self.time[1] + periods*(self.time_full[-1]-self.time_full[0]) + self.update_status = "first_updated" + if signdt >= 0 and (self.ti < len(self.time_full) - 2 or not f.allow_time_extrapolation): + nextTime_loc = self.time[1] + periods * (self.time_full[-1] - self.time_full[0]) elif signdt < 0 and (self.ti > 0 or not f.allow_time_extrapolation): - nextTime_loc = self.time[0] + periods*(self.time_full[-1]-self.time_full[0]) + nextTime_loc = self.time[0] + periods * (self.time_full[-1] - self.time_full[0]) return nextTime_loc @property @@ -250,11 +314,11 @@ class RectilinearGrid(Grid): """ def __init__(self, lon, lat, time, time_origin, mesh): - assert (isinstance(lon, np.ndarray) and len(lon.shape) <= 1), 'lon is not a numpy vector' - assert (isinstance(lat, np.ndarray) and len(lat.shape) <= 1), 'lat is not a numpy vector' - assert (isinstance(time, np.ndarray) or not time), 'time is not a numpy array' + assert isinstance(lon, np.ndarray) and len(lon.shape) <= 1, "lon is not a numpy vector" + assert isinstance(lat, np.ndarray) and len(lat.shape) <= 1, "lat is not a numpy vector" + assert isinstance(time, np.ndarray) or not time, "time is not a numpy array" if isinstance(time, np.ndarray): - assert (len(time.shape) == 1), 'time is not a vector' + assert len(time.shape) == 1, "time is not a vector" super().__init__(lon, lat, time, time_origin, mesh) self.xdim = self.lon.size @@ -263,8 +327,10 @@ class RectilinearGrid(Grid): if self.ydim > 1 and self.lat[-1] < self.lat[0]: self.lat = np.flip(self.lat, axis=0) self.lat_flipped = True - logger.warning_once("Flipping lat data from North-South to South-North. 
" - "Note that this may lead to wrong sign for meridional velocity, so tread very carefully") + logger.warning_once( + "Flipping lat data from North-South to South-North. " + "Note that this may lead to wrong sign for meridional velocity, so tread very carefully" + ) def add_periodic_halo(self, zonal, meridional, halosize=5): """Add a 'halo' to the Grid, through extending the Grid (and lon/lat) @@ -280,23 +346,27 @@ class RectilinearGrid(Grid): size of the halo (in grid points). Default is 5 grid points """ if zonal: - lonshift = (self.lon[-1] - 2 * self.lon[0] + self.lon[1]) - if not np.allclose(self.lon[1]-self.lon[0], self.lon[-1]-self.lon[-2]): - logger.warning_once("The zonal halo is located at the east and west of current grid, with a dx = lon[1]-lon[0] between the last nodes of the original grid and the first ones of the halo. In your grid, lon[1]-lon[0] != lon[-1]-lon[-2]. Is the halo computed as you expect?") - self.lon = np.concatenate((self.lon[-halosize:] - lonshift, - self.lon, self.lon[0:halosize] + lonshift)) + lonshift = self.lon[-1] - 2 * self.lon[0] + self.lon[1] + if not np.allclose(self.lon[1] - self.lon[0], self.lon[-1] - self.lon[-2]): + logger.warning_once( + "The zonal halo is located at the east and west of current grid, with a dx = lon[1]-lon[0] between the last nodes of the original grid and the first ones of the halo. In your grid, lon[1]-lon[0] != lon[-1]-lon[-2]. Is the halo computed as you expect?" + ) + self.lon = np.concatenate((self.lon[-halosize:] - lonshift, self.lon, self.lon[0:halosize] + lonshift)) self.xdim = self.lon.size self.zonal_periodic = True self.zonal_halo = halosize if meridional: - if not np.allclose(self.lat[1]-self.lat[0], self.lat[-1]-self.lat[-2]): - logger.warning_once("The meridional halo is located at the north and south of current grid, with a dy = lat[1]-lat[0] between the last nodes of the original grid and the first ones of the halo. In your grid, lat[1]-lat[0] != lat[-1]-lat[-2]. Is the halo computed as you expect?") - latshift = (self.lat[-1] - 2 * self.lat[0] + self.lat[1]) - self.lat = np.concatenate((self.lat[-halosize:] - latshift, - self.lat, self.lat[0:halosize] + latshift)) + if not np.allclose(self.lat[1] - self.lat[0], self.lat[-1] - self.lat[-2]): + logger.warning_once( + "The meridional halo is located at the north and south of current grid, with a dy = lat[1]-lat[0] between the last nodes of the original grid and the first ones of the halo. In your grid, lat[1]-lat[0] != lat[-1]-lat[-2]. Is the halo computed as you expect?" + ) + latshift = self.lat[-1] - 2 * self.lat[0] + self.lat[1] + self.lat = np.concatenate((self.lat[-halosize:] - latshift, self.lat, self.lat[0:halosize] + latshift)) self.ydim = self.lat.size self.meridional_halo = halosize - self.lonlat_minmax = np.array([np.nanmin(self.lon), np.nanmax(self.lon), np.nanmin(self.lat), np.nanmax(self.lat)], dtype=np.float32) + self.lonlat_minmax = np.array( + [np.nanmin(self.lon), np.nanmax(self.lon), np.nanmin(self.lat), np.nanmax(self.lat)], dtype=np.float32 + ) if isinstance(self, RectilinearSGrid): self.add_Sdepth_periodic_halo(zonal, meridional, halosize) @@ -326,15 +396,15 @@ class RectilinearZGrid(RectilinearGrid): 2. flat: No conversion, lat/lon are assumed to be in m. 
""" - def __init__(self, lon, lat, depth=None, time=None, time_origin=None, mesh='flat'): + def __init__(self, lon, lat, depth=None, time=None, time_origin=None, mesh="flat"): super().__init__(lon, lat, time, time_origin, mesh) if isinstance(depth, np.ndarray): - assert (len(depth.shape) <= 1), 'depth is not a vector' + assert len(depth.shape) <= 1, "depth is not a vector" self.gtype = GridType.RectilinearZGrid self.depth = np.zeros(1, dtype=np.float32) if depth is None else depth - if not self.depth.flags['C_CONTIGUOUS']: - self.depth = np.array(self.depth, order='C') + if not self.depth.flags["C_CONTIGUOUS"]: + self.depth = np.array(self.depth, order="C") self.zdim = self.depth.size self.z4d = -1 # only used in RectilinearSGrid if not self.depth.dtype == np.float32: @@ -372,24 +442,34 @@ class RectilinearSGrid(RectilinearGrid): 2. flat: No conversion, lat/lon are assumed to be in m. """ - def __init__(self, lon, lat, depth, time=None, time_origin=None, mesh='flat'): + def __init__(self, lon, lat, depth, time=None, time_origin=None, mesh="flat"): super().__init__(lon, lat, time, time_origin, mesh) - assert (isinstance(depth, np.ndarray) and len(depth.shape) in [3, 4]), 'depth is not a 3D or 4D numpy array' + assert isinstance(depth, np.ndarray) and len(depth.shape) in [3, 4], "depth is not a 3D or 4D numpy array" self.gtype = GridType.RectilinearSGrid self.depth = depth - if not self.depth.flags['C_CONTIGUOUS']: - self.depth = np.array(self.depth, order='C') + if not self.depth.flags["C_CONTIGUOUS"]: + self.depth = np.array(self.depth, order="C") self.zdim = self.depth.shape[-3] self.z4d = 1 if len(self.depth.shape) == 4 else 0 if self.z4d: # self.depth.shape[0] is 0 for S grids loaded from netcdf file - assert self.tdim == self.depth.shape[0] or self.depth.shape[0] == 0, 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]' - assert self.xdim == self.depth.shape[-1] or self.depth.shape[-1] == 0, 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]' - assert self.ydim == self.depth.shape[-2] or self.depth.shape[-2] == 0, 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]' + assert ( + self.tdim == self.depth.shape[0] or self.depth.shape[0] == 0 + ), "depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]" + assert ( + self.xdim == self.depth.shape[-1] or self.depth.shape[-1] == 0 + ), "depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]" + assert ( + self.ydim == self.depth.shape[-2] or self.depth.shape[-2] == 0 + ), "depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]" else: - assert self.xdim == self.depth.shape[-1], 'depth dimension has the wrong format. It should be [zdim, ydim, xdim]' - assert self.ydim == self.depth.shape[-2], 'depth dimension has the wrong format. It should be [zdim, ydim, xdim]' + assert ( + self.xdim == self.depth.shape[-1] + ), "depth dimension has the wrong format. It should be [zdim, ydim, xdim]" + assert ( + self.ydim == self.depth.shape[-2] + ), "depth dimension has the wrong format. 
It should be [zdim, ydim, xdim]" if not self.depth.dtype == np.float32: self.depth = self.depth.astype(np.float32) if self.lat_flipped: @@ -397,13 +477,12 @@ class RectilinearSGrid(RectilinearGrid): class CurvilinearGrid(Grid): - - def __init__(self, lon, lat, time=None, time_origin=None, mesh='flat'): - assert (isinstance(lon, np.ndarray) and len(lon.squeeze().shape) == 2), 'lon is not a 2D numpy array' - assert (isinstance(lat, np.ndarray) and len(lat.squeeze().shape) == 2), 'lat is not a 2D numpy array' - assert (isinstance(time, np.ndarray) or not time), 'time is not a numpy array' + def __init__(self, lon, lat, time=None, time_origin=None, mesh="flat"): + assert isinstance(lon, np.ndarray) and len(lon.squeeze().shape) == 2, "lon is not a 2D numpy array" + assert isinstance(lat, np.ndarray) and len(lat.squeeze().shape) == 2, "lat is not a 2D numpy array" + assert isinstance(time, np.ndarray) or not time, "time is not a numpy array" if isinstance(time, np.ndarray): - assert (len(time.shape) == 1), 'time is not a vector' + assert len(time.shape) == 1, "time is not a vector" lon = lon.squeeze() lat = lat.squeeze() @@ -427,28 +506,42 @@ class CurvilinearGrid(Grid): """ if zonal: lonshift = self.lon[:, -1] - 2 * self.lon[:, 0] + self.lon[:, 1] - if not np.allclose(self.lon[:, 1]-self.lon[:, 0], self.lon[:, -1]-self.lon[:, -2]): - logger.warning_once("The zonal halo is located at the east and west of current grid, with a dx = lon[:,1]-lon[:,0] between the last nodes of the original grid and the first ones of the halo. In your grid, lon[:,1]-lon[:,0] != lon[:,-1]-lon[:,-2]. Is the halo computed as you expect?") - self.lon = np.concatenate((self.lon[:, -halosize:] - lonshift[:, np.newaxis], - self.lon, self.lon[:, 0:halosize] + lonshift[:, np.newaxis]), - axis=len(self.lon.shape)-1) - self.lat = np.concatenate((self.lat[:, -halosize:], - self.lat, self.lat[:, 0:halosize]), - axis=len(self.lat.shape)-1) + if not np.allclose(self.lon[:, 1] - self.lon[:, 0], self.lon[:, -1] - self.lon[:, -2]): + logger.warning_once( + "The zonal halo is located at the east and west of current grid, with a dx = lon[:,1]-lon[:,0] between the last nodes of the original grid and the first ones of the halo. In your grid, lon[:,1]-lon[:,0] != lon[:,-1]-lon[:,-2]. Is the halo computed as you expect?" + ) + self.lon = np.concatenate( + ( + self.lon[:, -halosize:] - lonshift[:, np.newaxis], + self.lon, + self.lon[:, 0:halosize] + lonshift[:, np.newaxis], + ), + axis=len(self.lon.shape) - 1, + ) + self.lat = np.concatenate( + (self.lat[:, -halosize:], self.lat, self.lat[:, 0:halosize]), axis=len(self.lat.shape) - 1 + ) self.xdim = self.lon.shape[1] self.ydim = self.lat.shape[0] self.zonal_periodic = True self.zonal_halo = halosize if meridional: - if not np.allclose(self.lat[1, :]-self.lat[0, :], self.lat[-1, :]-self.lat[-2, :]): - logger.warning_once("The meridional halo is located at the north and south of current grid, with a dy = lat[1,:]-lat[0,:] between the last nodes of the original grid and the first ones of the halo. In your grid, lat[1,:]-lat[0,:] != lat[-1,:]-lat[-2,:]. Is the halo computed as you expect?") + if not np.allclose(self.lat[1, :] - self.lat[0, :], self.lat[-1, :] - self.lat[-2, :]): + logger.warning_once( + "The meridional halo is located at the north and south of current grid, with a dy = lat[1,:]-lat[0,:] between the last nodes of the original grid and the first ones of the halo. In your grid, lat[1,:]-lat[0,:] != lat[-1,:]-lat[-2,:]. Is the halo computed as you expect?" 
+ ) latshift = self.lat[-1, :] - 2 * self.lat[0, :] + self.lat[1, :] - self.lat = np.concatenate((self.lat[-halosize:, :] - latshift[np.newaxis, :], - self.lat, self.lat[0:halosize, :] + latshift[np.newaxis, :]), - axis=len(self.lat.shape)-2) - self.lon = np.concatenate((self.lon[-halosize:, :], - self.lon, self.lon[0:halosize, :]), - axis=len(self.lon.shape)-2) + self.lat = np.concatenate( + ( + self.lat[-halosize:, :] - latshift[np.newaxis, :], + self.lat, + self.lat[0:halosize, :] + latshift[np.newaxis, :], + ), + axis=len(self.lat.shape) - 2, + ) + self.lon = np.concatenate( + (self.lon[-halosize:, :], self.lon, self.lon[0:halosize, :]), axis=len(self.lon.shape) - 2 + ) self.xdim = self.lon.shape[1] self.ydim = self.lat.shape[0] self.meridional_halo = halosize @@ -481,15 +574,15 @@ class CurvilinearZGrid(CurvilinearGrid): 2. flat: No conversion, lat/lon are assumed to be in m. """ - def __init__(self, lon, lat, depth=None, time=None, time_origin=None, mesh='flat'): + def __init__(self, lon, lat, depth=None, time=None, time_origin=None, mesh="flat"): super().__init__(lon, lat, time, time_origin, mesh) if isinstance(depth, np.ndarray): - assert (len(depth.shape) == 1), 'depth is not a vector' + assert len(depth.shape) == 1, "depth is not a vector" self.gtype = GridType.CurvilinearZGrid self.depth = np.zeros(1, dtype=np.float32) if depth is None else depth - if not self.depth.flags['C_CONTIGUOUS']: - self.depth = np.array(self.depth, order='C') + if not self.depth.flags["C_CONTIGUOUS"]: + self.depth = np.array(self.depth, order="C") self.zdim = self.depth.size self.z4d = -1 # only for SGrid if not self.depth.dtype == np.float32: @@ -526,23 +619,33 @@ class CurvilinearSGrid(CurvilinearGrid): 2. flat: No conversion, lat/lon are assumed to be in m. """ - def __init__(self, lon, lat, depth, time=None, time_origin=None, mesh='flat'): + def __init__(self, lon, lat, depth, time=None, time_origin=None, mesh="flat"): super().__init__(lon, lat, time, time_origin, mesh) - assert (isinstance(depth, np.ndarray) and len(depth.shape) in [3, 4]), 'depth is not a 4D numpy array' + assert isinstance(depth, np.ndarray) and len(depth.shape) in [3, 4], "depth is not a 4D numpy array" self.gtype = GridType.CurvilinearSGrid self.depth = depth # should be a C-contiguous array of floats - if not self.depth.flags['C_CONTIGUOUS']: - self.depth = np.array(self.depth, order='C') + if not self.depth.flags["C_CONTIGUOUS"]: + self.depth = np.array(self.depth, order="C") self.zdim = self.depth.shape[-3] self.z4d = 1 if len(self.depth.shape) == 4 else 0 if self.z4d: # self.depth.shape[0] is 0 for S grids loaded from netcdf file - assert self.tdim == self.depth.shape[0] or self.depth.shape[0] == 0, 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]' - assert self.xdim == self.depth.shape[-1] or self.depth.shape[-1] == 0, 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]' - assert self.ydim == self.depth.shape[-2] or self.depth.shape[-2] == 0, 'depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]' + assert ( + self.tdim == self.depth.shape[0] or self.depth.shape[0] == 0 + ), "depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]" + assert ( + self.xdim == self.depth.shape[-1] or self.depth.shape[-1] == 0 + ), "depth dimension has the wrong format. It should be [tdim, zdim, ydim, xdim]" + assert ( + self.ydim == self.depth.shape[-2] or self.depth.shape[-2] == 0 + ), "depth dimension has the wrong format. 
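
Both the rectilinear and the curvilinear halo code above build the extended axis the same way: prepend the last halosize nodes shifted down by one domain width, append the first halosize nodes shifted up. A one-dimensional sketch of that construction, under the uniform-spacing assumption the warnings test for:

    import numpy as np

    def zonal_halo(lon, halosize=5):
        # One full domain width plus one cell: lon[-1] - 2 * lon[0] + lon[1]
        shift = lon[-1] - 2 * lon[0] + lon[1]
        return np.concatenate((lon[-halosize:] - shift, lon, lon[:halosize] + shift))

    lon = np.arange(0.0, 360.0, 10.0)  # uniform grid, dx = 10
    print(zonal_halo(lon, halosize=2)[:3])   # [-20. -10.   0.]
    print(zonal_halo(lon, halosize=2)[-3:])  # [350. 360. 370.]
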
It should be [tdim, zdim, ydim, xdim]" else: - assert self.xdim == self.depth.shape[-1], 'depth dimension has the wrong format. It should be [zdim, ydim, xdim]' - assert self.ydim == self.depth.shape[-2], 'depth dimension has the wrong format. It should be [zdim, ydim, xdim]' + assert ( + self.xdim == self.depth.shape[-1] + ), "depth dimension has the wrong format. It should be [zdim, ydim, xdim]" + assert ( + self.ydim == self.depth.shape[-2] + ), "depth dimension has the wrong format. It should be [zdim, ydim, xdim]" if not self.depth.dtype == np.float32: self.depth = self.depth.astype(np.float32) diff --git a/parcels/gridset.py b/parcels/gridset.py index 46a431fa..baf3e2dc 100644 --- a/parcels/gridset.py +++ b/parcels/gridset.py @@ -1,6 +1,6 @@ import numpy as np -__all__ = ['GridSet'] +__all__ = ["GridSet"] class GridSet: @@ -13,7 +13,7 @@ class GridSet: grid = field.grid existing_grid = False for g in self.grids: - if field.chunksize == 'auto': + if field.chunksize == "auto": break if g == grid: existing_grid = True @@ -21,7 +21,7 @@ class GridSet: sameGrid = True if grid.time_origin != g.time_origin: continue - for attr in ['lon', 'lat', 'depth', 'time']: + for attr in ["lon", "lat", "depth", "time"]: gattr = getattr(g, attr) gridattr = getattr(grid, attr) if gattr.shape != gridattr.shape or not np.allclose(gattr, gridattr): @@ -54,7 +54,7 @@ class GridSet: if getattr(g, dim).size == 1: continue # not including grids where only one entry else: - if dim == 'depth': + if dim == "depth": maxleft = max(maxleft, np.min(getattr(g, dim))) minright = min(minright, np.max(getattr(g, dim))) else: diff --git a/parcels/interaction/interactionkernel.py b/parcels/interaction/interactionkernel.py index 836ce105..1b9da571 100644 --- a/parcels/interaction/interactionkernel.py +++ b/parcels/interaction/interactionkernel.py @@ -14,7 +14,7 @@ from parcels.kernel import BaseKernel from parcels.tools.loggers import logger from parcels.tools.statuscodes import StatusCode -__all__ = ['InteractionKernel'] +__all__ = ["InteractionKernel"] class InteractionKernel(BaseKernel): @@ -26,29 +26,49 @@ class InteractionKernel(BaseKernel): InteractionKernel. """ - def __init__(self, fieldset, ptype, pyfunc=None, funcname=None, - funccode=None, py_ast=None, funcvars=None, - c_include="", delete_cfiles=True): + def __init__( + self, + fieldset, + ptype, + pyfunc=None, + funcname=None, + funccode=None, + py_ast=None, + funcvars=None, + c_include="", + delete_cfiles=True, + ): if MPI is not None and MPI.COMM_WORLD.Get_size() > 1: - raise NotImplementedError("InteractionKernels are not supported in an MPI environment. Please run your simulation outside MPI.") + raise NotImplementedError( + "InteractionKernels are not supported in an MPI environment. Please run your simulation outside MPI." + ) if MPI is not None and MPI.COMM_WORLD.Get_size() > 1: - raise NotImplementedError("InteractionKernels are not supported in an MPI environment. Please run your simulation outside MPI.") + raise NotImplementedError( + "InteractionKernels are not supported in an MPI environment. Please run your simulation outside MPI." 
+ ) if pyfunc is not None: if isinstance(pyfunc, list): - funcname = ''.join([func.__name__ for func in pyfunc]) + funcname = "".join([func.__name__ for func in pyfunc]) else: funcname = pyfunc.__name__ super().__init__( - fieldset=fieldset, ptype=ptype, pyfunc=pyfunc, funcname=funcname, - funccode=funccode, py_ast=py_ast, funcvars=funcvars, - c_include=c_include, delete_cfiles=delete_cfiles) + fieldset=fieldset, + ptype=ptype, + pyfunc=pyfunc, + funcname=funcname, + funccode=funccode, + py_ast=py_ast, + funcvars=funcvars, + c_include=c_include, + delete_cfiles=delete_cfiles, + ) if pyfunc is not None: if isinstance(pyfunc, list): - funcname = ''.join([func.__name__ for func in pyfunc]) + funcname = "".join([func.__name__ for func in pyfunc]) else: funcname = pyfunc.__name__ @@ -59,18 +79,18 @@ class InteractionKernel(BaseKernel): self._pyfunc = [pyfunc] if self._ptype.uses_jit: - raise NotImplementedError("JIT mode is not supported for" - " InteractionKernels. Please run your" - " simulation in SciPy mode.") + raise NotImplementedError( + "JIT mode is not supported for" " InteractionKernels. Please run your" " simulation in SciPy mode." + ) for func in self._pyfunc: self.check_fieldsets_in_kernels(func) numkernelargs = self.check_kernel_signature_on_version() - assert numkernelargs[0] == 5 and \ - numkernelargs.count(numkernelargs[0]) == len(numkernelargs), \ - 'Interactionkernels take exactly 5 arguments: particle, fieldset, time, neighbors, mutator' + assert numkernelargs[0] == 5 and numkernelargs.count(numkernelargs[0]) == len( + numkernelargs + ), "Interactionkernels take exactly 5 arguments: particle, fieldset, time, neighbors, mutator" # At this time, JIT mode is not supported for InteractionKernels, # so there is no need for any further "processing" of pyfunc's. @@ -94,13 +114,9 @@ class InteractionKernel(BaseKernel): if self._pyfunc is not None and isinstance(self._pyfunc, list): for func in self._pyfunc: if sys.version_info[0] < 3: - numkernelargs.append( - len(inspect.getargspec(func).args) - ) + numkernelargs.append(len(inspect.getargspec(func).args)) else: - numkernelargs.append( - len(inspect.getfullargspec(func).args) - ) + numkernelargs.append(len(inspect.getfullargspec(func).args)) return numkernelargs def remove_lib(self): @@ -122,8 +138,7 @@ class InteractionKernel(BaseKernel): funcname = self.funcname + kernel.funcname # delete_cfiles = self.delete_cfiles and kernel.delete_cfiles pyfunc = self._pyfunc + kernel._pyfunc - return kclass(self._fieldset, self._ptype, pyfunc=pyfunc, - funcname=funcname) + return kclass(self._fieldset, self._ptype, pyfunc=pyfunc, funcname=funcname) def __add__(self, kernel): if not isinstance(kernel, InteractionKernel): @@ -150,9 +165,9 @@ class InteractionKernel(BaseKernel): raise NotImplementedError def execute_jit(self, pset, endtime, dt): - raise NotImplementedError("JIT mode is not supported for" - " InteractionKernels. Please run your" - " simulation in SciPy mode.") + raise NotImplementedError( + "JIT mode is not supported for" " InteractionKernels. Please run your" " simulation in SciPy mode." + ) def execute_python(self, pset, endtime, dt): """Performs the core update loop via Python. @@ -179,10 +194,10 @@ class InteractionKernel(BaseKernel): for particle_idx in active_idx: p = pset[particle_idx] # Don't use particles that are not started. 
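
For reference, the five-argument contract asserted above fixes the shape of every interaction kernel. A minimal sketch (n_neighbors is a hypothetical user-added Variable, and the (func, args) pairs follow the deferred-mutator convention that execute_python below applies after all kernels have run):

    def CountNeighbors(particle, fieldset, time, neighbors, mutator):
        def f(p, n):  # deferred state change, applied later via mutator[p.id]
            p.n_neighbors = n

        mutator[particle.id].append((f, [len(list(neighbors))]))
        return StatusCode.Success
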
-            if (endtime-p.time)/dt <= -1e-7:
+            if (endtime - p.time) / dt <= -1e-7:
                 continue
-            elif (endtime-p.time)/dt < 1:
-                p.dt = endtime-p.time
+            elif (endtime - p.time) / dt < 1:
+                p.dt = endtime - p.time
                 reset_particle_idx.append(particle_idx)

             neighbors = pset.neighbors_by_index(particle_idx)
@@ -195,7 +210,9 @@
                 # InteractionKernels do not implement a way to recover
                 # from errors.
                 if res != StatusCode.Success:
-                    logger.warning_once("Some InteractionKernel was not completed succesfully, likely because a Particle threw an error that was not captured.")
+                    logger.warning_once(
+                        "Some InteractionKernel was not completed successfully, likely because a Particle threw an error that was not captured."
+                    )

             for particle_idx in active_idx:
                 p = pset[particle_idx]
@@ -218,13 +235,14 @@
         pset.particledata.state[:] = StatusCode.Evaluate

         if abs(dt) < 1e-6:
-            logger.warning_once("'dt' is too small, causing numerical accuracy limit problems. Please chose a higher 'dt' and rather scale the 'time' axis of the field accordingly. (related issue #762)")
+            logger.warning_once(
+                "'dt' is too small, causing numerical accuracy limit problems. Please choose a higher 'dt' and rather scale the 'time' axis of the field accordingly. (related issue #762)"
+            )

         if pset.fieldset is not None:
             for g in pset.fieldset.gridset.grids:
                 if len(g.load_chunk) > g.chunk_not_loaded:  # not the case if a field in not called in the kernel
-                    g.load_chunk = np.where(g.load_chunk == g.chunk_loaded_touched,
-                                            g.chunk_deprecated, g.load_chunk)
+                    g.load_chunk = np.where(g.load_chunk == g.chunk_loaded_touched, g.chunk_deprecated, g.load_chunk)

         # Execute the kernel over the particle set
         if self.ptype.uses_jit:
@@ -235,7 +253,7 @@
             self.execute_python(pset, endtime, dt)

         # Remove all particles that signalled deletion
-        self.remove_deleted(pset)   # Generalizable version!
+        self.remove_deleted(pset)  # Generalizable version!

         # Identify particles that threw errors
         n_error = pset.num_error_particles
@@ -251,11 +269,11 @@
             elif p.state == StatusCode.Delete:
                 pass
             else:
-                logger.warning_once(f'Deleting particle {p.id} because of non-recoverable error')
+                logger.warning_once(f"Deleting particle {p.id} because of non-recoverable error")
                 p.delete()

         # Remove all particles that signalled deletion
-        self.remove_deleted(pset)   # Generalizable version!
+        self.remove_deleted(pset)  # Generalizable version!

         # Execute core loop again to continue interrupted particles
         if self.ptype.uses_jit:
diff --git a/parcels/interaction/neighborsearch/base.py b/parcels/interaction/neighborsearch/base.py
index a5d44fc1..09578aea 100644
--- a/parcels/interaction/neighborsearch/base.py
+++ b/parcels/interaction/neighborsearch/base.py
@@ -14,8 +14,7 @@ class BaseNeighborSearch(ABC):
     structure.
     """

-    def __init__(self, inter_dist_vert, inter_dist_horiz,
-                 max_depth=100000, periodic_domain_zonal=None):
+    def __init__(self, inter_dist_vert, inter_dist_horiz, max_depth=100000, periodic_domain_zonal=None):
         """Initialize neighbor search

@@ -30,9 +29,7 @@
         """
         self.inter_dist_vert = inter_dist_vert
         self.inter_dist_horiz = inter_dist_horiz
-        self.inter_dist = np.array(
-            [inter_dist_vert, inter_dist_horiz, inter_dist_horiz]
-        ).reshape(3, 1)
+        self.inter_dist = np.array([inter_dist_vert, inter_dist_horiz, inter_dist_horiz]).reshape(3, 1)
         self.max_depth = max_depth  # Maximum depth of particles.
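
The reshape(3, 1) above is what makes one vertical and one horizontal threshold broadcast against a (3, n) array of particle coordinates ordered (depth, lat, lon). A small sketch of that broadcast, with made-up thresholds:

    import numpy as np

    inter_dist = np.array([10.0, 1000.0, 1000.0]).reshape(3, 1)  # vert, horiz, horiz
    values = np.array([[5.0, 50.0],      # depth of two particles
                       [100.0, 300.0],   # y
                       [200.0, 500.0]])  # x
    print(np.all(np.abs(values) < inter_dist, axis=0))  # [ True False]
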
self._values = None # Coordinates of the particles. @@ -163,12 +160,12 @@ class BaseNeighborSearch(ABC): """ vert_distance, horiz_distance = self._distance(coor, subset_idx) - rel_distances = np.sqrt((horiz_distance/self.inter_dist_horiz)**2 - + (vert_distance/self.inter_dist_vert)**2) + rel_distances = np.sqrt( + (horiz_distance / self.inter_dist_horiz) ** 2 + (vert_distance / self.inter_dist_vert) ** 2 + ) rel_neighbor_idx = np.where(rel_distances < 1)[0] neighbor_idx = subset_idx[rel_neighbor_idx] - distances = np.vstack((vert_distance[rel_neighbor_idx], - horiz_distance[rel_neighbor_idx])) + distances = np.vstack((vert_distance[rel_neighbor_idx], horiz_distance[rel_neighbor_idx])) return neighbor_idx, distances @@ -177,21 +174,15 @@ class BaseFlatNeighborSearch(BaseNeighborSearch): def _distance(self, coor, subset_idx): coor = coor.reshape(3, 1) - horiz_distance = np.sqrt(np.sum(( - self._values[1:, subset_idx] - coor[1:])**2, - axis=0)) + horiz_distance = np.sqrt(np.sum((self._values[1:, subset_idx] - coor[1:]) ** 2, axis=0)) if self.periodic_domain_zonal: # If zonal periodic boundaries coor[2, 0] -= self.periodic_domain_zonal # distance through Western boundary - hd2 = np.sqrt(np.sum(( - self._values[1:, subset_idx] - coor[1:])**2, - axis=0)) - coor[2, 0] += 2*self.periodic_domain_zonal + hd2 = np.sqrt(np.sum((self._values[1:, subset_idx] - coor[1:]) ** 2, axis=0)) + coor[2, 0] += 2 * self.periodic_domain_zonal # distance through Eastern boundary - hd3 = np.sqrt(np.sum(( - self._values[1:, subset_idx] - coor[1:])**2, - axis=0)) + hd3 = np.sqrt(np.sum((self._values[1:, subset_idx] - coor[1:]) ** 2, axis=0)) coor[2, 0] -= self.periodic_domain_zonal else: hd2 = np.full(len(horiz_distance), np.inf) @@ -199,7 +190,7 @@ class BaseFlatNeighborSearch(BaseNeighborSearch): horiz_distance = np.column_stack((horiz_distance, hd2, hd3)) horiz_distance = np.min(horiz_distance, axis=1) - vert_distance = np.abs(self._values[0, subset_idx]-coor[0]) + vert_distance = np.abs(self._values[0, subset_idx] - coor[0]) return (vert_distance, horiz_distance) @@ -219,17 +210,13 @@ class BaseSphericalNeighborSearch(BaseNeighborSearch): coor[2, 0] -= self.periodic_domain_zonal # distance through Western boundary hd2 = spherical_distance( - *coor, - self._values[0, subset_idx], - self._values[1, subset_idx], - self._values[2, subset_idx])[1] - coor[2, 0] += 2*self.periodic_domain_zonal + *coor, self._values[0, subset_idx], self._values[1, subset_idx], self._values[2, subset_idx] + )[1] + coor[2, 0] += 2 * self.periodic_domain_zonal # distance through Eastern boundary hd3 = spherical_distance( - *coor, - self._values[0, subset_idx], - self._values[1, subset_idx], - self._values[2, subset_idx])[1] + *coor, self._values[0, subset_idx], self._values[1, subset_idx], self._values[2, subset_idx] + )[1] coor[2, 0] -= self.periodic_domain_zonal else: hd2 = np.full(len(horiz_distances), np.inf) diff --git a/parcels/interaction/neighborsearch/basehash.py b/parcels/interaction/neighborsearch/basehash.py index 9150241a..2033bf58 100644 --- a/parcels/interaction/neighborsearch/basehash.py +++ b/parcels/interaction/neighborsearch/basehash.py @@ -90,11 +90,9 @@ class BaseHashNeighborSearch(ABC): new_active_mask = np.full(new_values.shape[1], True) # Figure out the changes in the active mask. 
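
The periodic_domain_zonal branch above computes three candidate horizontal distances, direct, through the western boundary, and through the eastern boundary, and keeps the smallest. The same idea in one dimension, for a periodic axis of width L:

    import numpy as np

    def zonal_min_distance(x0, x, L):
        direct = np.abs(x - x0)
        west = np.abs(x - (x0 - L))  # x0 shifted through the western boundary
        east = np.abs(x - (x0 + L))  # x0 shifted through the eastern boundary
        return np.minimum(direct, np.minimum(west, east))

    x = np.array([10.0, 350.0])
    print(zonal_min_distance(5.0, x, 360.0))  # [ 5. 15.], not [  5. 345.]
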
- deactivated_mask = np.logical_and( - self._active_mask, np.logical_not(new_active_mask)) + deactivated_mask = np.logical_and(self._active_mask, np.logical_not(new_active_mask)) stay_active_mask = np.logical_and(self._active_mask, new_active_mask) - activated_mask = np.logical_and( - np.logical_not(self._active_mask), new_active_mask) + activated_mask = np.logical_and(np.logical_not(self._active_mask), new_active_mask) stay_active_idx = np.where(stay_active_mask)[0] @@ -110,8 +108,7 @@ class BaseHashNeighborSearch(ABC): # Remove/add/modify particles. self._deactivate_particles(remove_idx) self._particle_hashes[stay_active_mask] = new_hashes - self._particle_hashes[activated_mask] = self._values_to_hashes( - new_values[:, activated_mask]) + self._particle_hashes[activated_mask] = self._values_to_hashes(new_values[:, activated_mask]) self._activate_particles(add_idx) # Set the state to the new values. @@ -151,10 +148,8 @@ class BaseHashNeighborSearch(ABC): # Else create a new array that doesn't include remove_idx. else: rel_remove_idx = self._hash_idx[particle_idx[remove_idx]] - self._hashtable[cur_hash] = np.delete( - self._hashtable[cur_hash], rel_remove_idx) - self._hash_idx[self._hashtable[cur_hash]] = np.arange( - len(self._hashtable[cur_hash])) + self._hashtable[cur_hash] = np.delete(self._hashtable[cur_hash], rel_remove_idx) + self._hash_idx[self._hashtable[cur_hash]] = np.arange(len(self._hashtable[cur_hash])) def _activate_particles(self, particle_idx): """Add particles to the hashtable""" @@ -166,10 +161,9 @@ class BaseHashNeighborSearch(ABC): self._hash_idx[particle_idx[add_idx]] = np.arange(len(add_idx)) else: self._hash_idx[particle_idx[add_idx]] = np.arange( - len(self._hashtable[cur_hash]), - len(self._hashtable[cur_hash]) + len(add_idx)) - self._hashtable[cur_hash] = np.append( - self._hashtable[cur_hash], particle_idx[add_idx]) + len(self._hashtable[cur_hash]), len(self._hashtable[cur_hash]) + len(add_idx) + ) + self._hashtable[cur_hash] = np.append(self._hashtable[cur_hash], particle_idx[add_idx]) def hash_split(hash_ids, active_idx=None): diff --git a/parcels/interaction/neighborsearch/hashflat.py b/parcels/interaction/neighborsearch/hashflat.py index 01314156..a83760a9 100644 --- a/parcels/interaction/neighborsearch/hashflat.py +++ b/parcels/interaction/neighborsearch/hashflat.py @@ -68,8 +68,8 @@ class HashFlatNeighborSearch(BaseHashNeighborSearch, BaseFlatNeighborSearch): for i_dim in range(3): val_min = active_values[i_dim, :].min() val_max = active_values[i_dim, :].max() - margin = (val_max-val_min)*0.3 - self._box.append([val_min-margin, val_max+margin]) + margin = (val_max - val_min) * 0.3 + self._box.append([val_min - margin, val_max + margin]) self._box = np.array(self._box) @@ -78,8 +78,7 @@ class HashFlatNeighborSearch(BaseHashNeighborSearch, BaseFlatNeighborSearch): # Compute the number of bits in each of the three dimensions # E.g. if we have 3 bits (depth), we must have less than 2^3 cells in # that direction. - n_bits = ((self._box[:, 1] - self._box[:, 0] - )/self.inter_dist.reshape(-1) + epsilon)/np.log(2) + n_bits = ((self._box[:, 1] - self._box[:, 0]) / self.inter_dist.reshape(-1) + epsilon) / np.log(2) self._bits = np.ceil(n_bits).astype(int) # Compute the starting point of the cell (0, 0, 0). @@ -88,8 +87,7 @@ class HashFlatNeighborSearch(BaseHashNeighborSearch, BaseFlatNeighborSearch): # Compute the hash table. 
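
hash_split, called above and defined at the bottom of basehash.py, groups particle indices by their cell hash. A plausible stand-alone equivalent of that contract, written from scratch as a sketch:

    import numpy as np

    def hash_split_sketch(hash_ids):
        order = np.argsort(hash_ids, kind="stable")
        unique, first = np.unique(hash_ids[order], return_index=True)
        return dict(zip(unique.tolist(), np.split(order, first[1:])))

    print(hash_split_sketch(np.array([7, 3, 7, 3, 9])))
    # {3: array([1, 3]), 7: array([0, 2]), 9: array([4])}
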
particle_hashes = self._values_to_hashes(values, self.active_idx) - self._hashtable = hash_split(particle_hashes, - active_idx=self.active_idx) + self._hashtable = hash_split(particle_hashes, active_idx=self.active_idx) self._particle_hashes = particle_hashes # Keep track of the position of a particle index within a cell. @@ -104,9 +102,8 @@ class HashFlatNeighborSearch(BaseHashNeighborSearch, BaseFlatNeighborSearch): active_values = values[:, active_idx] # Compute the box_id/hashes. - box_i = ((active_values-self._min_box)/self.inter_dist).astype(int) - particle_hashes = np.bitwise_or( - box_i[0, :], np.left_shift(box_i[1, :], self._bits[0])) + box_i = ((active_values - self._min_box) / self.inter_dist).astype(int) + particle_hashes = np.bitwise_or(box_i[0, :], np.left_shift(box_i[1, :], self._bits[0])) if active_values is None: return particle_hashes @@ -139,7 +136,7 @@ def hash_to_neighbors(hash_id, bits): # Compute the (ix, iy, iz) coordinates of the hash. tot_bits = 0 for dim in range(len(bits)): - coor[dim] = (hash_id >> tot_bits) & ((1 << bits[dim])-1) + coor[dim] = (hash_id >> tot_bits) & ((1 << bits[dim]) - 1) tot_bits += bits[dim] coor_max = np.left_shift(1, bits) @@ -151,7 +148,7 @@ def hash_to_neighbors(hash_id, bits): # Compute the integer coordinates of the neighboring cell. divider = 1 for dim in range(len(bits)): - new_coor[dim] = coor[dim] + (1-((offset//divider) % 3)) + new_coor[dim] = coor[dim] + (1 - ((offset // divider) % 3)) divider *= 3 # Cell is outside the box/doesn't exist. @@ -162,7 +159,7 @@ def hash_to_neighbors(hash_id, bits): new_hash = 0 tot_bits = 0 for dim in range(len(bits)): - new_hash |= (new_coor[dim] << tot_bits) + new_hash |= new_coor[dim] << tot_bits tot_bits += bits[dim] neighbors.append(new_hash) return neighbors diff --git a/parcels/interaction/neighborsearch/hashspherical.py b/parcels/interaction/neighborsearch/hashspherical.py index 1f0e3081..32fff56f 100644 --- a/parcels/interaction/neighborsearch/hashspherical.py +++ b/parcels/interaction/neighborsearch/hashspherical.py @@ -9,8 +9,7 @@ from parcels.interaction.neighborsearch.basehash import ( ) -class HashSphericalNeighborSearch(BaseHashNeighborSearch, - BaseSphericalNeighborSearch): +class HashSphericalNeighborSearch(BaseHashNeighborSearch, BaseSphericalNeighborSearch): """Neighbor search using a hashtable (similar to octtrees). @@ -24,8 +23,7 @@ class HashSphericalNeighborSearch(BaseHashNeighborSearch, Maximum depth of the particles (default is 100000m). """ - def __init__(self, inter_dist_vert, inter_dist_horiz, - max_depth=100000): + def __init__(self, inter_dist_vert, inter_dist_horiz, max_depth=100000): super().__init__(inter_dist_vert, inter_dist_horiz, max_depth) self._init_structure() @@ -33,8 +31,7 @@ class HashSphericalNeighborSearch(BaseHashNeighborSearch, def _find_neighbors(self, hash_id, coor): """Get neighbors from hash_id and location.""" # Get the neighboring cells. - neighbor_blocks = geo_hash_to_neighbors( - hash_id, coor, self._bits, self.inter_arc_dist) + neighbor_blocks = geo_hash_to_neighbors(hash_id, coor, self._bits, self.inter_arc_dist) all_neighbor_points = [] # Get the particles from the neighboring cells. @@ -74,17 +71,17 @@ class HashSphericalNeighborSearch(BaseHashNeighborSearch, lat_sign = (lat > 0).astype(int) # Find the latitude part of the cell id. 
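
The hashes in this module pack the integer cell index of each axis into a single id by shifting every axis past the bits reserved for the previous one; the masks in hash_to_neighbors invert that. A self-contained round trip:

    bits = [4, 6, 8]  # bits reserved for the (z, y, x) cell indices

    def pack(iz, iy, ix):
        return iz | (iy << bits[0]) | (ix << (bits[0] + bits[1]))

    def unpack(h):
        iz = h & ((1 << bits[0]) - 1)
        iy = (h >> bits[0]) & ((1 << bits[1]) - 1)
        ix = (h >> (bits[0] + bits[1])) & ((1 << bits[2]) - 1)
        return iz, iy, ix

    h = pack(3, 17, 200)
    print(h, unpack(h))  # 205075 (3, 17, 200)
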
- i_depth = np.floor(depth/self.inter_dist_vert).astype(int) - i_lat = np.floor(np.abs(lat)/self.inter_degree_dist).astype(int) + i_depth = np.floor(depth / self.inter_dist_vert).astype(int) + i_lat = np.floor(np.abs(lat) / self.inter_degree_dist).astype(int) # Get the arc length of the smaller circle around the earth. - circ_small = 2*np.pi*np.cos((i_lat+1)*self.inter_arc_dist) - n_lon = np.floor(circ_small/self.inter_arc_dist).astype(int) + circ_small = 2 * np.pi * np.cos((i_lat + 1) * self.inter_arc_dist) + n_lon = np.floor(circ_small / self.inter_arc_dist).astype(int) n_lon[n_lon < 1] = 1 - d_lon = 360/n_lon + d_lon = 360 / n_lon # Get the longitude part of the cell id. - i_lon = np.floor(lon/d_lon).astype(int) + i_lon = np.floor(lon / d_lon).astype(int) # Merge the 4 parts of the cell into one id. point_hash = i_3d_to_hash(i_depth, i_lat, i_lon, lat_sign, self._bits) @@ -107,12 +104,10 @@ class HashSphericalNeighborSearch(BaseHashNeighborSearch, # Compute the hash values: self._particle_hashes = np.empty(self._values.shape[1], dtype=int) - self._particle_hashes[active_idx] = self._values_to_hashes( - values[:, active_idx]) + self._particle_hashes[active_idx] = self._values_to_hashes(values[:, active_idx]) # Create the hashtable. - self._hashtable = hash_split(self._particle_hashes, - active_idx=active_idx) + self._hashtable = hash_split(self._particle_hashes, active_idx=active_idx) # Keep track of the position of a particle index within a cell. self._hash_idx = np.empty_like(self._particle_hashes, dtype=int) @@ -124,15 +119,14 @@ class HashSphericalNeighborSearch(BaseHashNeighborSearch, epsilon = 1e-12 R_earth = 6371000 - self.inter_arc_dist = self.inter_dist_horiz/R_earth - self.inter_degree_dist = 180*self.inter_arc_dist/np.pi - n_lines_depth = int(ceil( - self.max_depth/self.inter_dist_vert + epsilon)) - n_lines_lat = int(ceil(np.pi/self.inter_arc_dist+epsilon)) - n_lines_lon = int(ceil(2*np.pi/self.inter_arc_dist+epsilon)) - n_bits_lat = ceil(np.log(n_lines_lat)/np.log(2)) - n_bits_lon = ceil(np.log(n_lines_lon)/np.log(2)) - n_bits_depth = ceil(np.log(n_lines_depth)/np.log(2)) + self.inter_arc_dist = self.inter_dist_horiz / R_earth + self.inter_degree_dist = 180 * self.inter_arc_dist / np.pi + n_lines_depth = int(ceil(self.max_depth / self.inter_dist_vert + epsilon)) + n_lines_lat = int(ceil(np.pi / self.inter_arc_dist + epsilon)) + n_lines_lon = int(ceil(2 * np.pi / self.inter_arc_dist + epsilon)) + n_bits_lat = ceil(np.log(n_lines_lat) / np.log(2)) + n_bits_lon = ceil(np.log(n_lines_lon) / np.log(2)) + n_bits_depth = ceil(np.log(n_lines_depth) / np.log(2)) self._bits = np.array([n_bits_depth, n_bits_lat, n_bits_lon]) @@ -140,16 +134,16 @@ def i_3d_to_hash(i_depth, i_lat, i_lon, lat_sign, bits): """Convert longitude and latitude id's to hash""" point_hash = lat_sign point_hash = np.bitwise_or(point_hash, np.left_shift(i_depth, 1)) - point_hash = np.bitwise_or(point_hash, np.left_shift(i_lat, 1+bits[0])) - point_hash = np.bitwise_or(point_hash, np.left_shift(i_lon, 1+bits[0]+bits[1])) + point_hash = np.bitwise_or(point_hash, np.left_shift(i_lat, 1 + bits[0])) + point_hash = np.bitwise_or(point_hash, np.left_shift(i_lon, 1 + bits[0] + bits[1])) return point_hash def geo_hash_to_neighbors(hash_id, coor, bits, inter_arc_dist): """Compute the hashes of all neighboring cells in a 3x3x3 neighborhood.""" lat_sign = hash_id & 0x1 - i_depth = (hash_id >> 1) & ((1 << bits[0])-1) - i_lat = (hash_id >> (1+bits[0])) & ((1 << bits[1])-1) + i_depth = (hash_id >> 1) & ((1 << bits[0]) - 1) + i_lat 
= (hash_id >> (1 + bits[0])) & ((1 << bits[1]) - 1) def all_neigh_depth(i_lat, i_lon, lat_sign): hashes = [] @@ -157,8 +151,7 @@ def geo_hash_to_neighbors(hash_id, coor, bits, inter_arc_dist): new_depth = i_depth + d_depth if new_depth < 0: continue - hashes.append( - i_3d_to_hash(new_depth, i_lat, i_lon, lat_sign, bits)) + hashes.append(i_3d_to_hash(new_depth, i_lat, i_lon, lat_sign, bits)) return hashes neighbors = [] @@ -168,20 +161,18 @@ def geo_hash_to_neighbors(hash_id, coor, bits, inter_arc_dist): new_i_lat = i_lat + i_d_lat if new_i_lat == -1: new_i_lat = 0 - new_lat_sign = (1-lat_sign) + new_lat_sign = 1 - lat_sign min_lat = new_i_lat + 1 - circ_small = 2*np.pi*np.cos(min_lat*inter_arc_dist) - n_new_lon = int(max(1, np.floor(circ_small/inter_arc_dist))) - d_lon = 360/n_new_lon + circ_small = 2 * np.pi * np.cos(min_lat * inter_arc_dist) + n_new_lon = int(max(1, np.floor(circ_small / inter_arc_dist))) + d_lon = 360 / n_new_lon if n_new_lon <= 3: for new_i_lon in range(n_new_lon): - neighbors.extend( - all_neigh_depth(new_i_lat, new_i_lon, new_lat_sign)) + neighbors.extend(all_neigh_depth(new_i_lat, new_i_lon, new_lat_sign)) else: - start_i_lon = int(np.floor(coor[2][0]/d_lon)) + start_i_lon = int(np.floor(coor[2][0] / d_lon)) for delta_lon in [-1, 0, 1]: - new_i_lon = (start_i_lon+delta_lon+n_new_lon) % n_new_lon - neighbors.extend( - all_neigh_depth(new_i_lat, new_i_lon, new_lat_sign)) + new_i_lon = (start_i_lon + delta_lon + n_new_lon) % n_new_lon + neighbors.extend(all_neigh_depth(new_i_lat, new_i_lon, new_lat_sign)) return neighbors diff --git a/parcels/kernel.py b/parcels/kernel.py index 8d7d42c3..e15f814f 100644 --- a/parcels/kernel.py +++ b/parcels/kernel.py @@ -42,14 +42,24 @@ from parcels.tools.statuscodes import ( TimeExtrapolationError, ) -__all__ = ['Kernel', 'BaseKernel'] +__all__ = ["Kernel", "BaseKernel"] class BaseKernel: """Superclass for 'normal' and Interactive Kernels""" - def __init__(self, fieldset, ptype, pyfunc=None, funcname=None, funccode=None, py_ast=None, funcvars=None, - c_include="", delete_cfiles=True): + def __init__( + self, + fieldset, + ptype, + pyfunc=None, + funcname=None, + funccode=None, + py_ast=None, + funcvars=None, + c_include="", + delete_cfiles=True, + ): self._fieldset = fieldset self.field_args = None self.const_args = None @@ -116,9 +126,10 @@ class BaseKernel: field_keys = "" if self.field_args is not None: field_keys = "-".join( - [f"{name}:{field.units.__class__.__name__}" for name, field in self.field_args.items()]) - key = self.name + self.ptype._cache_key + field_keys + ('TIME:%f' % ostime()) - return hashlib.md5(key.encode('utf-8')).hexdigest() + [f"{name}:{field.units.__class__.__name__}" for name, field in self.field_args.items()] + ) + key = self.name + self.ptype._cache_key + field_keys + ("TIME:%f" % ostime()) + return hashlib.md5(key.encode("utf-8")).hexdigest() def remove_deleted(self, pset): """Utility to remove all particles that signalled deletion.""" @@ -153,40 +164,60 @@ class Kernel(BaseKernel): concatenation, the merged AST plus the new header definition is required. 
""" - def __init__(self, fieldset, ptype, pyfunc=None, funcname=None, funccode=None, py_ast=None, funcvars=None, - c_include="", delete_cfiles=True): - super().__init__(fieldset=fieldset, ptype=ptype, pyfunc=pyfunc, funcname=funcname, funccode=funccode, - py_ast=py_ast, funcvars=funcvars, c_include=c_include, delete_cfiles=delete_cfiles) + def __init__( + self, + fieldset, + ptype, + pyfunc=None, + funcname=None, + funccode=None, + py_ast=None, + funcvars=None, + c_include="", + delete_cfiles=True, + ): + super().__init__( + fieldset=fieldset, + ptype=ptype, + pyfunc=pyfunc, + funcname=funcname, + funccode=funccode, + py_ast=py_ast, + funcvars=funcvars, + c_include=c_include, + delete_cfiles=delete_cfiles, + ) # Derive meta information from pyfunc, if not given self.check_fieldsets_in_kernels(pyfunc) if funcvars is not None: self.funcvars = funcvars - elif hasattr(pyfunc, '__code__'): + elif hasattr(pyfunc, "__code__"): self.funcvars = list(pyfunc.__code__.co_varnames) else: self.funcvars = None self.funccode = funccode or inspect.getsource(pyfunc.__code__) self.funccode = ( # Remove parcels. prefix (see #1608) - self.funccode - .replace('parcels.rng', 'rng') - .replace('parcels.ParcelsRandom', 'ParcelsRandom') - .replace('parcels.StatusCode', 'StatusCode') + self.funccode.replace("parcels.rng", "rng") + .replace("parcels.ParcelsRandom", "ParcelsRandom") + .replace("parcels.StatusCode", "StatusCode") ) # Parse AST if it is not provided explicitly - self.py_ast = py_ast or ast.parse(textwrap.dedent(self.funccode)).body[0] # Dedent allows for in-lined kernel definitions + self.py_ast = ( + py_ast or ast.parse(textwrap.dedent(self.funccode)).body[0] + ) # Dedent allows for in-lined kernel definitions if pyfunc is None: # Extract user context by inspecting the call stack stack = inspect.stack() try: user_ctx = stack[-1][0].f_globals - user_ctx['math'] = globals()['math'] - user_ctx['ParcelsRandom'] = globals()['ParcelsRandom'] - user_ctx['rng'] = globals()['rng'] - user_ctx['random'] = globals()['random'] - user_ctx['StatusCode'] = globals()['StatusCode'] + user_ctx["math"] = globals()["math"] + user_ctx["ParcelsRandom"] = globals()["ParcelsRandom"] + user_ctx["rng"] = globals()["rng"] + user_ctx["random"] = globals()["random"] + user_ctx["StatusCode"] = globals()["StatusCode"] except: logger.warning("Could not access user context when merging kernels") user_ctx = globals() @@ -202,24 +233,24 @@ class Kernel(BaseKernel): numkernelargs = self.check_kernel_signature_on_version() - assert numkernelargs == 3, \ - 'Since Parcels v2.0, kernels do only take 3 arguments: particle, fieldset, time !! AND !! Argument order in field interpolation is time, depth, lat, lon.' + assert ( + numkernelargs == 3 + ), "Since Parcels v2.0, kernels do only take 3 arguments: particle, fieldset, time !! AND !! Argument order in field interpolation is time, depth, lat, lon." 
self.name = f"{ptype.name}{self.funcname}" # Generate the kernel function and add the outer loop if self.ptype.uses_jit: kernelgen = KernelGenerator(fieldset, ptype) - kernel_ccode = kernelgen.generate(deepcopy(self.py_ast), - self.funcvars) + kernel_ccode = kernelgen.generate(deepcopy(self.py_ast), self.funcvars) self.field_args = kernelgen.field_args self.vector_field_args = kernelgen.vector_field_args fieldset = self.fieldset for f in self.vector_field_args.values(): - Wname = f.W.ccode_name if f.W else 'not_defined' - for sF_name, sF_component in zip([f.U.ccode_name, f.V.ccode_name, Wname], ['U', 'V', 'W']): + Wname = f.W.ccode_name if f.W else "not_defined" + for sF_name, sF_component in zip([f.U.ccode_name, f.V.ccode_name, Wname], ["U", "V", "W"]): if sF_name not in self.field_args: - if sF_name != 'not_defined': + if sF_name != "not_defined": self.field_args[sF_name] = getattr(f, sF_component) self.const_args = kernelgen.const_args loopgen = LoopGenerator(fieldset, ptype) @@ -228,8 +259,7 @@ class Kernel(BaseKernel): c_include_str = f.read() else: c_include_str = self._c_include - self.ccode = loopgen.generate(self.funcname, self.field_args, self.const_args, - kernel_ccode, c_include_str) + self.ccode = loopgen.generate(self.funcname, self.field_args, self.const_args, kernel_ccode, c_include_str) src_file_or_files, self.lib_file, self.log_file = self.get_kernel_compile_files() if type(src_file_or_files) in (list, dict, tuple, np.ndarray): @@ -272,9 +302,10 @@ class Kernel(BaseKernel): field_keys = "" if self.field_args is not None: field_keys = "-".join( - [f"{name}:{field.units.__class__.__name__}" for name, field in self.field_args.items()]) - key = self.name + self.ptype._cache_key + field_keys + ('TIME:%f' % ostime()) - return hashlib.md5(key.encode('utf-8')).hexdigest() + [f"{name}:{field.units.__class__.__name__}" for name, field in self.field_args.items()] + ) + key = self.name + self.ptype._cache_key + field_keys + ("TIME:%f" % ostime()) + return hashlib.md5(key.encode("utf-8")).hexdigest() def add_scipy_positionupdate_kernels(self): # Adding kernels that set and update the coordinate changes @@ -304,37 +335,51 @@ class Kernel(BaseKernel): if self.fieldset is not None: if pyfunc is AdvectionRK4_3D: warning = False - if isinstance(self._fieldset.W, Field) and self._fieldset.W.creation_log != 'from_nemo' and \ - self._fieldset.W._scaling_factor is not None and self._fieldset.W._scaling_factor > 0: + if ( + isinstance(self._fieldset.W, Field) + and self._fieldset.W.creation_log != "from_nemo" + and self._fieldset.W._scaling_factor is not None + and self._fieldset.W._scaling_factor > 0 + ): warning = True if isinstance(self._fieldset.W, NestedField): for f in self._fieldset.W: - if f.creation_log != 'from_nemo' and f._scaling_factor is not None and f._scaling_factor > 0: + if f.creation_log != "from_nemo" and f._scaling_factor is not None and f._scaling_factor > 0: warning = True if warning: - logger.warning_once('Note that in AdvectionRK4_3D, vertical velocity is assumed positive towards increasing z.\n' - ' If z increases downward and w is positive upward you can re-orient it downwards by setting fieldset.W.set_scaling_factor(-1.)') + logger.warning_once( + "Note that in AdvectionRK4_3D, vertical velocity is assumed positive towards increasing z.\n" + " If z increases downward and w is positive upward you can re-orient it downwards by setting fieldset.W.set_scaling_factor(-1.)" + ) elif pyfunc is AdvectionAnalytical: if self.fieldset.particlefile is not None: 
self.fieldset.particlefile.analytical = True if self._ptype.uses_jit: - raise NotImplementedError('Analytical Advection only works in Scipy mode') - if self._fieldset.U.interp_method != 'cgrid_velocity': - raise NotImplementedError('Analytical Advection only works with C-grids') + raise NotImplementedError("Analytical Advection only works in Scipy mode") + if self._fieldset.U.interp_method != "cgrid_velocity": + raise NotImplementedError("Analytical Advection only works with C-grids") if self._fieldset.U.grid.gtype not in [GridType.CurvilinearZGrid, GridType.RectilinearZGrid]: - raise NotImplementedError('Analytical Advection only works with Z-grids in the vertical') + raise NotImplementedError("Analytical Advection only works with Z-grids in the vertical") elif pyfunc is AdvectionRK45: - if not hasattr(self.fieldset, 'RK45_tol'): - logger.info("Setting RK45 tolerance to 10 m. Use fieldset.add_constant('RK45_tol', [distance]) to change.") - self.fieldset.add_constant('RK45_tol', 10) - if self.fieldset.U.grid.mesh == 'spherical': - self.fieldset.RK45_tol /= (1852 * 60) # TODO does not account for zonal variation in meter -> degree conversion - if not hasattr(self.fieldset, 'RK45_min_dt'): - logger.info("Setting RK45 minimum timestep to 1 s. Use fieldset.add_constant('RK45_min_dt', [timestep]) to change.") - self.fieldset.add_constant('RK45_min_dt', 1) - if not hasattr(self.fieldset, 'RK45_max_dt'): - logger.info("Setting RK45 maximum timestep to 1 day. Use fieldset.add_constant('RK45_max_dt', [timestep]) to change.") - self.fieldset.add_constant('RK45_max_dt', 60*60*24) + if not hasattr(self.fieldset, "RK45_tol"): + logger.info( + "Setting RK45 tolerance to 10 m. Use fieldset.add_constant('RK45_tol', [distance]) to change." + ) + self.fieldset.add_constant("RK45_tol", 10) + if self.fieldset.U.grid.mesh == "spherical": + self.fieldset.RK45_tol /= ( + 1852 * 60 + ) # TODO does not account for zonal variation in meter -> degree conversion + if not hasattr(self.fieldset, "RK45_min_dt"): + logger.info( + "Setting RK45 minimum timestep to 1 s. Use fieldset.add_constant('RK45_min_dt', [timestep]) to change." + ) + self.fieldset.add_constant("RK45_min_dt", 1) + if not hasattr(self.fieldset, "RK45_max_dt"): + logger.info( + "Setting RK45 maximum timestep to 1 day. Use fieldset.add_constant('RK45_max_dt', [timestep]) to change." 
+ ) + self.fieldset.add_constant("RK45_max_dt", 60 * 60 * 24) def check_kernel_signature_on_version(self): numkernelargs = 0 @@ -377,20 +422,26 @@ class Kernel(BaseKernel): if MPI: mpi_comm = MPI.COMM_WORLD mpi_rank = mpi_comm.Get_rank() - cache_name = self._cache_key # only required here because loading is done by Kernel class instead of Compiler class + cache_name = ( + self._cache_key + ) # only required here because loading is done by Kernel class instead of Compiler class dyn_dir = get_cache_dir() if mpi_rank == 0 else None dyn_dir = mpi_comm.bcast(dyn_dir, root=0) basename = cache_name if mpi_rank == 0 else None basename = mpi_comm.bcast(basename, root=0) basename = basename + "_%d" % mpi_rank else: - cache_name = self._cache_key # only required here because loading is done by Kernel class instead of Compiler class + cache_name = ( + self._cache_key + ) # only required here because loading is done by Kernel class instead of Compiler class dyn_dir = get_cache_dir() basename = "%s_0" % cache_name lib_path = "lib" + basename src_file_or_files = None if type(basename) in (list, dict, tuple, ndarray): - src_file_or_files = ["", ] * len(basename) + src_file_or_files = [ + "", + ] * len(basename) for i, src_file in enumerate(basename): src_file_or_files[i] = f"{os.path.join(dyn_dir, src_file)}.c" else: @@ -405,13 +456,13 @@ class Kernel(BaseKernel): if self.src_file is None: if self.dyn_srcs is not None: for dyn_src in self.dyn_srcs: - with open(dyn_src, 'w') as f: + with open(dyn_src, "w") as f: f.write(self.ccode) all_files_array.append(dyn_src) compiler.compile(self.dyn_srcs, self.lib_file, self.log_file) else: if self.src_file is not None: - with open(self.src_file, 'w') as f: + with open(self.src_file, "w") as f: f.write(self.ccode) if self.src_file is not None: all_files_array.append(self.src_file) @@ -423,21 +474,33 @@ class Kernel(BaseKernel): all_files_array.append(self.log_file) def load_lib(self): - self._lib = npct.load_library(self.lib_file, '.') + self._lib = npct.load_library(self.lib_file, ".") self._function = self._lib.particle_loop def merge(self, kernel, kclass): funcname = self.funcname + kernel.funcname func_ast = None if self.py_ast is not None: - func_ast = ast.FunctionDef(name=funcname, args=self.py_ast.args, body=self.py_ast.body + kernel.py_ast.body, - decorator_list=[], lineno=1, col_offset=0) + func_ast = ast.FunctionDef( + name=funcname, + args=self.py_ast.args, + body=self.py_ast.body + kernel.py_ast.body, + decorator_list=[], + lineno=1, + col_offset=0, + ) delete_cfiles = self.delete_cfiles and kernel.delete_cfiles - return kclass(self.fieldset, self.ptype, pyfunc=None, - funcname=funcname, funccode=self.funccode + kernel.funccode, - py_ast=func_ast, funcvars=self.funcvars + kernel.funcvars, - c_include=self._c_include + kernel.c_include, - delete_cfiles=delete_cfiles) + return kclass( + self.fieldset, + self.ptype, + pyfunc=None, + funcname=funcname, + funccode=self.funccode + kernel.funccode, + py_ast=func_ast, + funcvars=self.funcvars + kernel.funcvars, + c_include=self._c_include + kernel.c_include, + delete_cfiles=delete_cfiles, + ) def __add__(self, kernel): if not isinstance(kernel, type(self)): @@ -484,7 +547,13 @@ class Kernel(BaseKernel): def cleanup_remove_files(lib_file, all_files_array, delete_cfiles): if lib_file is not None: if os.path.isfile(lib_file): # and delete_cfiles - [os.remove(s) for s in [lib_file, ] if os.path is not None and os.path.exists(s)] + [ + os.remove(s) + for s in [ + lib_file, + ] + if os.path is not None and 
os.path.exists(s)
+            ]
         if delete_cfiles and len(all_files_array) > 0:
             [os.remove(s) for s in all_files_array if os.path is not None and os.path.exists(s)]

@@ -495,7 +564,7 @@
         # naming scheme which is required on Windows OS'es to deal with updates to a Parcels' kernel.
         if lib is not None:
             try:
-                _ctypes.FreeLibrary(lib._handle) if sys.platform == 'win32' else _ctypes.dlclose(lib._handle)
+                _ctypes.FreeLibrary(lib._handle) if sys.platform == "win32" else _ctypes.dlclose(lib._handle)
             except:
                 pass

@@ -510,7 +579,7 @@
                 if isinstance(f, (VectorField, NestedField)):
                     continue
                 if f.data.dtype != np.float32:
-                    raise RuntimeError(f'Field {f.name} data needs to be float32 in JIT mode')
+                    raise RuntimeError(f"Field {f.name} data needs to be float32 in JIT mode")
                 if f in self.field_args.values():
                     f.chunk_data()
                 else:
@@ -519,17 +588,16 @@
                         f.c_data_chunks[block_id] = None

         for g in pset.fieldset.gridset.grids:
-            g.load_chunk = np.where(g.load_chunk == g.chunk_loading_requested,
-                                    g.chunk_loaded_touched, g.load_chunk)
+            g.load_chunk = np.where(g.load_chunk == g.chunk_loading_requested, g.chunk_loaded_touched, g.load_chunk)
             if len(g.load_chunk) > g.chunk_not_loaded:  # not the case if a field in not called in the kernel
-                if not g.load_chunk.flags['C_CONTIGUOUS']:
-                    g.load_chunk = np.array(g.load_chunk, order='C')
+                if not g.load_chunk.flags["C_CONTIGUOUS"]:
+                    g.load_chunk = np.array(g.load_chunk, order="C")
             if not g.depth.flags.c_contiguous:
-                g.depth = np.array(g.depth, order='C')
+                g.depth = np.array(g.depth, order="C")
             if not g.lon.flags.c_contiguous:
-                g.lon = np.array(g.lon, order='C')
+                g.lon = np.array(g.lon, order="C")
             if not g.lat.flags.c_contiguous:
-                g.lat = np.array(g.lat, order='C')
+                g.lat = np.array(g.lat, order="C")

     def execute_jit(self, pset, endtime, dt):
         """Invokes JIT engine to perform the core update loop."""
@@ -538,8 +606,7 @@
         fargs = [byref(f.ctypes_struct) for f in self.field_args.values()]
         fargs += [c_double(f) for f in self.const_args.values()]
         particle_data = byref(pset.ctypes_struct)
-        return self._function(c_int(len(pset)), particle_data,
-                              c_double(endtime), c_double(dt), *fargs)
+        return self._function(c_int(len(pset)), particle_data, c_double(endtime), c_double(dt), *fargs)

     def execute_python(self, pset, endtime, dt):
         """Performs the core update loop via Python."""
@@ -563,13 +630,14 @@
         pset.particledata.state[:] = StatusCode.Evaluate

         if abs(dt) < 1e-6:
-            logger.warning_once("'dt' is too small, causing numerical accuracy limit problems. Please chose a higher 'dt' and rather scale the 'time' axis of the field accordingly. (related issue #762)")
+            logger.warning_once(
+                "'dt' is too small, causing numerical accuracy limit problems. Please choose a higher 'dt' and rather scale the 'time' axis of the field accordingly. 
(related issue #762)" + ) if pset.fieldset is not None: for g in pset.fieldset.gridset.grids: if len(g.load_chunk) > g.chunk_not_loaded: # not the case if a field in not called in the kernel - g.load_chunk = np.where(g.load_chunk == g.chunk_loaded_touched, - g.chunk_deprecated, g.load_chunk) + g.load_chunk = np.where(g.load_chunk == g.chunk_loaded_touched, g.chunk_deprecated, g.load_chunk) # Execute the kernel over the particle set if self.ptype.uses_jit: @@ -604,11 +672,11 @@ class Kernel(BaseKernel): elif p.state == StatusCode.Delete: pass else: - logger.warning_once(f'Deleting particle {p.id} because of non-recoverable error') + logger.warning_once(f"Deleting particle {p.id} because of non-recoverable error") p.delete() # Remove all particles that signalled deletion - self.remove_deleted(pset) # Generalizable version! + self.remove_deleted(pset) # Generalizable version! # Execute core loop again to continue interrupted particles if self.ptype.uses_jit: @@ -634,19 +702,19 @@ class Kernel(BaseKernel): pre_dt = p.dt sign_dt = np.sign(p.dt) - if sign_dt*p.time_nextloop >= sign_dt*endtime: + if sign_dt * p.time_nextloop >= sign_dt * endtime: return p try: # Use next_dt from AdvectionRK45 if it is set - if abs(endtime - p.time_nextloop) < abs(p.next_dt)-1e-6: + if abs(endtime - p.time_nextloop) < abs(p.next_dt) - 1e-6: p.next_dt = abs(endtime - p.time_nextloop) * sign_dt except KeyError: - if abs(endtime - p.time_nextloop) < abs(p.dt)-1e-6: + if abs(endtime - p.time_nextloop) < abs(p.dt) - 1e-6: p.dt = abs(endtime - p.time_nextloop) * sign_dt res = self._pyfunc(p, self._fieldset, p.time_nextloop) if res is None: - if sign_dt*p.time < sign_dt*endtime and p.state == StatusCode.Success: + if sign_dt * p.time < sign_dt * endtime and p.state == StatusCode.Success: p.state = StatusCode.Evaluate else: p.state = res diff --git a/parcels/particle.py b/parcels/particle.py index 769dda2a..cd524275 100644 --- a/parcels/particle.py +++ b/parcels/particle.py @@ -5,7 +5,7 @@ import numpy as np from parcels.tools.statuscodes import StatusCode -__all__ = ['ScipyParticle', 'JITParticle', 'Variable', 'ScipyInteractionParticle'] +__all__ = ["ScipyParticle", "JITParticle", "Variable", "ScipyInteractionParticle"] indicators_64bit = [np.float64, np.uint64, np.int64, c_void_p] @@ -80,14 +80,15 @@ class ParticleType: for v in self.variables: if v.name in [v.name for v in ptype.variables]: raise AttributeError( - f"Custom Variable name '{v.name}' is not allowed, as it is also a built-in variable") - if v.name == 'z': + f"Custom Variable name '{v.name}' is not allowed, as it is also a built-in variable" + ) + if v.name == "z": raise AttributeError( - "Custom Variable name 'z' is not allowed, as it is used for depth in ParticleFile") + "Custom Variable name 'z' is not allowed, as it is used for depth in ParticleFile" + ) self.variables = ptype.variables + self.variables # Sort variables with all the 64-bit first so that they are aligned for the JIT cptr - self.variables = [v for v in self.variables if v.is64bit()] + \ - [v for v in self.variables if not v.is64bit()] + self.variables = [v for v in self.variables if v.is64bit()] + [v for v in self.variables if not v.is64bit()] def __repr__(self): return f"PType<{self.name}>::{self.variables}" @@ -110,7 +111,7 @@ class ParticleType: raise RuntimeError(str(v.dtype) + " variables are not implemented in JIT mode") if self.size % 8 > 0: # Add padding to be 64-bit aligned - type_list += [('pad', np.float32)] + type_list += [("pad", np.float32)] return np.dtype(type_list) 
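The get_dtype() logic above orders the 64-bit variables first and appends a float32 "pad" field whenever the struct size is not a multiple of 8, so the layout stays 64-bit aligned for the JIT cptr. A minimal, self-contained sketch of that alignment rule; the field list here is hypothetical, not the real ParticleType variables:

    import numpy as np

    # 64-bit fields first, then 32-bit ones; pad the total to a multiple of 8 bytes
    fields = [("time", np.float64), ("id", np.int64), ("lon", np.float32), ("lat", np.float32), ("state", np.int32)]
    fields.sort(key=lambda f: np.dtype(f[1]).itemsize, reverse=True)
    if sum(np.dtype(t).itemsize for _, t in fields) % 8 > 0:
        fields.append(("pad", np.float32))
    dtype = np.dtype(fields)
    assert dtype.itemsize % 8 == 0  # 28 bytes of payload padded to 32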
@property @@ -149,23 +150,22 @@ class ScipyParticle: Additional Variables can be added via the :Class Variable: objects """ - lon = Variable('lon', dtype=np.float32) - lon_nextloop = Variable('lon_nextloop', dtype=np.float32, to_write=False) - lat = Variable('lat', dtype=np.float32) - lat_nextloop = Variable('lat_nextloop', dtype=np.float32, to_write=False) - depth = Variable('depth', dtype=np.float32) - depth_nextloop = Variable('depth_nextloop', dtype=np.float32, to_write=False) - time = Variable('time', dtype=np.float64) - time_nextloop = Variable('time_nextloop', dtype=np.float64, to_write=False) - id = Variable('id', dtype=np.int64, to_write='once') - obs_written = Variable('obs_written', dtype=np.int32, initial=0, to_write=False) - dt = Variable('dt', dtype=np.float64, to_write=False) - state = Variable('state', dtype=np.int32, initial=StatusCode.Evaluate, to_write=False) + lon = Variable("lon", dtype=np.float32) + lon_nextloop = Variable("lon_nextloop", dtype=np.float32, to_write=False) + lat = Variable("lat", dtype=np.float32) + lat_nextloop = Variable("lat_nextloop", dtype=np.float32, to_write=False) + depth = Variable("depth", dtype=np.float32) + depth_nextloop = Variable("depth_nextloop", dtype=np.float32, to_write=False) + time = Variable("time", dtype=np.float64) + time_nextloop = Variable("time_nextloop", dtype=np.float64, to_write=False) + id = Variable("id", dtype=np.int64, to_write="once") + obs_written = Variable("obs_written", dtype=np.int32, initial=0, to_write=False) + dt = Variable("dt", dtype=np.float64, to_write=False) + state = Variable("state", dtype=np.int32, initial=StatusCode.Evaluate, to_write=False) lastID = 0 # class-level variable keeping track of last Particle ID used - def __init__(self, lon, lat, pid, fieldset=None, ngrids=None, depth=0., time=0., cptr=None): - + def __init__(self, lon, lat, pid, fieldset=None, ngrids=None, depth=0.0, time=0.0, cptr=None): # Enforce default values through Variable descriptor type(self).lon.initial = lon type(self).lon_nextloop.initial = lon @@ -195,10 +195,10 @@ class ScipyParticle: pass # superclass is 'object', and object itself has no destructor, hence 'pass' def __repr__(self): - time_string = 'not_yet_set' if self.time is None or np.isnan(self.time) else f"{self.time:f}" + time_string = "not_yet_set" if self.time is None or np.isnan(self.time) else f"{self.time:f}" str = "P[%d](lon=%f, lat=%f, depth=%f, " % (self.id, self.lon, self.lat, self.depth) for var in vars(type(self)): - if var in ['lon_nextloop', 'lat_nextloop', 'depth_nextloop', 'time_nextloop']: + if var in ["lon_nextloop", "lat_nextloop", "depth_nextloop", "time_nextloop"]: continue if type(getattr(type(self), var)) is Variable and getattr(type(self), var).to_write is True: str += f"{var}={getattr(self, var):f}, " @@ -217,15 +217,15 @@ class ScipyParticle: if isinstance(var, list): return cls.add_variables(var) if not isinstance(var, Variable): - if len(args) > 0 and 'dtype' not in kwargs: - kwargs['dtype'] = args[0] - if len(args) > 1 and 'initial' not in kwargs: - kwargs['initial'] = args[1] - if len(args) > 2 and 'to_write' not in kwargs: - kwargs['to_write'] = args[2] - dtype = kwargs.pop('dtype', np.float32) - initial = kwargs.pop('initial', 0) - to_write = kwargs.pop('to_write', True) + if len(args) > 0 and "dtype" not in kwargs: + kwargs["dtype"] = args[0] + if len(args) > 1 and "initial" not in kwargs: + kwargs["initial"] = args[1] + if len(args) > 2 and "to_write" not in kwargs: + kwargs["to_write"] = args[2] + dtype = kwargs.pop("dtype", 
np.float32)
+            initial = kwargs.pop("initial", 0)
+            to_write = kwargs.pop("to_write", True)
             var = Variable(var, dtype=dtype, initial=initial, to_write=to_write)

         class NewParticle(cls):
@@ -266,9 +266,9 @@ class ScipyParticle:
     ScipyParticle.lastID = offset


-ScipyInteractionParticle = ScipyParticle.add_variables([
-    Variable("vert_dist", dtype=np.float32),
-    Variable("horiz_dist", dtype=np.float32)])
+ScipyInteractionParticle = ScipyParticle.add_variables(
+    [Variable("vert_dist", dtype=np.float32), Variable("horiz_dist", dtype=np.float32)]
+)


 class JITParticle(ScipyParticle):
@@ -296,7 +296,7 @@ class JITParticle(ScipyParticle):
     """

     def __init__(self, *args, **kwargs):
-        self._cptr = kwargs.pop('cptr', None)
+        self._cptr = kwargs.pop("cptr", None)
         if self._cptr is None:
             # Allocate data for a single particle
             ptype = self.getPType()
diff --git a/parcels/particledata.py b/parcels/particledata.py
index 78c60a9f..199a9c08 100644
--- a/parcels/particledata.py
+++ b/parcels/particledata.py
@@ -34,15 +34,16 @@ def partitionParticlesMPI_default(coords, mpi_size=1):
         kmeans = KMeans(n_clusters=mpi_size, random_state=0).fit(coords)
         mpiProcs = kmeans.labels_
     else:  # assigning random labels if no KMeans (see https://github.com/OceanParcels/parcels/issues/1261)
-        logger.warning_once('sklearn needs to be available if MPI is installed. '
-                            'See https://docs.oceanparcels.org/en/latest/installation.html#installation-for-developers for more information')
+        logger.warning_once(
+            "sklearn needs to be available if MPI is installed. "
+            "See https://docs.oceanparcels.org/en/latest/installation.html#installation-for-developers for more information"
+        )
         mpiProcs = np.random.randint(0, mpi_size, size=coords.shape[0])

     return mpiProcs


 class ParticleData:
-
     def __init__(self, pclass, lon, lat, depth, time, lonlatdepth_dtype, pid_orig, ngrid=1, **kwargs):
         """
         Parameters
@@ -64,19 +65,22 @@ class ParticleData:

         self._sorted = np.all(np.diff(pid) >= 0)

-        assert depth is not None, "particle's initial depth is None - incompatible with the ParticleData class. Invalid state."
-        assert lon.size == lat.size and lon.size == depth.size, ('lon, lat, depth don''t all have the same lenghts.')
+        assert (
+            depth is not None
+        ), "particle's initial depth is None - incompatible with the ParticleData class. Invalid state."
+        assert lon.size == lat.size and lon.size == depth.size, "lon, lat, depth don't all have the same lengths."

-        assert lon.size == time.size, ('time and positions (lon, lat, depth) don''t have the same lengths.')
+        assert lon.size == time.size, "time and positions (lon, lat, depth) don't have the same lengths."

         # If a partitioning function for MPI runs has been passed into the
         # particle creation with the "partition_function" kwarg, retrieve it here.
         # If it has not, assign the default function, partitionParticlesMPI_default()
-        partition_function = kwargs.pop('partition_function', partitionParticlesMPI_default)
+        partition_function = kwargs.pop("partition_function", partitionParticlesMPI_default)

         for kwvar in kwargs:
-            assert lon.size == kwargs[kwvar].size, (
-                f"{kwvar} and positions (lon, lat, depth) don't have the same lengths.")
+            assert (
+                lon.size == kwargs[kwvar].size
+            ), f"{kwvar} and positions (lon, lat, depth) don't have the same lengths."
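partitionParticlesMPI_default() above clusters the initial particle positions with KMeans so that each MPI rank receives a spatially compact batch, falling back to random labels when sklearn is unavailable. A standalone sketch of that idea; the coords array is made up for illustration:

    import numpy as np
    from sklearn.cluster import KMeans

    coords = np.random.default_rng(0).random((100, 2))  # hypothetical (lon, lat) release positions
    mpi_size = 4
    # One compact cluster of particles per MPI rank, as in partitionParticlesMPI_default
    labels = KMeans(n_clusters=mpi_size, random_state=0).fit(coords).labels_
    for rank in range(mpi_size):
        print(rank, np.sum(labels == rank))  # particle count assigned to each rank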
offset = np.max(pid) if (pid is not None) and len(pid) > 0 else -1 if MPI: @@ -85,7 +89,7 @@ class ParticleData: mpi_size = mpi_comm.Get_size() if lon.size < mpi_size and mpi_size > 1: - raise RuntimeError('Cannot initialise with fewer particles than MPI processors') + raise RuntimeError("Cannot initialise with fewer particles than MPI processors") if mpi_size > 1: if partition_function is not False: @@ -97,7 +101,7 @@ class ParticleData: self._pu_indicators = None self._pu_indicators = mpi_comm.bcast(self._pu_indicators, root=0) elif np.max(self._pu_indicators) >= mpi_size: - raise RuntimeError('Particle partitions must vary between 0 and the number of mpi procs') + raise RuntimeError("Particle partitions must vary between 0 and the number of mpi procs") lon = lon[self._pu_indicators == mpi_rank] lat = lat[self._pu_indicators == mpi_rank] time = time[self._pu_indicators == mpi_rank] @@ -107,14 +111,16 @@ class ParticleData: kwargs[kwvar] = kwargs[kwvar][self._pu_indicators == mpi_rank] offset = MPI.COMM_WORLD.allreduce(offset, op=MPI.MAX) - pclass.setLastID(offset+1) + pclass.setLastID(offset + 1) if lonlatdepth_dtype is None: self._lonlatdepth_dtype = np.float32 else: self._lonlatdepth_dtype = lonlatdepth_dtype - assert self._lonlatdepth_dtype in [np.float32, np.float64], \ - 'lon lat depth precision should be set to either np.float32 or np.float64' + assert self._lonlatdepth_dtype in [ + np.float32, + np.float64, + ], "lon lat depth precision should be set to either np.float32 or np.float64" pclass.set_lonlatdepth_dtype(self._lonlatdepth_dtype) self._pclass = pclass @@ -125,37 +131,49 @@ class ParticleData: self._ncount = len(lon) for v in self.ptype.variables: - if v.name in ['xi', 'yi', 'zi', 'ti']: + if v.name in ["xi", "yi", "zi", "ti"]: self._data[v.name] = np.empty((len(lon), ngrid), dtype=v.dtype) else: self._data[v.name] = np.empty(self._ncount, dtype=v.dtype) if lon is not None and lat is not None: # Initialise from lists of lon/lat coordinates - assert self._ncount == len(lon) and self._ncount == len(lat), ( - 'Size of ParticleSet does not match length of lon and lat.') + assert self._ncount == len(lon) and self._ncount == len( + lat + ), "Size of ParticleSet does not match length of lon and lat." 
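The allocation loop above stores particles as a structure of arrays: one flat numpy array per Variable, plus 2-D (n, ngrid) arrays for the grid-index variables xi/yi/zi/ti. A toy version of that layout, with a made-up variable list:

    import numpy as np

    ncount, ngrid = 10, 2  # hypothetical particle and grid counts
    variables = [("lon", np.float32), ("lat", np.float32), ("time", np.float64)]
    data = {name: np.empty(ncount, dtype=dt) for name, dt in variables}
    for name in ("xi", "yi", "zi", "ti"):  # per-grid index variables are 2-D
        data[name] = np.zeros((ncount, ngrid), dtype=np.int32)
    data["lon"][:] = np.linspace(0.0, 9.0, ncount)  # vectorised init, as in the constructor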
# mimic the variables that get initialised in the constructor - self._data['lat'][:] = lat - self._data['lat_nextloop'][:] = lat - self._data['lon'][:] = lon - self._data['lon_nextloop'][:] = lon - self._data['depth'][:] = depth - self._data['depth_nextloop'][:] = depth - self._data['time'][:] = time - self._data['time_nextloop'][:] = time - self._data['id'][:] = pid - self._data['obs_written'][:] = 0 + self._data["lat"][:] = lat + self._data["lat_nextloop"][:] = lat + self._data["lon"][:] = lon + self._data["lon_nextloop"][:] = lon + self._data["depth"][:] = depth + self._data["depth_nextloop"][:] = depth + self._data["time"][:] = time + self._data["time_nextloop"][:] = time + self._data["id"][:] = pid + self._data["obs_written"][:] = 0 # special case for exceptions which can only be handled from scipy - self._data['exception'] = np.empty(self._ncount, dtype=object) - - initialised |= {'lat', 'lat_nextloop', 'lon', 'lon_nextloop', 'depth', 'depth_nextloop', 'time', 'time_nextloop', 'id', 'obs_written'} + self._data["exception"] = np.empty(self._ncount, dtype=object) + + initialised |= { + "lat", + "lat_nextloop", + "lon", + "lon_nextloop", + "depth", + "depth_nextloop", + "time", + "time_nextloop", + "id", + "obs_written", + } # any fields that were provided on the command line for kwvar, kwval in kwargs.items(): if not hasattr(pclass, kwvar): - raise RuntimeError(f'Particle class does not have Variable {kwvar}') + raise RuntimeError(f"Particle class does not have Variable {kwvar}") self._data[kwvar][:] = kwval initialised.add(kwvar) @@ -244,12 +262,18 @@ class ParticleData: def get_single_by_index(self, index): """Get a particle object from the ParticleData instance based on its index.""" - assert type(index) in [int, np.int32, np.intp], f"Trying to get a particle by index, but index {index} is not a 32-bit integer - invalid operation." + assert type(index) in [ + int, + np.int32, + np.intp, + ], f"Trying to get a particle by index, but index {index} is not a 32-bit integer - invalid operation." return ParticleDataAccessor(self, index) def add_same(self, same_class): """Add another ParticleData instance to this ParticleData instance. This is done by concatenating both instances.""" - assert same_class is not None, f"Trying to add another {type(self)} to this one, but the other one is None - invalid operation." + assert ( + same_class is not None + ), f"Trying to add another {type(self)} to this one, but the other one is None - invalid operation." 
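get_single_by_index() above hands back a ParticleDataAccessor rather than copying data out of the arrays. A simplified stand-in (not the real accessor class) showing the idea: a lightweight view object that routes attribute access to the backing structure-of-arrays:

    import numpy as np

    class Accessor:  # hypothetical, minimal version of the accessor pattern
        def __init__(self, data, index):
            object.__setattr__(self, "_data", data)
            object.__setattr__(self, "_index", index)

        def __getattr__(self, name):
            return self._data[name][self._index]

        def __setattr__(self, name, value):
            self._data[name][self._index] = value

    data = {"lon": np.zeros(3), "lat": np.zeros(3)}
    p = Accessor(data, 1)
    p.lon = 5.0
    assert data["lon"][1] == 5.0  # writes go straight to the backing array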
assert type(same_class) is type(self) if same_class._ncount == 0: @@ -261,14 +285,12 @@ class ParticleData: return # Determine order of concatenation and update the sorted flag - if self._sorted and same_class._sorted \ - and self._data['id'][0] > same_class._data['id'][-1]: + if self._sorted and same_class._sorted and self._data["id"][0] > same_class._data["id"][-1]: for d in self._data: self._data[d] = np.concatenate((same_class._data[d], self._data[d])) self._ncount += same_class._ncount else: - if not (same_class._sorted - and self._data['id'][-1] < same_class._data['id'][0]): + if not (same_class._sorted and self._data["id"][-1] < same_class._data["id"][0]): self._sorted = False for d in self._data: self._data[d] = np.concatenate((self._data[d], same_class._data[d])) @@ -281,7 +303,11 @@ class ParticleData: def remove_single_by_index(self, index): """Remove a particle from the ParticleData instance based on its index.""" - assert type(index) in [int, np.int32, np.intp], f"Trying to remove a particle by index, but index {index} is not a 32-bit integer - invalid operation." + assert type(index) in [ + int, + np.int32, + np.intp, + ], f"Trying to remove a particle by index, but index {index} is not a 32-bit integer - invalid operation." for d in self._data: self._data[d] = np.delete(self._data[d], index, axis=0) @@ -290,12 +316,20 @@ class ParticleData: def remove_multi_by_indices(self, indices): """Remove particles from the ParticleData instance based on their indices.""" - assert indices is not None, "Trying to remove particles by their ParticleData instance indices, but the index list is None - invalid operation." - assert type(indices) in [list, dict, np.ndarray], "Trying to remove particles by their indices, but the index container is not a valid Python-collection - invalid operation." + assert ( + indices is not None + ), "Trying to remove particles by their ParticleData instance indices, but the index list is None - invalid operation." + assert ( + type(indices) in [list, dict, np.ndarray] + ), "Trying to remove particles by their indices, but the index container is not a valid Python-collection - invalid operation." if type(indices) is not dict: - assert len(indices) == 0 or type(indices[0]) in [int, np.int32, np.intp], "Trying to remove particles by their index, but the index type in the Python collection is not a 32-bit integer - invalid operation." + assert ( + len(indices) == 0 or type(indices[0]) in [int, np.int32, np.intp] + ), "Trying to remove particles by their index, but the index type in the Python collection is not a 32-bit integer - invalid operation." else: - assert len(list(indices.values())) == 0 or type(list(indices.values())[0]) in [int, np.int32, np.intp], "Trying to remove particles by their index, but the index type in the Python collection is not a 32-bit integer - invalid operation." + assert ( + len(list(indices.values())) == 0 or type(list(indices.values())[0]) in [int, np.int32, np.intp] + ), "Trying to remove particles by their index, but the index type in the Python collection is not a 32-bit integer - invalid operation." 
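The order-of-concatenation logic in add_same() above keeps the id-sorted invariant cheap to maintain: if the incoming collection's ids all precede the existing ones, prepending preserves sortedness without a re-sort. A small sketch with made-up data:

    import numpy as np

    a = {"id": np.array([5, 6, 7]), "lon": np.array([0.1, 0.2, 0.3])}
    b = {"id": np.array([1, 2]), "lon": np.array([0.8, 0.9])}
    if a["id"][0] > b["id"][-1]:  # b fits entirely before a: prepend
        merged = {k: np.concatenate((b[k], a[k])) for k in a}
    else:                         # otherwise append (and the sorted flag may drop)
        merged = {k: np.concatenate((a[k], b[k])) for k in a}
    assert np.all(np.diff(merged["id"]) >= 0)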
if type(indices) is dict: indices = list(indices.values()) @@ -306,6 +340,7 @@ class ParticleData: def cstruct(self): """Return the ctypes mapping of the particle data.""" + class CParticles(Structure): _fields_ = [(v.name, POINTER(np.ctypeslib.as_ctypes_type(v.dtype))) for v in self._ptype.variables] @@ -320,11 +355,15 @@ class ParticleData: def _to_write_particles(self, pd, time): """Return the Particles that need to be written at time: if particle.time is between time-dt/2 and time+dt (/2)""" - return np.where((np.less_equal(time - np.abs(pd['dt'] / 2), pd['time'], where=np.isfinite(pd['time'])) - & np.greater_equal(time + np.abs(pd['dt'] / 2), pd['time'], where=np.isfinite(pd['time'])) - | ((np.isnan(pd['dt'])) & np.equal(time, pd['time'], where=np.isfinite(pd['time'])))) - & (np.isfinite(pd['id'])) - & (np.isfinite(pd['time'])))[0] + return np.where( + ( + np.less_equal(time - np.abs(pd["dt"] / 2), pd["time"], where=np.isfinite(pd["time"])) + & np.greater_equal(time + np.abs(pd["dt"] / 2), pd["time"], where=np.isfinite(pd["time"])) + | ((np.isnan(pd["dt"])) & np.equal(time, pd["time"], where=np.isfinite(pd["time"]))) + ) + & (np.isfinite(pd["id"])) + & (np.isfinite(pd["time"])) + )[0] def getvardata(self, var, indices=None): if indices is None: @@ -357,7 +396,7 @@ class ParticleData: v.to_write = write_status var_changed = True if not var_changed: - raise SyntaxError(f'Could not change the write status of {var}, because it is not a Variable name') + raise SyntaxError(f"Could not change the write status of {var}, because it is not a Variable name") class ParticleDataAccessor: @@ -423,12 +462,17 @@ class ParticleDataAccessor: return self._pcoll.ptype def __repr__(self): - time_string = 'not_yet_set' if self.time is None or np.isnan(self.time) else f"{self.time:f}" + time_string = "not_yet_set" if self.time is None or np.isnan(self.time) else f"{self.time:f}" str = "P[%d](lon=%f, lat=%f, depth=%f, " % (self.id, self.lon, self.lat, self.depth) for var in self._pcoll.ptype.variables: - if var.name in ['lon_nextloop', 'lat_nextloop', 'depth_nextloop', 'time_nextloop']: # TODO check if time_nextloop is needed (or can work with time-dt?) + if var.name in [ + "lon_nextloop", + "lat_nextloop", + "depth_nextloop", + "time_nextloop", + ]: # TODO check if time_nextloop is needed (or can work with time-dt?) continue - if var.to_write is not False and var.name not in ['id', 'lon', 'lat', 'depth', 'time']: + if var.to_write is not False and var.name not in ["id", "lon", "lat", "depth", "time"]: str += f"{var.name}={getattr(self, var.name):f}, " return str + f"time={time_string})" @@ -449,10 +493,11 @@ class ParticleDataIterator: """ def __init__(self, pcoll, subset=None): - if subset is not None: if len(subset) > 0 and type(subset[0]) not in [int, np.int32, np.intp]: - raise TypeError("Iteration over a subset of particles in the particleset requires a list or numpy array of indices (of type int or np.int32).") + raise TypeError( + "Iteration over a subset of particles in the particleset requires a list or numpy array of indices (of type int or np.int32)." 
+ ) self._indices = subset self.max_len = len(subset) else: diff --git a/parcels/particlefile.py b/parcels/particlefile.py index 9c9c644b..ba0c5113 100644 --- a/parcels/particlefile.py +++ b/parcels/particlefile.py @@ -1,4 +1,5 @@ """Module controlling the writing of ParticleSets to Zarr file.""" + import os from datetime import timedelta @@ -15,12 +16,12 @@ except ModuleNotFoundError: MPI = None -__all__ = ['ParticleFile'] +__all__ = ["ParticleFile"] def _set_calendar(origin_calendar): - if origin_calendar == 'np_datetime64': - return 'standard' + if origin_calendar == "np_datetime64": + return "standard" else: return origin_calendar @@ -56,11 +57,10 @@ class ParticleFile: lonlatdepth_dtype = None def __init__(self, name, particleset, outputdt=np.inf, chunks=None, create_new_zarrfile=True): - self.outputdt = outputdt.total_seconds() if isinstance(outputdt, timedelta) else outputdt self.chunks = chunks self.particleset = particleset - self.parcels_mesh = 'spherical' + self.parcels_mesh = "spherical" if self.particleset.fieldset is not None: self.parcels_mesh = self.particleset.fieldset.gridset.grids[0].mesh self.time_origin = self.particleset.time_origin @@ -77,20 +77,31 @@ class ParticleFile: self.analytical = False # Flag to indicate if ParticleFile is used for analytical trajectories # Reset obs_written of each particle, in case new ParticleFile created for a ParticleSet - particleset.particledata.setallvardata('obs_written', 0) + particleset.particledata.setallvardata("obs_written", 0) - self.metadata = {"feature_type": "trajectory", "Conventions": "CF-1.6/CF-1.7", - "ncei_template_version": "NCEI_NetCDF_Trajectory_Template_v2.0", - "parcels_version": parcels.__version__, - "parcels_mesh": self.parcels_mesh} + self.metadata = { + "feature_type": "trajectory", + "Conventions": "CF-1.6/CF-1.7", + "ncei_template_version": "NCEI_NetCDF_Trajectory_Template_v2.0", + "parcels_version": parcels.__version__, + "parcels_mesh": self.parcels_mesh, + } # Create dictionary to translate datatypes and fill_values - self.fill_value_map = {np.float16: np.nan, np.float32: np.nan, np.float64: np.nan, - np.bool_: np.iinfo(np.int8).max, np.int8: np.iinfo(np.int8).max, - np.int16: np.iinfo(np.int16).max, np.int32: np.iinfo(np.int32).max, - np.int64: np.iinfo(np.int64).max, np.uint8: np.iinfo(np.uint8).max, - np.uint16: np.iinfo(np.uint16).max, np.uint32: np.iinfo(np.uint32).max, - np.uint64: np.iinfo(np.uint64).max} + self.fill_value_map = { + np.float16: np.nan, + np.float32: np.nan, + np.float64: np.nan, + np.bool_: np.iinfo(np.int8).max, + np.int8: np.iinfo(np.int8).max, + np.int16: np.iinfo(np.int16).max, + np.int32: np.iinfo(np.int32).max, + np.int64: np.iinfo(np.int64).max, + np.uint8: np.iinfo(np.uint8).max, + np.uint16: np.iinfo(np.uint16).max, + np.uint32: np.iinfo(np.uint32).max, + np.uint64: np.iinfo(np.uint64).max, + } if issubclass(type(name), zarr.storage.Store): # If we already got a Zarr store, we won't need any of the naming logic below. # But we need to handle incompatibility with MPI mode for now: @@ -99,14 +110,18 @@ class ParticleFile: self.fname = name else: extension = os.path.splitext(str(name))[1] - if extension in ['.nc', '.nc4']: - raise RuntimeError('Output in NetCDF is not supported anymore. Use .zarr extension for ParticleFile name.') + if extension in [".nc", ".nc4"]: + raise RuntimeError( + "Output in NetCDF is not supported anymore. Use .zarr extension for ParticleFile name." 
+ ) if MPI and MPI.COMM_WORLD.Get_size() > 1: self.fname = os.path.join(name, f"proc{self.mpi_rank:02d}.zarr") - if extension in ['.zarr']: - logger.warning(f'The ParticleFile name contains .zarr extension, but zarr files will be written per processor in MPI mode at {self.fname}') + if extension in [".zarr"]: + logger.warning( + f"The ParticleFile name contains .zarr extension, but zarr files will be written per processor in MPI mode at {self.fname}" + ) else: - self.fname = name if extension in ['.zarr'] else "%s.zarr" % name + self.fname = name if extension in [".zarr"] else "%s.zarr" % name def _create_variables_attribute_dict(self): """Creates the dictionary with variable attributes. @@ -115,37 +130,30 @@ class ParticleFile: ----- For ParticleSet structures other than SoA, and structures where ID != index, this has to be overridden. """ - attrs = {'z': {"long_name": "", - "standard_name": "depth", - "units": "m", - "positive": "down"}, - 'trajectory': {"long_name": "Unique identifier for each particle", - "cf_role": "trajectory_id", - "_FillValue": self.fill_value_map[np.int64]}, - 'time': {"long_name": "", - "standard_name": "time", - "units": "seconds", - "axis": "T"}, - 'lon': {"long_name": "", - "standard_name": "longitude", - "units": "degrees_east", - "axis": - "X"}, - 'lat': {"long_name": "", - "standard_name": "latitude", - "units": "degrees_north", - "axis": "Y"}} + attrs = { + "z": {"long_name": "", "standard_name": "depth", "units": "m", "positive": "down"}, + "trajectory": { + "long_name": "Unique identifier for each particle", + "cf_role": "trajectory_id", + "_FillValue": self.fill_value_map[np.int64], + }, + "time": {"long_name": "", "standard_name": "time", "units": "seconds", "axis": "T"}, + "lon": {"long_name": "", "standard_name": "longitude", "units": "degrees_east", "axis": "X"}, + "lat": {"long_name": "", "standard_name": "latitude", "units": "degrees_north", "axis": "Y"}, + } if self.time_origin.calendar is not None: - attrs['time']['units'] = "seconds since " + str(self.time_origin) - attrs['time']['calendar'] = _set_calendar(self.time_origin.calendar) + attrs["time"]["units"] = "seconds since " + str(self.time_origin) + attrs["time"]["calendar"] = _set_calendar(self.time_origin.calendar) for vname in self.vars_to_write: - if vname not in ['time', 'lat', 'lon', 'depth', 'id']: - attrs[vname] = {"_FillValue": self.fill_value_map[self.vars_to_write[vname]], - "long_name": "", - "standard_name": vname, - "units": "unknown"} + if vname not in ["time", "lat", "lon", "depth", "id"]: + attrs[vname] = { + "_FillValue": self.fill_value_map[self.vars_to_write[vname]], + "long_name": "", + "standard_name": vname, + "units": "unknown", + } return attrs @@ -162,22 +170,22 @@ class ParticleFile: self.metadata[name] = message def _convert_varout_name(self, var): - if var == 'depth': - return 'z' - elif var == 'id': - return 'trajectory' + if var == "depth": + return "z" + elif var == "id": + return "trajectory" else: return var def write_once(self, var): - return self.particleset.particledata.ptype[var].to_write == 'once' + return self.particleset.particledata.ptype[var].to_write == "once" def _extend_zarr_dims(self, Z, store, dtype, axis): if axis == 1: a = np.full((Z.shape[0], self.chunks[1]), self.fill_value_map[dtype], dtype=dtype) obs = zarr.group(store=store, overwrite=False)["obs"] if len(obs) == Z.shape[1]: - obs.append(np.arange(self.chunks[1])+obs[-1]+1) + obs.append(np.arange(self.chunks[1]) + obs[-1] + 1) else: extra_trajs = self.maxids - Z.shape[0] if len(Z.shape) 
== 2: @@ -204,17 +212,19 @@ class ParticleFile: logger.warning("ParticleSet is empty on writing as array at time %g" % time) return - indices_to_write = pset.particledata._to_write_particles(pset.particledata._data, time) if indices is None else indices + indices_to_write = ( + pset.particledata._to_write_particles(pset.particledata._data, time) if indices is None else indices + ) if len(indices_to_write) > 0: - pids = pset.particledata.getvardata('id', indices_to_write) + pids = pset.particledata.getvardata("id", indices_to_write) to_add = sorted(set(pids) - set(self.pids_written.keys())) for i, pid in enumerate(to_add): self.pids_written[pid] = self.maxids + i ids = np.array([self.pids_written[p] for p in pids], dtype=int) self.maxids = len(self.pids_written) - once_ids = np.where(pset.particledata.getvardata('obs_written', indices_to_write) == 0)[0] + once_ids = np.where(pset.particledata.getvardata("obs_written", indices_to_write) == 0)[0] if len(once_ids) > 0: ids_once = ids[once_ids] indices_to_write_once = indices_to_write[once_ids] @@ -223,31 +233,41 @@ class ParticleFile: if self.chunks is None: self.chunks = (len(ids), 1) if pset.repeatpclass is not None and self.chunks[0] < 1e4: - logger.warning(f'ParticleFile chunks are set to {self.chunks}, but this may lead to ' - f'a significant slowdown in Parcels when many calls to repeatdt. ' - f'Consider setting a larger chunk size for your ParticleFile (e.g. chunks=(int(1e4), 1)).') + logger.warning( + f"ParticleFile chunks are set to {self.chunks}, but this may lead to " + f"a significant slowdown in Parcels when many calls to repeatdt. " + f"Consider setting a larger chunk size for your ParticleFile (e.g. chunks=(int(1e4), 1))." + ) if (self.maxids > len(ids)) or (self.maxids > self.chunks[0]): arrsize = (self.maxids, self.chunks[1]) else: arrsize = (len(ids), self.chunks[1]) - ds = xr.Dataset(attrs=self.metadata, coords={"trajectory": ("trajectory", pids), - "obs": ("obs", np.arange(arrsize[1], dtype=np.int32))}) + ds = xr.Dataset( + attrs=self.metadata, + coords={"trajectory": ("trajectory", pids), "obs": ("obs", np.arange(arrsize[1], dtype=np.int32))}, + ) attrs = self._create_variables_attribute_dict() obs = np.zeros((self.maxids), dtype=np.int32) for var in self.vars_to_write: varout = self._convert_varout_name(var) - if varout not in ['trajectory']: # because 'trajectory' is written as coordinate + if varout not in ["trajectory"]: # because 'trajectory' is written as coordinate if self.write_once(var): - data = np.full((arrsize[0],), self.fill_value_map[self.vars_to_write[var]], dtype=self.vars_to_write[var]) + data = np.full( + (arrsize[0],), + self.fill_value_map[self.vars_to_write[var]], + dtype=self.vars_to_write[var], + ) data[ids_once] = pset.particledata.getvardata(var, indices_to_write_once) dims = ["trajectory"] else: - data = np.full(arrsize, self.fill_value_map[self.vars_to_write[var]], dtype=self.vars_to_write[var]) + data = np.full( + arrsize, self.fill_value_map[self.vars_to_write[var]], dtype=self.vars_to_write[var] + ) data[ids, 0] = pset.particledata.getvardata(var, indices_to_write) dims = ["trajectory", "obs"] ds[varout] = xr.DataArray(data=data, dims=dims, attrs=attrs[varout]) - ds[varout].encoding['chunks'] = self.chunks[0] if self.write_once(var) else self.chunks - ds.to_zarr(self.fname, mode='w') + ds[varout].encoding["chunks"] = self.chunks[0] if self.write_once(var) else self.chunks + ds.to_zarr(self.fname, mode="w") self.create_new_zarrfile = False else: # Either use the store that was provided directly 
or create a DirectoryStore: @@ -256,7 +276,7 @@ class ParticleFile: else: store = zarr.DirectoryStore(self.fname) Z = zarr.group(store=store, overwrite=False) - obs = pset.particledata.getvardata('obs_written', indices_to_write) + obs = pset.particledata.getvardata("obs_written", indices_to_write) for var in self.vars_to_write: varout = self._convert_varout_name(var) if self.maxids > Z[varout].shape[0]: @@ -269,7 +289,7 @@ class ParticleFile: self._extend_zarr_dims(Z[varout], store, dtype=self.vars_to_write[var], axis=1) Z[varout].vindex[ids, obs] = pset.particledata.getvardata(var, indices_to_write) - pset.particledata.setvardata('obs_written', indices_to_write, obs+1) + pset.particledata.setvardata("obs_written", indices_to_write, obs + 1) def write_latest_locations(self, pset, time): """Write the current (latest) particle locations to zarr file. @@ -283,7 +303,7 @@ class ParticleFile: time : Time at which to write ParticleSet. Note that typically this would be pset.time_nextloop """ - for var in ['lon', 'lat', 'depth', 'time']: + for var in ["lon", "lat", "depth", "time"]: pset.particledata.setallvardata(f"{var}", pset.particledata.getvardata(f"{var}_nextloop")) self.write(pset, time) diff --git a/parcels/particleset.py b/parcels/particleset.py index c4d9d881..c8157f65 100644 --- a/parcels/particleset.py +++ b/parcels/particleset.py @@ -38,12 +38,12 @@ from parcels.tools.global_statics import get_package_dir from parcels.tools.loggers import logger from parcels.tools.statuscodes import StatusCode -__all__ = ['ParticleSet'] +__all__ = ["ParticleSet"] def _convert_to_reltime(time): """Check to determine if the value of the time parameter needs to be converted to a relative value (relative to the time_origin).""" - if isinstance(time, np.datetime64) or (hasattr(time, 'calendar') and time.calendar in _get_cftime_calendars()): + if isinstance(time, np.datetime64) or (hasattr(time, "calendar") and time.calendar in _get_cftime_calendars()): return True return False @@ -88,9 +88,21 @@ class ParticleSet: Other Variables can be initialised using further arguments (e.g. v=... 
for a Variable named 'v') """ - def __init__(self, fieldset, pclass=JITParticle, lon=None, lat=None, - depth=None, time=None, repeatdt=None, lonlatdepth_dtype=None, - pid_orig=None, interaction_distance=None, periodic_domain_zonal=None, **kwargs): + def __init__( + self, + fieldset, + pclass=JITParticle, + lon=None, + lat=None, + depth=None, + time=None, + repeatdt=None, + lonlatdepth_dtype=None, + pid_orig=None, + interaction_distance=None, + periodic_domain_zonal=None, + **kwargs, + ): self.particledata = None self.repeat_starttime = None self.repeatlon = None @@ -110,9 +122,10 @@ class ParticleSet: class_name = pclass.__name__ array_class = None if class_name not in dir(): + def ArrayClass_init(self, *args, **kwargs): - fieldset = kwargs.get('fieldset', None) - ngrids = kwargs.get('ngrids', None) + fieldset = kwargs.get("fieldset", None) + ngrids = kwargs.get("ngrids", None) if type(self).ngrids.initial < 0: numgrids = ngrids if numgrids is None and fieldset is not None: @@ -121,20 +134,22 @@ class ParticleSet: type(self).ngrids.initial = numgrids self.ngrids = type(self).ngrids.initial if self.ngrids >= 0: - for index in ['xi', 'yi', 'zi', 'ti']: - if index != 'ti': + for index in ["xi", "yi", "zi", "ti"]: + if index != "ti": setattr(self, index, np.zeros(self.ngrids, dtype=np.int32)) else: - setattr(self, index, -1*np.ones(self.ngrids, dtype=np.int32)) + setattr(self, index, -1 * np.ones(self.ngrids, dtype=np.int32)) super(type(self), self).__init__(*args, **kwargs) - array_class_vdict = {"ngrids": Variable('ngrids', dtype=np.int32, to_write=False, initial=-1), - "xi": Variable('xi', dtype=np.int32, to_write=False), - "yi": Variable('yi', dtype=np.int32, to_write=False), - "zi": Variable('zi', dtype=np.int32, to_write=False), - "ti": Variable('ti', dtype=np.int32, to_write=False, initial=-1), - "__init__": ArrayClass_init} - array_class = type(class_name, (pclass, ), array_class_vdict) + array_class_vdict = { + "ngrids": Variable("ngrids", dtype=np.int32, to_write=False, initial=-1), + "xi": Variable("xi", dtype=np.int32, to_write=False), + "yi": Variable("yi", dtype=np.int32, to_write=False), + "zi": Variable("zi", dtype=np.int32, to_write=False), + "ti": Variable("ti", dtype=np.int32, to_write=False, initial=-1), + "__init__": ArrayClass_init, + } + array_class = type(class_name, (pclass,), array_class_vdict) else: array_class = locals()[class_name] # ==== dynamic re-classing completed ==== # @@ -147,12 +162,11 @@ class ParticleSet: pid_orig = np.arange(lon.size) if depth is None: - mindepth = self.fieldset.gridset.dimrange('depth')[0] + mindepth = self.fieldset.gridset.dimrange("depth")[0] depth = np.ones(lon.size) * mindepth else: depth = convert_to_flat_array(depth) - assert lon.size == lat.size and lon.size == depth.size, ( - 'lon, lat, depth don''t all have the same lenghts') + assert lon.size == lat.size and lon.size == depth.size, "lon, lat, depth don" "t all have the same lenghts" time = convert_to_flat_array(time) time = np.repeat(time, lon.size) if time.size == 1 else time @@ -160,31 +174,33 @@ class ParticleSet: if time.size > 0 and type(time[0]) in [datetime, date]: time = np.array([np.datetime64(t) for t in time]) if time.size > 0 and isinstance(time[0], np.timedelta64) and not self.time_origin: - raise NotImplementedError('If fieldset.time_origin is not a date, time of a particle must be a double') + raise NotImplementedError("If fieldset.time_origin is not a date, time of a particle must be a double") time = np.array([self.time_origin.reltime(t) if 
_convert_to_reltime(t) else t for t in time])

-        assert lon.size == time.size, (
-            'time and positions (lon, lat, depth) don''t have the same lengths.')
+        assert lon.size == time.size, "time and positions (lon, lat, depth) don't have the same lengths."

         if lonlatdepth_dtype is None:
             lonlatdepth_dtype = self.lonlatdepth_dtype_from_field_interp_method(fieldset.U)
-        assert lonlatdepth_dtype in [np.float32, np.float64], \
-            'lon lat depth precision should be set to either np.float32 or np.float64'
+        assert lonlatdepth_dtype in [
+            np.float32,
+            np.float64,
+        ], "lon lat depth precision should be set to either np.float32 or np.float64"

         for kwvar in kwargs:
-            if kwvar not in ['partition_function']:
+            if kwvar not in ["partition_function"]:
                 kwargs[kwvar] = convert_to_flat_array(kwargs[kwvar])
-                assert lon.size == kwargs[kwvar].size, (
-                    f"{kwvar} and positions (lon, lat, depth) don't have the same lengths.")
+                assert (
+                    lon.size == kwargs[kwvar].size
+                ), f"{kwvar} and positions (lon, lat, depth) don't have the same lengths."

         self.repeatdt = repeatdt.total_seconds() if isinstance(repeatdt, timedelta) else repeatdt
         if self.repeatdt:
             if self.repeatdt <= 0:
-                raise 'Repeatdt should be > 0'
+                raise ValueError("repeatdt should be > 0")
             if time[0] and not np.allclose(time, time[0]):
-                raise 'All Particle.time should be the same when repeatdt is not None'
+                raise ValueError("All Particle.time should be the same when repeatdt is not None")
             self.repeatpclass = pclass
             self.repeatkwargs = kwargs
-            self.repeatkwargs.pop('partition_function', None)
+            self.repeatkwargs.pop("partition_function", None)

         ngrids = fieldset.gridset.size
@@ -200,9 +216,16 @@ class ParticleSet:
         self._dirty_neighbor = True

         self.particledata = ParticleData(
-            _pclass, lon=lon, lat=lat, depth=depth, time=time,
-            lonlatdepth_dtype=lonlatdepth_dtype, pid_orig=pid_orig,
-            ngrid=ngrids, **kwargs)
+            _pclass,
+            lon=lon,
+            lat=lat,
+            depth=depth,
+            time=time,
+            lonlatdepth_dtype=lonlatdepth_dtype,
+            pid_orig=pid_orig,
+            ngrid=ngrids,
+            **kwargs,
+        )

         # Initialize neighbor search data structure (used for interaction).
         if interaction_distance is not None:
@@ -221,8 +244,7 @@ class ParticleSet:
                 else:
                     interaction_class = KDTreeFlatNeighborSearch
             else:
-                assert False, ("Interaction is only possible on 'flat' and "
-                               "'spherical' meshes")
+                assert False, "Interaction is only possible on 'flat' and 'spherical' meshes"
             try:
                 if len(interaction_distance) == 2:
                     inter_dist_vert, inter_dist_horiz = interaction_distance
@@ -235,21 +257,24 @@ class ParticleSet:
             self._neighbor_tree = interaction_class(
                 inter_dist_vert=inter_dist_vert,
                 inter_dist_horiz=inter_dist_horiz,
-                periodic_domain_zonal=periodic_domain_zonal)
+                periodic_domain_zonal=periodic_domain_zonal,
+            )
             # End of neighbor search data structure initialization.
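The try block above lets interaction_distance be either a single radius or a (vertical, horizontal) pair; since the except branch is elided from this hunk, the sketch below only assumes that convention rather than reproducing the real code:

    def split_interaction_distance(interaction_distance):
        # Accept a scalar or a (vertical, horizontal) pair, mirroring ParticleSet.__init__
        try:
            inter_dist_vert, inter_dist_horiz = interaction_distance
        except TypeError:  # scalar: use the same cut-off in both directions
            inter_dist_vert = inter_dist_horiz = interaction_distance
        return inter_dist_vert, inter_dist_horiz

    assert split_interaction_distance(50.0) == (50.0, 50.0)
    assert split_interaction_distance((10.0, 100.0)) == (10.0, 100.0)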
if self.repeatdt: if len(time) > 0 and time[0] is None: self.repeat_starttime = time[0] else: - if self.particledata.data['time'][0] and not np.allclose(self.particledata.data['time'], self.particledata.data['time'][0]): - raise ValueError('All Particle.time should be the same when repeatdt is not None') - self.repeat_starttime = copy(self.particledata.data['time'][0]) - self.repeatlon = copy(self.particledata.data['lon']) - self.repeatlat = copy(self.particledata.data['lat']) - self.repeatdepth = copy(self.particledata.data['depth']) + if self.particledata.data["time"][0] and not np.allclose( + self.particledata.data["time"], self.particledata.data["time"][0] + ): + raise ValueError("All Particle.time should be the same when repeatdt is not None") + self.repeat_starttime = copy(self.particledata.data["time"][0]) + self.repeatlon = copy(self.particledata.data["lon"]) + self.repeatlat = copy(self.particledata.data["lat"]) + self.repeatdepth = copy(self.particledata.data["depth"]) for kwvar in kwargs: - if kwvar not in ['partition_function']: + if kwvar not in ["partition_function"]: self.repeatkwargs[kwvar] = copy(self.particledata.data[kwvar]) if self.repeatdt: @@ -284,7 +309,7 @@ class ParticleSet: for v in self.particledata.ptype.variables: if v.name == name: return getattr(self.particledata, name) - if name in self.__dict__ and name[0] != '_': + if name in self.__dict__ and name[0] != "_": return self.__dict__[name] else: return False @@ -297,10 +322,10 @@ class ParticleSet: def lonlatdepth_dtype_from_field_interp_method(field): if isinstance(field, NestedField): for f in field: - if f.interp_method == 'cgrid_velocity': + if f.interp_method == "cgrid_velocity": return np.float64 else: - if field.interp_method == 'cgrid_velocity': + if field.interp_method == "cgrid_velocity": return np.float64 return np.float32 @@ -384,8 +409,8 @@ class ParticleSet: self.remove_indices(np.where(indices)[0]) def active_particles_mask(self, time, dt): - active_indices = (time - self.particledata.data['time'])/dt >= 0 - non_err_indices = np.isin(self.particledata.data['state'], [StatusCode.Success, StatusCode.Evaluate]) + active_indices = (time - self.particledata.data["time"]) / dt >= 0 + non_err_indices = np.isin(self.particledata.data["state"], [StatusCode.Success, StatusCode.Evaluate]) active_indices = np.logical_and(active_indices, non_err_indices) self._active_particle_idx = np.where(active_indices)[0] return active_indices @@ -393,11 +418,13 @@ class ParticleSet: def compute_neighbor_tree(self, time, dt): active_mask = self.active_particles_mask(time, dt) - self._values = np.vstack(( - self.particledata.data['depth'], - self.particledata.data['lat'], - self.particledata.data['lon'], - )) + self._values = np.vstack( + ( + self.particledata.data["depth"], + self.particledata.data["lat"], + self.particledata.data["lon"], + ) + ) if self._dirty_neighbor: self._neighbor_tree.rebuild(self._values, active_mask=active_mask) self._dirty_neighbor = False @@ -405,19 +432,18 @@ class ParticleSet: self._neighbor_tree.update_values(self._values, new_active_mask=active_mask) def neighbors_by_index(self, particle_idx): - neighbor_idx, distances = self._neighbor_tree.find_neighbors_by_idx( - particle_idx) + neighbor_idx, distances = self._neighbor_tree.find_neighbors_by_idx(particle_idx) neighbor_idx = self._active_particle_idx[neighbor_idx] - mask = (neighbor_idx != particle_idx) + mask = neighbor_idx != particle_idx neighbor_idx = neighbor_idx[mask] - if 'horiz_dist' in self.particledata._ptype.variables: - 
self.particledata.data['vert_dist'][neighbor_idx] = distances[0, mask] - self.particledata.data['horiz_dist'][neighbor_idx] = distances[1, mask] + if "horiz_dist" in self.particledata._ptype.variables: + self.particledata.data["vert_dist"][neighbor_idx] = distances[0, mask] + self.particledata.data["horiz_dist"][neighbor_idx] = distances[1, mask] return ParticleDataIterator(self.particledata, subset=neighbor_idx) def neighbors_by_coor(self, coor): neighbor_idx = self._neighbor_tree.find_neighbors_by_coor(coor) - neighbor_ids = self.particledata.data['id'][neighbor_idx] + neighbor_ids = self.particledata.data["id"][neighbor_idx] return neighbor_ids def populate_indices(self): @@ -438,18 +464,20 @@ class ParticleSet: IN = np.all(~np.isnan(tree_data), axis=1) tree = KDTree(tree_data[IN, :]) # stack all the particle positions for a single query - pts = np.stack((self.particledata.data['lon'], self.particledata.data['lat']), axis=-1) + pts = np.stack((self.particledata.data["lon"], self.particledata.data["lat"]), axis=-1) # query datatype needs to match tree datatype _, idx_nan = tree.query(pts.astype(tree_data.dtype)) idx = np.where(IN)[0][idx_nan] yi, xi = np.unravel_index(idx, grid.lon.shape) - self.particledata.data['xi'][:, i] = xi - self.particledata.data['yi'][:, i] = yi + self.particledata.data["xi"][:, i] = xi + self.particledata.data["yi"][:, i] = yi @classmethod - def from_list(cls, fieldset, pclass, lon, lat, depth=None, time=None, repeatdt=None, lonlatdepth_dtype=None, **kwargs): + def from_list( + cls, fieldset, pclass, lon, lat, depth=None, time=None, repeatdt=None, lonlatdepth_dtype=None, **kwargs + ): """Initialise the ParticleSet from lists of lon and lat. Parameters @@ -476,10 +504,32 @@ class ParticleSet: **kwargs : Keyword arguments passed to the particleset constructor. """ - return cls(fieldset=fieldset, pclass=pclass, lon=lon, lat=lat, depth=depth, time=time, repeatdt=repeatdt, lonlatdepth_dtype=lonlatdepth_dtype, **kwargs) + return cls( + fieldset=fieldset, + pclass=pclass, + lon=lon, + lat=lat, + depth=depth, + time=time, + repeatdt=repeatdt, + lonlatdepth_dtype=lonlatdepth_dtype, + **kwargs, + ) @classmethod - def from_line(cls, fieldset, pclass, start, finish, size, depth=None, time=None, repeatdt=None, lonlatdepth_dtype=None, **kwargs): + def from_line( + cls, + fieldset, + pclass, + start, + finish, + size, + depth=None, + time=None, + repeatdt=None, + lonlatdepth_dtype=None, + **kwargs, + ): """Create a particleset in the shape of a line (according to a cartesian grid). Initialise the ParticleSet from start/finish coordinates with equidistant spacing @@ -513,10 +563,20 @@ class ParticleSet: lat = np.linspace(start[1], finish[1], size) if type(depth) in [int, float]: depth = [depth] * size - return cls(fieldset=fieldset, pclass=pclass, lon=lon, lat=lat, depth=depth, time=time, repeatdt=repeatdt, lonlatdepth_dtype=lonlatdepth_dtype, **kwargs) + return cls( + fieldset=fieldset, + pclass=pclass, + lon=lon, + lat=lat, + depth=depth, + time=time, + repeatdt=repeatdt, + lonlatdepth_dtype=lonlatdepth_dtype, + **kwargs, + ) @classmethod - def monte_carlo_sample(cls, start_field, size, mode='monte_carlo'): + def monte_carlo_sample(cls, start_field, size, mode="monte_carlo"): """Converts a starting field into a monte-carlo sample of lons and lats. Parameters @@ -535,13 +595,13 @@ class ParticleSet: list of float A list of latitude values. 
""" - if mode == 'monte_carlo': + if mode == "monte_carlo": data = start_field.data if isinstance(start_field.data, np.ndarray) else np.array(start_field.data) - if start_field.interp_method == 'cgrid_tracer': + if start_field.interp_method == "cgrid_tracer": p_interior = np.squeeze(data[0, 1:, 1:]) else: # if A-grid d = data - p_interior = (d[0, :-1, :-1] + d[0, 1:, :-1] + d[0, :-1, 1:] + d[0, 1:, 1:])/4. + p_interior = (d[0, :-1, :-1] + d[0, 1:, :-1] + d[0, :-1, 1:] + d[0, 1:, 1:]) / 4.0 p_interior = np.where(d[0, :-1, :-1] == 0, 0, p_interior) p_interior = np.where(d[0, 1:, :-1] == 0, 0, p_interior) p_interior = np.where(d[0, 1:, 1:] == 0, 0, p_interior) @@ -557,24 +617,39 @@ class ParticleSet: lon = grid.lon[i] + xsi * (grid.lon[i + 1] - grid.lon[i]) lat = grid.lat[j] + eta * (grid.lat[j + 1] - grid.lat[j]) else: - lons = np.array([grid.lon[j, i], grid.lon[j, i+1], grid.lon[j+1, i+1], grid.lon[j+1, i]]) - if grid.mesh == 'spherical': - lons[1:] = np.where(lons[1:] - lons[0] > 180, lons[1:]-360, lons[1:]) - lons[1:] = np.where(-lons[1:] + lons[0] > 180, lons[1:]+360, lons[1:]) - lon = (1-xsi)*(1-eta) * lons[0] +\ - xsi*(1-eta) * lons[1] +\ - xsi*eta * lons[2] +\ - (1-xsi)*eta * lons[3] - lat = (1-xsi)*(1-eta) * grid.lat[j, i] +\ - xsi*(1-eta) * grid.lat[j, i+1] +\ - xsi*eta * grid.lat[j+1, i+1] +\ - (1-xsi)*eta * grid.lat[j+1, i] + lons = np.array([grid.lon[j, i], grid.lon[j, i + 1], grid.lon[j + 1, i + 1], grid.lon[j + 1, i]]) + if grid.mesh == "spherical": + lons[1:] = np.where(lons[1:] - lons[0] > 180, lons[1:] - 360, lons[1:]) + lons[1:] = np.where(-lons[1:] + lons[0] > 180, lons[1:] + 360, lons[1:]) + lon = ( + (1 - xsi) * (1 - eta) * lons[0] + + xsi * (1 - eta) * lons[1] + + xsi * eta * lons[2] + + (1 - xsi) * eta * lons[3] + ) + lat = ( + (1 - xsi) * (1 - eta) * grid.lat[j, i] + + xsi * (1 - eta) * grid.lat[j, i + 1] + + xsi * eta * grid.lat[j + 1, i + 1] + + (1 - xsi) * eta * grid.lat[j + 1, i] + ) return list(lon), list(lat) else: raise NotImplementedError(f'Mode {mode} not implemented. Please use "monte carlo" algorithm instead.') @classmethod - def from_field(cls, fieldset, pclass, start_field, size, mode='monte_carlo', depth=None, time=None, repeatdt=None, lonlatdepth_dtype=None): + def from_field( + cls, + fieldset, + pclass, + start_field, + size, + mode="monte_carlo", + depth=None, + time=None, + repeatdt=None, + lonlatdepth_dtype=None, + ): """Initialise the ParticleSet randomly drawn according to distribution from a field. Parameters @@ -602,10 +677,21 @@ class ParticleSet: """ lon, lat = cls.monte_carlo_sample(start_field, size, mode) - return cls(fieldset=fieldset, pclass=pclass, lon=lon, lat=lat, depth=depth, time=time, lonlatdepth_dtype=lonlatdepth_dtype, repeatdt=repeatdt) + return cls( + fieldset=fieldset, + pclass=pclass, + lon=lon, + lat=lat, + depth=depth, + time=time, + lonlatdepth_dtype=lonlatdepth_dtype, + repeatdt=repeatdt, + ) @classmethod - def from_particlefile(cls, fieldset, pclass, filename, restart=True, restarttime=None, repeatdt=None, lonlatdepth_dtype=None, **kwargs): + def from_particlefile( + cls, fieldset, pclass, filename, restart=True, restarttime=None, repeatdt=None, lonlatdepth_dtype=None, **kwargs + ): """Initialise the ParticleSet from a zarr ParticleFile. This creates a new ParticleSet based on locations of all particles written in a zarr ParticleFile at a certain time. Particle IDs are preserved if restart=True @@ -635,9 +721,11 @@ class ParticleSet: Keyword arguments passed to the particleset constructor. 
""" if repeatdt is not None: - logger.warning(f'Note that the `repeatdt` argument is not retained from {filename}, and that ' - 'setting a new repeatdt will start particles from the _new_ particle ' - 'locations.') + logger.warning( + f"Note that the `repeatdt` argument is not retained from {filename}, and that " + "setting a new repeatdt will start particles from the _new_ particle " + "locations." + ) pfile = xr.open_zarr(str(filename)) pfile_vars = [v for v in pfile.data_vars] @@ -647,44 +735,69 @@ class ParticleSet: for v in pclass.getPType().variables: if v.name in pfile_vars: vars[v.name] = np.ma.filled(pfile.variables[v.name], np.nan) - elif v.name not in ['xi', 'yi', 'zi', 'ti', 'dt', 'depth', 'id', 'obs_written', 'state', - 'lon_nextloop', 'lat_nextloop', 'depth_nextloop', 'time_nextloop'] \ - and v.to_write: - raise RuntimeError(f'Variable {v.name} is in pclass but not in the particlefile') + elif ( + v.name + not in [ + "xi", + "yi", + "zi", + "ti", + "dt", + "depth", + "id", + "obs_written", + "state", + "lon_nextloop", + "lat_nextloop", + "depth_nextloop", + "time_nextloop", + ] + and v.to_write + ): + raise RuntimeError(f"Variable {v.name} is in pclass but not in the particlefile") to_write[v.name] = v.to_write - vars['depth'] = np.ma.filled(pfile.variables['z'], np.nan) - vars['id'] = np.ma.filled(pfile.variables['trajectory'], np.nan) + vars["depth"] = np.ma.filled(pfile.variables["z"], np.nan) + vars["id"] = np.ma.filled(pfile.variables["trajectory"], np.nan) - for v in ['lon', 'lat', 'depth', 'time']: + for v in ["lon", "lat", "depth", "time"]: to_write[v] = True - if isinstance(vars['time'][0, 0], np.timedelta64): - vars['time'] = np.array([t/np.timedelta64(1, 's') for t in vars['time']]) + if isinstance(vars["time"][0, 0], np.timedelta64): + vars["time"] = np.array([t / np.timedelta64(1, "s") for t in vars["time"]]) if restarttime is None: - restarttime = np.nanmax(vars['time']) + restarttime = np.nanmax(vars["time"]) elif callable(restarttime): - restarttime = restarttime(vars['time']) + restarttime = restarttime(vars["time"]) else: restarttime = restarttime - inds = np.where(vars['time'] == restarttime) + inds = np.where(vars["time"] == restarttime) for v in vars: if to_write[v] is True: vars[v] = vars[v][inds] - elif to_write[v] == 'once': + elif to_write[v] == "once": vars[v] = vars[v][inds[0]] - if v not in ['lon', 'lat', 'depth', 'time', 'id']: + if v not in ["lon", "lat", "depth", "time", "id"]: kwargs[v] = vars[v] if restart: pclass.setLastID(0) # reset to zero offset else: - vars['id'] = None - - return cls(fieldset=fieldset, pclass=pclass, lon=vars['lon'], lat=vars['lat'], - depth=vars['depth'], time=vars['time'], pid_orig=vars['id'], - lonlatdepth_dtype=lonlatdepth_dtype, repeatdt=repeatdt, **kwargs) + vars["id"] = None + + return cls( + fieldset=fieldset, + pclass=pclass, + lon=vars["lon"], + lat=vars["lat"], + depth=vars["depth"], + time=vars["time"], + pid_orig=vars["id"], + lonlatdepth_dtype=lonlatdepth_dtype, + repeatdt=repeatdt, + **kwargs, + ) def Kernel(self, pyfunc, c_include="", delete_cfiles=True): """Wrapper method to convert a `pyfunc` into a :class:`parcels.kernel.Kernel` object. 
@@ -722,7 +835,9 @@ class ParticleSet: def InteractionKernel(self, pyfunc_inter, delete_cfiles=True): if pyfunc_inter is None: return None - return InteractionKernel(self.fieldset, self.particledata.ptype, pyfunc=pyfunc_inter, delete_cfiles=delete_cfiles) + return InteractionKernel( + self.fieldset, self.particledata.ptype, pyfunc=pyfunc_inter, delete_cfiles=delete_cfiles + ) def ParticleFile(self, *args, **kwargs): """Wrapper method to initialise a :class:`parcels.particlefile.ParticleFile` object from the ParticleSet.""" @@ -748,7 +863,15 @@ class ParticleSet: Numpy array of indices that satisfy the test. """ - compare_values = np.array([compare_values, ]) if type(compare_values) not in [list, dict, np.ndarray] else compare_values + compare_values = ( + np.array( + [ + compare_values, + ] + ) + if type(compare_values) not in [list, dict, np.ndarray] + else compare_values + ) return np.where(np.isin(self.particledata.data[variable_name], compare_values, invert=invert))[0] @property @@ -760,7 +883,7 @@ class ParticleSet: iterator ParticleDataIterator over error particles. """ - error_indices = self.data_indices('state', [StatusCode.Success, StatusCode.Evaluate], invert=True) + error_indices = self.data_indices("state", [StatusCode.Success, StatusCode.Evaluate], invert=True) return ParticleDataIterator(self.particledata, subset=error_indices) @property @@ -772,9 +895,7 @@ class ParticleSet: int Number of error particles. """ - return np.sum(np.isin( - self.particledata.data['state'], - [StatusCode.Success, StatusCode.Evaluate], invert=True)) + return np.sum(np.isin(self.particledata.data["state"], [StatusCode.Success, StatusCode.Evaluate], invert=True)) def set_variable_write_status(self, var, write_status): """Method to set the write status of a Variable. @@ -788,8 +909,19 @@ class ParticleSet: """ self.particledata.set_variable_write_status(var, write_status) - def execute(self, pyfunc=AdvectionRK4, pyfunc_inter=None, endtime=None, runtime=None, dt=1., - output_file=None, verbose_progress=True, postIterationCallbacks=None, callbackdt=None, delete_cfiles=True): + def execute( + self, + pyfunc=AdvectionRK4, + pyfunc_inter=None, + endtime=None, + runtime=None, + dt=1.0, + output_file=None, + verbose_progress=True, + postIterationCallbacks=None, + callbackdt=None, + delete_cfiles=True, + ): """Execute a given kernel function over the particle set for multiple timesteps. Optionally also provide sub-timestepping @@ -842,11 +974,13 @@ class ParticleSet: # Prepare JIT kernel execution if self.particledata.ptype.uses_jit: self.kernel.remove_lib() - cppargs = ['-DDOUBLE_COORD_VARIABLES'] if self.particledata.lonlatdepth_dtype else None - self.kernel.compile(compiler=GNUCompiler(cppargs=cppargs, incdirs=[os.path.join(get_package_dir(), 'include'), "."])) + cppargs = ["-DDOUBLE_COORD_VARIABLES"] if self.particledata.lonlatdepth_dtype else None + self.kernel.compile( + compiler=GNUCompiler(cppargs=cppargs, incdirs=[os.path.join(get_package_dir(), "include"), "."]) + ) self.kernel.load_lib() if output_file: - output_file.add_metadata('parcels_kernels', self.kernel.name) + output_file.add_metadata("parcels_kernels", self.kernel.name) # Set up the interaction kernel(s) if not set and given. 
if self.interaction_kernel is None and pyfunc_inter is not None: @@ -857,43 +991,45 @@ class ParticleSet: # Convert all time variables to seconds if isinstance(endtime, timedelta): - raise TypeError('endtime must be either a datetime or a double') + raise TypeError("endtime must be either a datetime or a double") if isinstance(endtime, datetime): endtime = np.datetime64(endtime) elif isinstance(endtime, cftime.datetime): endtime = self.time_origin.reltime(endtime) if isinstance(endtime, np.datetime64): if self.time_origin.calendar is None: - raise NotImplementedError('If fieldset.time_origin is not a date, execution endtime must be a double') + raise NotImplementedError("If fieldset.time_origin is not a date, execution endtime must be a double") endtime = self.time_origin.reltime(endtime) if isinstance(runtime, timedelta): runtime = runtime.total_seconds() if isinstance(dt, timedelta): dt = dt.total_seconds() if abs(dt) <= 1e-6: - raise ValueError('Time step dt is too small') + raise ValueError("Time step dt is too small") if (dt * 1e6) % 1 != 0: - raise ValueError('Output interval should not have finer precision than 1e-6 s') + raise ValueError("Output interval should not have finer precision than 1e-6 s") outputdt = output_file.outputdt if output_file else np.inf if isinstance(outputdt, timedelta): outputdt = outputdt.total_seconds() if isinstance(callbackdt, timedelta): callbackdt = callbackdt.total_seconds() - assert runtime is None or runtime >= 0, 'runtime must be positive' - assert outputdt is None or outputdt >= 0, 'outputdt must be positive' + assert runtime is None or runtime >= 0, "runtime must be positive" + assert outputdt is None or outputdt >= 0, "outputdt must be positive" if runtime is not None and endtime is not None: - raise RuntimeError('Only one of (endtime, runtime) can be specified') + raise RuntimeError("Only one of (endtime, runtime) can be specified") - mintime, maxtime = self.fieldset.gridset.dimrange('time_full') + mintime, maxtime = self.fieldset.gridset.dimrange("time_full") default_release_time = mintime if dt >= 0 else maxtime - if np.any(np.isnan(self.particledata.data['time'])): - self.particledata.data['time'][np.isnan(self.particledata.data['time'])] = default_release_time - self.particledata.data['time_nextloop'][np.isnan(self.particledata.data['time_nextloop'])] = default_release_time - min_rt = np.min(self.particledata.data['time_nextloop']) - max_rt = np.max(self.particledata.data['time_nextloop']) + if np.any(np.isnan(self.particledata.data["time"])): + self.particledata.data["time"][np.isnan(self.particledata.data["time"])] = default_release_time + self.particledata.data["time_nextloop"][np.isnan(self.particledata.data["time_nextloop"])] = ( + default_release_time + ) + min_rt = np.min(self.particledata.data["time_nextloop"]) + max_rt = np.max(self.particledata.data["time_nextloop"]) # Derive starttime and endtime from arguments or fieldset defaults starttime = min_rt if dt >= 0 else max_rt @@ -902,14 +1038,16 @@ class ParticleSet: if runtime is not None: endtime = starttime + runtime * np.sign(dt) elif endtime is None: - mintime, maxtime = self.fieldset.gridset.dimrange('time_full') + mintime, maxtime = self.fieldset.gridset.dimrange("time_full") endtime = maxtime if dt >= 0 else mintime - if (abs(endtime-starttime) < 1e-5 or runtime == 0) and dt == 0: - raise RuntimeError("dt and runtime are zero, or endtime is equal to Particle.time. 
" - "ParticleSet.execute() will not do anything.") + if (abs(endtime - starttime) < 1e-5 or runtime == 0) and dt == 0: + raise RuntimeError( + "dt and runtime are zero, or endtime is equal to Particle.time. " + "ParticleSet.execute() will not do anything." + ) - self.particledata._data['dt'][:] = dt + self.particledata._data["dt"][:] = dt if callbackdt is None: interupt_dts = [np.inf, outputdt] @@ -919,16 +1057,18 @@ class ParticleSet: # Set up pbar if output_file: - logger.info(f'Output files are stored in {output_file.fname}.') + logger.info(f"Output files are stored in {output_file.fname}.") if verbose_progress: pbar = tqdm(total=abs(endtime - starttime), file=sys.stdout) # Set up variables for first iteration if self.repeatdt: - next_prelease = self.repeat_starttime + (abs(starttime - self.repeat_starttime) // self.repeatdt + 1) * self.repeatdt * np.sign(dt) + next_prelease = self.repeat_starttime + ( + abs(starttime - self.repeat_starttime) // self.repeatdt + 1 + ) * self.repeatdt * np.sign(dt) else: - next_prelease = np.inf if dt > 0 else - np.inf + next_prelease = np.inf if dt > 0 else -np.inf if output_file: next_output = starttime + dt else: @@ -960,9 +1100,9 @@ class ParticleSet: cur_time = time while (cur_time < next_time and dt > 0) or (cur_time > next_time and dt < 0): if dt > 0: - cur_end_time = min(cur_time+dt, next_time) + cur_end_time = min(cur_time + dt, next_time) else: - cur_end_time = max(cur_time+dt, next_time) + cur_end_time = max(cur_time + dt, next_time) self.kernel.execute(self, endtime=cur_end_time, dt=dt) self.interaction_kernel.execute(self, endtime=cur_end_time, dt=dt) cur_time += dt @@ -971,10 +1111,12 @@ class ParticleSet: if abs(time - next_output) < tol: for fld in self.fieldset.get_fields(): - if hasattr(fld, 'to_write') and fld.to_write: + if hasattr(fld, "to_write") and fld.to_write: if fld.grid.tdim > 1: - raise RuntimeError('Field writing during execution only works for Fields with one snapshot in time') - fldfilename = str(output_file.fname).replace('.zarr', '_%.4d' % fld.to_write) + raise RuntimeError( + "Field writing during execution only works for Fields with one snapshot in time" + ) + fldfilename = str(output_file.fname).replace(".zarr", "_%.4d" % fld.to_write) fld.write(fldfilename) fld.to_write += 1 @@ -988,19 +1130,25 @@ class ParticleSet: next_output += outputdt * np.sign(dt) # ==== insert post-process here to also allow for memory clean-up via external func ==== # - if abs(time-next_callback) < tol: + if abs(time - next_callback) < tol: if postIterationCallbacks is not None: for extFunc in postIterationCallbacks: extFunc() next_callback += callbackdt * np.sign(dt) - if abs(time-next_prelease) < tol: + if abs(time - next_prelease) < tol: pset_new = self.__class__( - fieldset=self.fieldset, time=time, lon=self.repeatlon, - lat=self.repeatlat, depth=self.repeatdepth, + fieldset=self.fieldset, + time=time, + lon=self.repeatlon, + lat=self.repeatlat, + depth=self.repeatdepth, pclass=self.repeatpclass, lonlatdepth_dtype=self.particledata.lonlatdepth_dtype, - partition_function=False, pid_orig=self.repeatpid, **self.repeatkwargs) + partition_function=False, + pid_orig=self.repeatpid, + **self.repeatkwargs, + ) for p in pset_new: p.dt = dt self.add(pset_new) diff --git a/parcels/rng.py b/parcels/rng.py index 77bd15e1..a395796e 100644 --- a/parcels/rng.py +++ b/parcels/rng.py @@ -10,7 +10,7 @@ from parcels.compilation.codecompiler import GNUCompiler from parcels.tools import get_cache_dir, get_package_dir from parcels.tools.loggers import logger 
-__all__ = ['seed', 'random', 'uniform', 'randint', 'normalvariate', 'expovariate', 'vonmisesvariate'] +__all__ = ["seed", "random", "uniform", "randint", "normalvariate", "expovariate", "vonmisesvariate"] class RandomC: @@ -78,13 +78,13 @@ extern float pcls_vonmisesvariate(float mu, float kappa){ def unload_lib(self): # Unload the currently loaded dynamic linked library to be secure if self._lib is not None and self._loaded and _ctypes is not None: - _ctypes.FreeLibrary(self._lib._handle) if sys.platform == 'win32' else _ctypes.dlclose(self._lib._handle) + _ctypes.FreeLibrary(self._lib._handle) if sys.platform == "win32" else _ctypes.dlclose(self._lib._handle) del self._lib self._lib = None self._loaded = False def load_lib(self): - self._lib = npct.load_library(self.lib_file, '.') + self._lib = npct.load_library(self.lib_file, ".") self._loaded = True def remove_lib(self): @@ -95,7 +95,7 @@ extern float pcls_vonmisesvariate(float mu, float kappa){ def compile(self, compiler=None): if self.src_file is None or self.lib_file is None or self.log_file is None: - basename = 'parcels_random_%s' % uuid.uuid4() + basename = "parcels_random_%s" % uuid.uuid4() lib_filename = "lib" + basename basepath = os.path.join(get_cache_dir(), f"{basename}") libpath = os.path.join(get_cache_dir(), f"{lib_filename}") @@ -105,10 +105,12 @@ extern float pcls_vonmisesvariate(float mu, float kappa){ ccompiler = compiler if ccompiler is None: cppargs = [] - incdirs = [os.path.join(get_package_dir(), 'include'), ] + incdirs = [ + os.path.join(get_package_dir(), "include"), + ] ccompiler = GNUCompiler(cppargs=cppargs, incdirs=incdirs) if self._lib is None: - with open(self.src_file, 'w+') as f: + with open(self.src_file, "w+") as f: f.write(self.ccode) ccompiler.compile(self.src_file, self.lib_file, self.log_file) logger.info(f"Compiled ParcelsRandom ==> {self.src_file}") diff --git a/parcels/tools/converters.py b/parcels/tools/converters.py index 5f9e8205..b1dea2fd 100644 --- a/parcels/tools/converters.py +++ b/parcels/tools/converters.py @@ -7,9 +7,17 @@ import cftime import numpy as np import xarray as xr -__all__ = ['UnitConverter', 'Geographic', 'GeographicPolar', 'GeographicSquare', - 'GeographicPolarSquare', 'unitconverters_map', 'TimeConverter', - 'convert_xarray_time_units', 'convert_to_flat_array'] +__all__ = [ + "UnitConverter", + "Geographic", + "GeographicPolar", + "GeographicSquare", + "GeographicPolarSquare", + "unitconverters_map", + "TimeConverter", + "convert_xarray_time_units", + "convert_to_flat_array", +] def convert_to_flat_array(var): @@ -31,7 +39,7 @@ def convert_to_flat_array(var): def _get_cftime_datetimes(): # Is there a more elegant way to parse these from cftime? 
cftime_calendars = tuple(x[1].__name__ for x in inspect.getmembers(cftime._cftime, inspect.isclass)) - cftime_datetime_names = [ca for ca in cftime_calendars if 'Datetime' in ca] + cftime_datetime_names = [ca for ca in cftime_calendars if "Datetime" in ca] return cftime_datetime_names @@ -75,25 +83,29 @@ class TimeConverter: """ time = time.time_origin if isinstance(time, TimeConverter) else time - if self.calendar in ['np_datetime64', 'np_timedelta64']: - return (time - self.time_origin) / np.timedelta64(1, 's') + if self.calendar in ["np_datetime64", "np_timedelta64"]: + return (time - self.time_origin) / np.timedelta64(1, "s") elif self.calendar in _get_cftime_calendars(): if isinstance(time, (list, np.ndarray)): try: return np.array([(t - self.time_origin).total_seconds() for t in time]) except ValueError: - raise ValueError(f"Cannot subtract 'time' (a {type(time)} object) from a {self.calendar} calendar.\n" - f"Provide 'time' as a {type(self.time_origin)} object?") + raise ValueError( + f"Cannot subtract 'time' (a {type(time)} object) from a {self.calendar} calendar.\n" + f"Provide 'time' as a {type(self.time_origin)} object?" + ) else: try: return (time - self.time_origin).total_seconds() except ValueError: - raise ValueError(f"Cannot subtract 'time' (a {type(time)} object) from a {self.calendar} calendar.\n" - f"Provide 'time' as a {type(self.time_origin)} object?") + raise ValueError( + f"Cannot subtract 'time' (a {type(time)} object) from a {self.calendar} calendar.\n" + f"Provide 'time' as a {type(self.time_origin)} object?" + ) elif self.calendar is None: return time - self.time_origin else: - raise RuntimeError(f'Calendar {self.calendar} not implemented in TimeConverter') + raise RuntimeError(f"Calendar {self.calendar} not implemented in TimeConverter") def fulltime(self, time): """Method to convert a time difference in seconds to a date, based on the time_origin @@ -110,17 +122,17 @@ class TimeConverter: """ time = time.time_origin if isinstance(time, TimeConverter) else time - if self.calendar in ['np_datetime64', 'np_timedelta64']: + if self.calendar in ["np_datetime64", "np_timedelta64"]: if isinstance(time, (list, np.ndarray)): - return [self.time_origin + np.timedelta64(int(t), 's') for t in time] + return [self.time_origin + np.timedelta64(int(t), "s") for t in time] else: - return self.time_origin + np.timedelta64(int(time), 's') + return self.time_origin + np.timedelta64(int(time), "s") elif self.calendar in _get_cftime_calendars(): return self.time_origin + timedelta(seconds=time) elif self.calendar is None: return self.time_origin + time else: - raise RuntimeError(f'Calendar {self.calendar} not implemented in TimeConverter') + raise RuntimeError(f"Calendar {self.calendar} not implemented in TimeConverter") def __repr__(self): return "%s" % self.time_origin @@ -174,14 +186,14 @@ class UnitConverter: class Geographic(UnitConverter): """Unit converter from geometric to geographic coordinates (m to degree)""" - source_unit = 'm' - target_unit = 'degree' + source_unit = "m" + target_unit = "degree" def to_target(self, value, x, y, z): - return value / 1000. / 1.852 / 60. + return value / 1000.0 / 1.852 / 60.0 def to_source(self, value, x, y, z): - return value * 1000. * 1.852 * 60. + return value * 1000.0 * 1.852 * 60.0 def ccode_to_target(self, x, y, z): return "(1.0 / (1000.0 * 1.852 * 60.0))" @@ -195,14 +207,14 @@ class GeographicPolar(UnitConverter): with a correction to account for narrower grid cells closer to the poles. 
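The conversion factors in the `Geographic` and `GeographicPolar` hunks above encode that one degree of latitude spans 1000 * 1.852 * 60 m (60 nautical miles of 1852 m each), and that zonal distances additionally shrink by cos(latitude). A quick numeric check of both conversions:

from math import cos, pi

def m_to_deg_lat(value):
    # Geographic.to_target: metres to degrees of latitude
    return value / 1000.0 / 1.852 / 60.0

def m_to_deg_lon(value, lat):
    # GeographicPolar.to_target: metres to degrees of longitude at latitude `lat`
    return value / 1000.0 / 1.852 / 60.0 / cos(lat * pi / 180)

print(m_to_deg_lat(111120.0))        # -> 1.0 degree
print(m_to_deg_lon(111120.0, 60.0))  # -> ~2.0 degrees at 60N (cos 60 = 0.5)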
""" - source_unit = 'm' - target_unit = 'degree' + source_unit = "m" + target_unit = "degree" def to_target(self, value, x, y, z): - return value / 1000. / 1.852 / 60. / cos(y * pi / 180) + return value / 1000.0 / 1.852 / 60.0 / cos(y * pi / 180) def to_source(self, value, x, y, z): - return value * 1000. * 1.852 * 60. * cos(y * pi / 180) + return value * 1000.0 * 1.852 * 60.0 * cos(y * pi / 180) def ccode_to_target(self, x, y, z): return "(1.0 / (1000. * 1.852 * 60. * cos(%s * M_PI / 180)))" % y @@ -214,14 +226,14 @@ class GeographicPolar(UnitConverter): class GeographicSquare(UnitConverter): """Square distance converter from geometric to geographic coordinates (m2 to degree2)""" - source_unit = 'm2' - target_unit = 'degree2' + source_unit = "m2" + target_unit = "degree2" def to_target(self, value, x, y, z): - return value / pow(1000. * 1.852 * 60., 2) + return value / pow(1000.0 * 1.852 * 60.0, 2) def to_source(self, value, x, y, z): - return value * pow(1000. * 1.852 * 60., 2) + return value * pow(1000.0 * 1.852 * 60.0, 2) def ccode_to_target(self, x, y, z): return "pow(1.0 / (1000.0 * 1.852 * 60.0), 2)" @@ -235,14 +247,14 @@ class GeographicPolarSquare(UnitConverter): with a correction to account for narrower grid cells closer to the poles. """ - source_unit = 'm2' - target_unit = 'degree2' + source_unit = "m2" + target_unit = "degree2" def to_target(self, value, x, y, z): - return value / pow(1000. * 1.852 * 60. * cos(y * pi / 180), 2) + return value / pow(1000.0 * 1.852 * 60.0 * cos(y * pi / 180), 2) def to_source(self, value, x, y, z): - return value * pow(1000. * 1.852 * 60. * cos(y * pi / 180), 2) + return value * pow(1000.0 * 1.852 * 60.0 * cos(y * pi / 180), 2) def ccode_to_target(self, x, y, z): return "pow(1.0 / (1000. * 1.852 * 60. * cos(%s * M_PI / 180)), 2)" % y @@ -251,22 +263,28 @@ class GeographicPolarSquare(UnitConverter): return "pow((1000. * 1.852 * 60. * cos(%s * M_PI / 180)), 2)" % y -unitconverters_map = {'U': GeographicPolar(), 'V': Geographic(), - 'Kh_zonal': GeographicPolarSquare(), - 'Kh_meridional': GeographicSquare()} +unitconverters_map = { + "U": GeographicPolar(), + "V": Geographic(), + "Kh_zonal": GeographicPolarSquare(), + "Kh_meridional": GeographicSquare(), +} def convert_xarray_time_units(ds, time): """Fixes DataArrays that have time.Unit instead of expected time.units""" da = ds[time] if isinstance(ds, xr.Dataset) else ds - if 'units' not in da.attrs and 'Unit' in da.attrs: - da.attrs['units'] = da.attrs['Unit'] + if "units" not in da.attrs and "Unit" in da.attrs: + da.attrs["units"] = da.attrs["Unit"] da2 = xr.Dataset({time: da}) try: da2 = xr.decode_cf(da2) except ValueError: - raise RuntimeError('Xarray could not convert the calendar. If you''re using from_netcdf, ' - 'try using the timestamps keyword in the construction of your Field. ' - 'See also the tutorial at https://docs.oceanparcels.org/en/latest/' - 'examples/tutorial_timestamps.html') + raise RuntimeError( + "Xarray could not convert the calendar. If you" + "re using from_netcdf, " + "try using the timestamps keyword in the construction of your Field. 
" + "See also the tutorial at https://docs.oceanparcels.org/en/latest/" + "examples/tutorial_timestamps.html" + ) ds[time] = da2[time] diff --git a/parcels/tools/exampledata_utils.py b/parcels/tools/exampledata_utils.py index e8f493b6..60de3e6d 100644 --- a/parcels/tools/exampledata_utils.py +++ b/parcels/tools/exampledata_utils.py @@ -24,10 +24,7 @@ example_data_files = { ], "GlobCurrent_example_data": [ f"{date.strftime('%Y%m%d')}000000-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc" - for date in ( - [datetime(2002, 1, 1) + timedelta(days=x) for x in range(0, 365)] - + [datetime(2003, 1, 1)] - ) + for date in ([datetime(2002, 1, 1) + timedelta(days=x) for x in range(0, 365)] + [datetime(2003, 1, 1)]) ], "DecayingMovingEddy_data": [ "decaying_moving_eddyU.nc", @@ -135,8 +132,7 @@ def download_example_dataset(dataset: str, data_home=None): # Dev note: `dataset` is assumed to be a folder name with netcdf files if dataset not in example_data_files: raise ValueError( - f"Dataset {dataset!r} not found. Available datasets are: " - ", ".join(example_data_files.keys()) + f"Dataset {dataset!r} not found. Available datasets are: " ", ".join(example_data_files.keys()) ) cache_folder = get_data_home(data_home) diff --git a/parcels/tools/global_statics.py b/parcels/tools/global_statics.py index ed579a14..0e97bac0 100644 --- a/parcels/tools/global_statics.py +++ b/parcels/tools/global_statics.py @@ -9,9 +9,11 @@ try: except: # Windows does not have getuid(), so define to simply return 'tmp' def getuid(): - return 'tmp' + return "tmp" + + +__all__ = ["cleanup_remove_files", "cleanup_unload_lib", "get_package_dir", "get_cache_dir"] -__all__ = ['cleanup_remove_files', 'cleanup_unload_lib', 'get_package_dir', 'get_cache_dir'] def cleanup_remove_files(lib_file, log_file): if os.path.isfile(lib_file): @@ -23,7 +25,7 @@ def cleanup_unload_lib(lib): # This is not really necessary, as these programs are not that large, but with the new random # naming scheme which is required on Windows OS'es to deal with updates to a Parcels' kernel. 
if lib is not None: - _ctypes.FreeLibrary(lib._handle) if sys.platform == 'win32' else _ctypes.dlclose(lib._handle) + _ctypes.FreeLibrary(lib._handle) if sys.platform == "win32" else _ctypes.dlclose(lib._handle) def get_package_dir(): diff --git a/parcels/tools/interpolation_utils.py b/parcels/tools/interpolation_utils.py index d6061df8..c884fcd1 100644 --- a/parcels/tools/interpolation_utils.py +++ b/parcels/tools/interpolation_utils.py @@ -1,70 +1,73 @@ # flake8: noqa import numpy as np -__all__=[] +__all__ = [] + def phi1D_lin(xsi): - phi = [1-xsi, - xsi] + phi = [1 - xsi, xsi] return phi def phi1D_quad(xsi): - phi = [2*xsi**2-3*xsi+1, - -4*xsi**2+4*xsi, - 2*xsi**2-xsi] + phi = [2 * xsi**2 - 3 * xsi + 1, -4 * xsi**2 + 4 * xsi, 2 * xsi**2 - xsi] return phi def phi2D_lin(xsi, eta): - phi = [(1-xsi) * (1-eta), - xsi * (1-eta), - xsi * eta , - (1-xsi) * eta ] + phi = [(1 - xsi) * (1 - eta), xsi * (1 - eta), xsi * eta, (1 - xsi) * eta] return phi def phi3D_lin(xsi, eta, zet): - phi = [(1-xsi) * (1-eta) * (1-zet), - xsi * (1-eta) * (1-zet), - xsi * eta * (1-zet), - (1-xsi) * eta * (1-zet), - (1-xsi) * (1-eta) * zet , - xsi * (1-eta) * zet , - xsi * eta * zet , - (1-xsi) * eta * zet ] + phi = [ + (1 - xsi) * (1 - eta) * (1 - zet), + xsi * (1 - eta) * (1 - zet), + xsi * eta * (1 - zet), + (1 - xsi) * eta * (1 - zet), + (1 - xsi) * (1 - eta) * zet, + xsi * (1 - eta) * zet, + xsi * eta * zet, + (1 - xsi) * eta * zet, + ] return phi def dphidxsi3D_lin(xsi, eta, zet): - dphidxsi = [ - (1-eta) * (1-zet), - (1-eta) * (1-zet), - ( eta) * (1-zet), - - ( eta) * (1-zet), - - (1-eta) * ( zet), - (1-eta) * ( zet), - ( eta) * ( zet), - - ( eta) * ( zet)] - dphideta = [ - (1-xsi) * (1-zet), - - ( xsi) * (1-zet), - ( xsi) * (1-zet), - (1-xsi) * (1-zet), - - (1-xsi) * ( zet), - - ( xsi) * ( zet), - ( xsi) * ( zet), - (1-xsi) * ( zet)] - dphidzet = [ - (1-xsi) * (1-eta), - - ( xsi) * (1-eta), - - ( xsi) * ( eta), - - (1-xsi) * ( eta), - (1-xsi) * (1-eta), - ( xsi) * (1-eta), - ( xsi) * ( eta), - (1-xsi) * ( eta)] + dphidxsi = [ + -(1 - eta) * (1 - zet), + (1 - eta) * (1 - zet), + (eta) * (1 - zet), + -(eta) * (1 - zet), + -(1 - eta) * (zet), + (1 - eta) * (zet), + (eta) * (zet), + -(eta) * (zet), + ] + dphideta = [ + -(1 - xsi) * (1 - zet), + -(xsi) * (1 - zet), + (xsi) * (1 - zet), + (1 - xsi) * (1 - zet), + -(1 - xsi) * (zet), + -(xsi) * (zet), + (xsi) * (zet), + (1 - xsi) * (zet), + ] + dphidzet = [ + -(1 - xsi) * (1 - eta), + -(xsi) * (1 - eta), + -(xsi) * (eta), + -(1 - xsi) * (eta), + (1 - xsi) * (1 - eta), + (xsi) * (1 - eta), + (xsi) * (eta), + (1 - xsi) * (eta), + ] return dphidxsi, dphideta, dphidzet @@ -72,13 +75,15 @@ def dphidxsi3D_lin(xsi, eta, zet): def dxdxsi3D_lin(hexa_x, hexa_y, hexa_z, xsi, eta, zet, mesh): dphidxsi, dphideta, dphidzet = dphidxsi3D_lin(xsi, eta, zet) - if mesh == 'spherical': - deg2m = 1852 * 60. - rad = np.pi / 180. 
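The trilinear basis functions reformatted above satisfy two invariants that make a handy sanity check: the eight `phi3D_lin` weights form a partition of unity everywhere in the unit cube, and each corner node receives weight one at its own corner. A self-contained check:

def phi3D_lin(xsi, eta, zet):
    # Trilinear shape functions on the unit cube, as in interpolation_utils.py
    return [
        (1 - xsi) * (1 - eta) * (1 - zet),
        xsi * (1 - eta) * (1 - zet),
        xsi * eta * (1 - zet),
        (1 - xsi) * eta * (1 - zet),
        (1 - xsi) * (1 - eta) * zet,
        xsi * (1 - eta) * zet,
        xsi * eta * zet,
        (1 - xsi) * eta * zet,
    ]

phi = phi3D_lin(0.25, 0.5, 0.75)
assert abs(sum(phi) - 1.0) < 1e-12   # partition of unity
assert phi3D_lin(0, 0, 0)[0] == 1.0  # full weight at corner (0, 0, 0)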
- lat = (1-xsi) * (1-eta) * hexa_y[0] + \ - xsi * (1-eta) * hexa_y[1] + \ - xsi * eta * hexa_y[2] + \ - (1-xsi) * eta * hexa_y[3] + if mesh == "spherical": + deg2m = 1852 * 60.0 + rad = np.pi / 180.0 + lat = ( + (1 - xsi) * (1 - eta) * hexa_y[0] + + xsi * (1 - eta) * hexa_y[1] + + xsi * eta * hexa_y[2] + + (1 - xsi) * eta * hexa_y[3] + ) jac_lon = deg2m * np.cos(rad * lat) jac_lat = deg2m else: @@ -99,48 +104,47 @@ def dxdxsi3D_lin(hexa_x, hexa_y, hexa_z, xsi, eta, zet, mesh): def jacobian3D_lin(hexa_x, hexa_y, hexa_z, xsi, eta, zet, mesh): - dxdxsi, dxdeta, dxdzet, dydxsi, dydeta, dydzet, dzdxsi, dzdeta, dzdzet = dxdxsi3D_lin(hexa_x, hexa_y, hexa_z, xsi, eta, zet, mesh) - - jac = dxdxsi * (dydeta*dzdzet - dzdeta*dydzet)\ - - dxdeta * (dydxsi*dzdzet - dzdxsi*dydzet)\ - + dxdzet * (dydxsi*dzdeta - dzdxsi*dydeta) + dxdxsi, dxdeta, dxdzet, dydxsi, dydeta, dydzet, dzdxsi, dzdeta, dzdzet = dxdxsi3D_lin( + hexa_x, hexa_y, hexa_z, xsi, eta, zet, mesh + ) + + jac = ( + dxdxsi * (dydeta * dzdzet - dzdeta * dydzet) + - dxdeta * (dydxsi * dzdzet - dzdxsi * dydzet) + + dxdzet * (dydxsi * dzdeta - dzdxsi * dydeta) + ) return jac def jacobian3D_lin_face(hexa_x, hexa_y, hexa_z, xsi, eta, zet, orientation, mesh): - dxdxsi, dxdeta, dxdzet, dydxsi, dydeta, dydzet, dzdxsi, dzdeta, dzdzet = dxdxsi3D_lin(hexa_x, hexa_y, hexa_z, xsi, eta, zet, mesh) - - if orientation == 'zonal': - j = [dydeta*dzdzet-dydzet*dzdeta, - -dxdeta*dzdzet+dxdzet*dzdeta, - dxdeta*dydzet-dxdzet*dydeta] - elif orientation == 'meridional': - j = [dydxsi*dzdzet-dydzet*dzdxsi, - -dxdxsi*dzdzet+dxdzet*dzdxsi, - dxdxsi*dydzet-dxdzet*dydxsi] - elif orientation == 'vertical': - j = [dydxsi*dzdeta-dydeta*dzdxsi, - -dxdxsi*dzdeta+dxdeta*dzdxsi, - dxdxsi*dydeta-dxdeta*dydxsi] - - jac = np.sqrt(j[0]**2+j[1]**2+j[2]**2) + dxdxsi, dxdeta, dxdzet, dydxsi, dydeta, dydzet, dzdxsi, dzdeta, dzdzet = dxdxsi3D_lin( + hexa_x, hexa_y, hexa_z, xsi, eta, zet, mesh + ) + + if orientation == "zonal": + j = [dydeta * dzdzet - dydzet * dzdeta, -dxdeta * dzdzet + dxdzet * dzdeta, dxdeta * dydzet - dxdzet * dydeta] + elif orientation == "meridional": + j = [dydxsi * dzdzet - dydzet * dzdxsi, -dxdxsi * dzdzet + dxdzet * dzdxsi, dxdxsi * dydzet - dxdzet * dydxsi] + elif orientation == "vertical": + j = [dydxsi * dzdeta - dydeta * dzdxsi, -dxdxsi * dzdeta + dxdeta * dzdxsi, dxdxsi * dydeta - dxdeta * dydxsi] + + jac = np.sqrt(j[0] ** 2 + j[1] ** 2 + j[2] ** 2) return jac def dphidxsi2D_lin(xsi, eta): - dphidxsi = [-(1-eta), - 1-eta, - eta, - - eta] - dphideta = [-(1-xsi), - - xsi, - xsi, - 1-xsi] + dphidxsi = [-(1 - eta), 1 - eta, eta, -eta] + dphideta = [-(1 - xsi), -xsi, xsi, 1 - xsi] return dphidxsi, dphideta -def dxdxsi2D_lin(quad_x, quad_y, xsi, eta,): +def dxdxsi2D_lin( + quad_x, + quad_y, + xsi, + eta, +): dphidxsi, dphideta = dphidxsi2D_lin(xsi, eta) dxdxsi = np.dot(quad_x, dphidxsi) @@ -154,14 +158,14 @@ def dxdxsi2D_lin(quad_x, quad_y, xsi, eta,): def jacobian2D_lin(quad_x, quad_y, xsi, eta): dxdxsi, dxdeta, dydxsi, dydeta = dxdxsi2D_lin(quad_x, quad_y, xsi, eta) - jac = dxdxsi*dydeta - dxdeta*dydxsi + jac = dxdxsi * dydeta - dxdeta * dydxsi return jac def length2d_lin_edge(quad_x, quad_y, ids): xe = [quad_x[ids[0]], quad_x[ids[1]]] ye = [quad_y[ids[0]], quad_y[ids[1]]] - return np.sqrt((xe[1]-xe[0])**2+(ye[1]-ye[0])**2) + return np.sqrt((xe[1] - xe[0]) ** 2 + (ye[1] - ye[0]) ** 2) def interpolate(phi, f, xsi): diff --git a/parcels/tools/statuscodes.py b/parcels/tools/statuscodes.py index 6eeb5bed..06186d58 100644 --- a/parcels/tools/statuscodes.py +++ 
b/parcels/tools/statuscodes.py @@ -1,8 +1,13 @@ """Handling of Errors and particle status codes""" - -__all__ = ['StatusCode', 'FieldSamplingError', 'FieldOutOfBoundError', 'TimeExtrapolationError', - 'KernelError', 'AllParcelsErrorCodes'] +__all__ = [ + "StatusCode", + "FieldSamplingError", + "FieldOutOfBoundError", + "TimeExtrapolationError", + "KernelError", + "AllParcelsErrorCodes", +] class StatusCode: @@ -61,20 +66,22 @@ class FieldOutOfBoundSurfaceError(RuntimeError): self.x = x self.y = y self.z = z - message = f"{field.name if field else 'Field'} sampled out-of-bound at the surface, at ({self.x}, {self.y}, {self.z})" + message = ( + f"{field.name if field else 'Field'} sampled out-of-bound at the surface, at ({self.x}, {self.y}, {self.z})" + ) super().__init__(message) class TimeExtrapolationError(RuntimeError): """Utility error class to propagate erroneous time extrapolation sampling.""" - def __init__(self, time, field=None, msg='allow_time_extrapoltion'): + def __init__(self, time, field=None, msg="allow_time_extrapoltion"): if field is not None and field.grid.time_origin and time is not None: time = field.grid.time_origin.fulltime(time) message = f"{field.name if field else 'Field'} sampled outside time domain at time {time}." - if msg == 'allow_time_extrapoltion': + if msg == "allow_time_extrapoltion": message += " Try setting allow_time_extrapolation to True" - elif msg == 'show_time': + elif msg == "show_time": message += " Try explicitly providing a 'show_time'" else: message += msg + " Try setting allow_time_extrapolation to True" @@ -85,10 +92,12 @@ class KernelError(RuntimeError): """General particle kernel error with optional custom message.""" def __init__(self, particle, fieldset=None, msg=None): - message = (f"{particle.state}\n" - f"Particle {particle}\n" - f"Time: {parse_particletime(particle.time, fieldset)}\n" - f"timestep dt: {particle.dt}\n") + message = ( + f"{particle.state}\n" + f"Particle {particle}\n" + f"Time: {parse_particletime(particle.time, fieldset)}\n" + f"timestep dt: {particle.dt}\n" + ) if msg: message += msg super().__init__(message) @@ -111,9 +120,10 @@ class InterpolationError(KernelError): super().__init__(particle, fieldset=fieldset, msg=message) -AllParcelsErrorCodes = {FieldSamplingError: StatusCode.Error, - FieldOutOfBoundError: StatusCode.ErrorOutOfBounds, - FieldOutOfBoundSurfaceError: StatusCode.ErrorThroughSurface, - TimeExtrapolationError: StatusCode.ErrorTimeExtrapolation, - KernelError: StatusCode.Error, - } +AllParcelsErrorCodes = { + FieldSamplingError: StatusCode.Error, + FieldOutOfBoundError: StatusCode.ErrorOutOfBounds, + FieldOutOfBoundSurfaceError: StatusCode.ErrorThroughSurface, + TimeExtrapolationError: StatusCode.ErrorTimeExtrapolation, + KernelError: StatusCode.Error, +} diff --git a/parcels/tools/timer.py b/parcels/tools/timer.py index 2071c215..02daf75a 100644 --- a/parcels/tools/timer.py +++ b/parcels/tools/timer.py @@ -8,7 +8,8 @@ except ModuleNotFoundError: __all__ = [] -class Timer(): + +class Timer: def __init__(self, name, parent=None, start=True): self._start = None self._t = 0 @@ -22,13 +23,13 @@ class Timer(): def start(self): if self._parent: - assert self._parent._start, (f"Timer '{self._name}' cannot be started. Its parent timer does not run") + assert self._parent._start, f"Timer '{self._name}' cannot be started. 
Its parent timer does not run" if self._start is not None: - raise RuntimeError(f'Timer {self._name} cannot start since it is already running') + raise RuntimeError(f"Timer {self._name} cannot start since it is already running") self._start = time.time() def stop(self): - assert self._start, (f"Timer '{self._name}' was stopped before being started") + assert self._start, f"Timer '{self._name}' was stopped before being started" self._t += time.time() - self._start self._start = None @@ -45,14 +46,14 @@ class Timer(): time = self.local_time() if step == 0: root_time = time - print(('(%3d%%)' % round(time/root_time*100)), end='') - print(' ' * (step + 1), end='') + print(("(%3d%%)" % round(time / root_time * 100)), end="") + print(" " * (step + 1), end="") if step > 0: - print('(%3d%%) ' % round(time/parent_time*100), end='') - t_str = '%1.3e s' % time if root_time < 300 else datetime.timedelta(seconds=time) + print("(%3d%%) " % round(time / parent_time * 100), end="") + t_str = "%1.3e s" % time if root_time < 300 else datetime.timedelta(seconds=time) print(f"Timer {(self._name).ljust(20 - 2*step + 7*(step == 0))}: {t_str}") for child in self._children: - child.print_tree_sequential(step+1, root_time, time) + child.print_tree_sequential(step + 1, root_time, time) def print_tree(self, step=0, root_time=0, parent_time=0): if MPI is None: diff --git a/pyproject.toml b/pyproject.toml index 5604c668..35e9cb78 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -55,6 +55,7 @@ python_files = ["test_*.py", "example_*.py", "*tutorial*"] [tool.ruff] target-version = "py38" +line-length = 120 [tool.ruff.lint] exclude = ["*.ipynb"] @@ -120,51 +121,51 @@ ignore = [ convention = "numpy" [tool.ruff.format] -exclude = [ - "parcels/tools/interpolation_utils.py", - - # List of files to ignore formatting (ordered by ascending line count) - "tests/test_mpirun.py", - "parcels/tools/global_statics.py", - "tests/test_tools.py", - "parcels/gridset.py", - "parcels/tools/timer.py", - "parcels/application_kernels/TEOSseawaterdensity.py", - "tests/test_particles.py", - "parcels/application_kernels/interaction.py", - "parcels/application_kernels/advectiondiffusion.py", - "parcels/tools/statuscodes.py", - "tests/test_data/create_testfields.py", - "tests/test_diffusion.py", - "parcels/tools/exampledata_utils.py", - "parcels/interaction/neighborsearch/hashflat.py", - "parcels/interaction/neighborsearch/hashspherical.py", - "parcels/interaction/neighborsearch/basehash.py", - "parcels/rng.py", - "parcels/interaction/neighborsearch/base.py", - "parcels/interaction/interactionkernel.py", - "parcels/tools/converters.py", - "parcels/application_kernels/advection.py", - "parcels/particlefile.py", - "parcels/compilation/codecompiler.py", - "parcels/particle.py", - "tests/test_interaction.py", - "tests/test_particlefile.py", - "parcels/application_kernels/EOSseawaterproperties.py", - "tests/test_kernel_execution.py", - "tests/test_particlesets.py", - "parcels/particledata.py", - "tests/test_kernel_language.py", - "docs/conf.py", - "parcels/grid.py", - "tests/test_advection.py", - "parcels/kernel.py", - "parcels/fieldfilebuffer.py", - "tests/test_fieldset_sampling.py", - "parcels/compilation/codegenerator.py", - "tests/test_grids.py", - "parcels/particleset.py", - "tests/test_fieldset.py", - "parcels/fieldset.py", - "parcels/field.py", -] +# exclude = [ +# "parcels/tools/interpolation_utils.py", + +# # List of files to ignore formatting (ordered by ascending line count) +# "tests/test_mpirun.py", +# "parcels/tools/global_statics.py", 
+# "tests/test_tools.py", +# "parcels/gridset.py", +# "parcels/tools/timer.py", +# "parcels/application_kernels/TEOSseawaterdensity.py", +# "tests/test_particles.py", +# "parcels/application_kernels/interaction.py", +# "parcels/application_kernels/advectiondiffusion.py", +# "parcels/tools/statuscodes.py", +# "tests/test_data/create_testfields.py", +# "tests/test_diffusion.py", +# "parcels/tools/exampledata_utils.py", +# "parcels/interaction/neighborsearch/hashflat.py", +# "parcels/interaction/neighborsearch/hashspherical.py", +# "parcels/interaction/neighborsearch/basehash.py", +# "parcels/rng.py", +# "parcels/interaction/neighborsearch/base.py", +# "parcels/interaction/interactionkernel.py", +# "parcels/tools/converters.py", +# "parcels/application_kernels/advection.py", +# "parcels/particlefile.py", +# "parcels/compilation/codecompiler.py", +# "parcels/particle.py", +# "tests/test_interaction.py", +# "tests/test_particlefile.py", +# "parcels/application_kernels/EOSseawaterproperties.py", +# "tests/test_kernel_execution.py", +# "tests/test_particlesets.py", +# "parcels/particledata.py", +# "tests/test_kernel_language.py", +# "docs/conf.py", +# "parcels/grid.py", +# "tests/test_advection.py", +# "parcels/kernel.py", +# "parcels/fieldfilebuffer.py", +# "tests/test_fieldset_sampling.py", +# "parcels/compilation/codegenerator.py", +# "tests/test_grids.py", +# "parcels/particleset.py", +# "tests/test_fieldset.py", +# "parcels/fieldset.py", +# "parcels/field.py", +# ] diff --git a/tests/test_advection.py b/tests/test_advection.py index b48946d0..084ea12f 100644 --- a/tests/test_advection.py +++ b/tests/test_advection.py @@ -21,16 +21,22 @@ from parcels import ( StatusCode, ) -ptype = {'scipy': ScipyParticle, 'jit': JITParticle} -kernel = {'EE': AdvectionEE, 'RK4': AdvectionRK4, 'RK45': AdvectionRK45, 'AA': AdvectionAnalytical, - 'AdvDiffEM': AdvectionDiffusionEM, 'AdvDiffM1': AdvectionDiffusionM1} +ptype = {"scipy": ScipyParticle, "jit": JITParticle} +kernel = { + "EE": AdvectionEE, + "RK4": AdvectionRK4, + "RK45": AdvectionRK45, + "AA": AdvectionAnalytical, + "AdvDiffEM": AdvectionDiffusionEM, + "AdvDiffM1": AdvectionDiffusionM1, +} # Some constants -f = 1.e-4 +f = 1.0e-4 u_0 = 0.3 u_g = 0.04 -gamma = 1/(86400. * 2.89) -gamma_g = 1/(86400. 
* 28.9) +gamma = 1 / (86400.0 * 2.89) +gamma_g = 1 / (86400.0 * 28.9) def lon(xdim=200): @@ -60,83 +66,89 @@ def depth_fixture(zdim=2): return depth(zdim=zdim) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_advection_zonal(lon, lat, depth, mode, npart=10): """Particles at high latitude move geographically faster due to the pole correction in `GeographicPolar`.""" - data2D = {'U': np.ones((lon.size, lat.size), dtype=np.float32), - 'V': np.zeros((lon.size, lat.size), dtype=np.float32)} - data3D = {'U': np.ones((lon.size, lat.size, depth.size), dtype=np.float32), - 'V': np.zeros((lon.size, lat.size, depth.size), dtype=np.float32)} - dimensions = {'lon': lon, 'lat': lat} - fieldset2D = FieldSet.from_data(data2D, dimensions, mesh='spherical', transpose=True) - assert fieldset2D.U.creation_log == 'from_data' - - pset2D = ParticleSet(fieldset2D, pclass=ptype[mode], - lon=np.zeros(npart) + 20., - lat=np.linspace(0, 80, npart)) + data2D = { + "U": np.ones((lon.size, lat.size), dtype=np.float32), + "V": np.zeros((lon.size, lat.size), dtype=np.float32), + } + data3D = { + "U": np.ones((lon.size, lat.size, depth.size), dtype=np.float32), + "V": np.zeros((lon.size, lat.size, depth.size), dtype=np.float32), + } + dimensions = {"lon": lon, "lat": lat} + fieldset2D = FieldSet.from_data(data2D, dimensions, mesh="spherical", transpose=True) + assert fieldset2D.U.creation_log == "from_data" + + pset2D = ParticleSet(fieldset2D, pclass=ptype[mode], lon=np.zeros(npart) + 20.0, lat=np.linspace(0, 80, npart)) pset2D.execute(AdvectionRK4, runtime=timedelta(hours=2), dt=timedelta(seconds=30)) - assert (np.diff(pset2D.lon) > 1.e-4).all() - - dimensions['depth'] = depth - fieldset3D = FieldSet.from_data(data3D, dimensions, mesh='spherical', transpose=True) - pset3D = ParticleSet(fieldset3D, pclass=ptype[mode], - lon=np.zeros(npart) + 20., - lat=np.linspace(0, 80, npart), - depth=np.zeros(npart) + 10.) 
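A condensed, standalone version of the zonal-advection check above, assuming a working Parcels v3 installation; the arrays follow the (lat, lon) shape convention the tests use when `transpose` is not set. A uniform 1 m/s eastward flow on a spherical mesh displaces high-latitude particles over more degrees of longitude, because `GeographicPolar` divides by cos(lat):

from datetime import timedelta
import numpy as np
from parcels import AdvectionRK4, FieldSet, ParticleSet, ScipyParticle

lon = np.linspace(-20, 80, 50, dtype=np.float32)
lat = np.linspace(-10, 90, 50, dtype=np.float32)
data = {"U": np.ones((lat.size, lon.size), dtype=np.float32),
        "V": np.zeros((lat.size, lon.size), dtype=np.float32)}
fieldset = FieldSet.from_data(data, {"lon": lon, "lat": lat}, mesh="spherical")

pset = ParticleSet(fieldset, pclass=ScipyParticle,
                   lon=np.full(5, 20.0), lat=np.linspace(0, 80, 5))
pset.execute(AdvectionRK4, runtime=timedelta(hours=2), dt=timedelta(seconds=30))
assert (np.diff(pset.lon) > 0).all()  # displacement grows with release latitude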
+ assert (np.diff(pset2D.lon) > 1.0e-4).all() + + dimensions["depth"] = depth + fieldset3D = FieldSet.from_data(data3D, dimensions, mesh="spherical", transpose=True) + pset3D = ParticleSet( + fieldset3D, + pclass=ptype[mode], + lon=np.zeros(npart) + 20.0, + lat=np.linspace(0, 80, npart), + depth=np.zeros(npart) + 10.0, + ) pset3D.execute(AdvectionRK4, runtime=timedelta(hours=2), dt=timedelta(seconds=30)) - assert (np.diff(pset3D.lon) > 1.e-4).all() + assert (np.diff(pset3D.lon) > 1.0e-4).all() -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_advection_meridional(lon, lat, mode, npart=10): """Particles at high latitude move geographically faster due to the pole correction in `GeographicPolar`.""" - data = {'U': np.zeros((lon.size, lat.size), dtype=np.float32), - 'V': np.ones((lon.size, lat.size), dtype=np.float32)} - dimensions = {'lon': lon, 'lat': lat} - fieldset = FieldSet.from_data(data, dimensions, mesh='spherical', transpose=True) - - pset = ParticleSet(fieldset, pclass=ptype[mode], - lon=np.linspace(-60, 60, npart), - lat=np.linspace(0, 30, npart)) + data = {"U": np.zeros((lon.size, lat.size), dtype=np.float32), "V": np.ones((lon.size, lat.size), dtype=np.float32)} + dimensions = {"lon": lon, "lat": lat} + fieldset = FieldSet.from_data(data, dimensions, mesh="spherical", transpose=True) + + pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(-60, 60, npart), lat=np.linspace(0, 30, npart)) delta_lat = np.diff(pset.lat) pset.execute(AdvectionRK4, runtime=timedelta(hours=2), dt=timedelta(seconds=30)) - assert np.allclose(np.diff(pset.lat), delta_lat, rtol=1.e-4) + assert np.allclose(np.diff(pset.lat), delta_lat, rtol=1.0e-4) -@pytest.mark.parametrize('mode', ['jit', 'scipy']) +@pytest.mark.parametrize("mode", ["jit", "scipy"]) def test_advection_3D(mode, npart=11): """Flat 2D zonal flow that increases linearly with depth from 0 m/s to 1 m/s.""" xdim = ydim = zdim = 2 - dimensions = {'lon': np.linspace(0., 1e4, xdim, dtype=np.float32), - 'lat': np.linspace(0., 1e4, ydim, dtype=np.float32), - 'depth': np.linspace(0., 1., zdim, dtype=np.float32)} - data = {'U': np.ones((xdim, ydim, zdim), dtype=np.float32), - 'V': np.zeros((xdim, ydim, zdim), dtype=np.float32)} - data['U'][:, :, 0] = 0. 
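The 3D test above builds a shear flow u(z) = z (zero at the surface, 1 m/s at depth 1), so a particle that stays at depth z should end up near lon = z * t, which is what the `np.allclose(pset.depth * pset.time, pset.lon, atol=1.0e-1)` assertion encodes. The expectation can be computed without running Parcels at all:

import numpy as np

t = 2 * 3600.0                  # two hours, as in the test
depth = np.linspace(0, 1, 11)   # particle release depths
lon_expected = depth * t        # what the test compares pset.lon against
print(lon_expected[:3])         # -> [0., 720., 1440.]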
- fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) - - pset = ParticleSet(fieldset, pclass=ptype[mode], - lon=np.zeros(npart), - lat=np.zeros(npart) + 1e2, - depth=np.linspace(0, 1, npart)) + dimensions = { + "lon": np.linspace(0.0, 1e4, xdim, dtype=np.float32), + "lat": np.linspace(0.0, 1e4, ydim, dtype=np.float32), + "depth": np.linspace(0.0, 1.0, zdim, dtype=np.float32), + } + data = {"U": np.ones((xdim, ydim, zdim), dtype=np.float32), "V": np.zeros((xdim, ydim, zdim), dtype=np.float32)} + data["U"][:, :, 0] = 0.0 + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) + + pset = ParticleSet( + fieldset, pclass=ptype[mode], lon=np.zeros(npart), lat=np.zeros(npart) + 1e2, depth=np.linspace(0, 1, npart) + ) time = timedelta(hours=2).total_seconds() pset.execute(AdvectionRK4, runtime=time, dt=timedelta(seconds=30)) - assert np.allclose(pset.depth*pset.time, pset.lon, atol=1.e-1) + assert np.allclose(pset.depth * pset.time, pset.lon, atol=1.0e-1) -@pytest.mark.parametrize('mode', ['jit', 'scipy']) -@pytest.mark.parametrize('direction', ['up', 'down']) -@pytest.mark.parametrize('wErrorThroughSurface', [True, False]) +@pytest.mark.parametrize("mode", ["jit", "scipy"]) +@pytest.mark.parametrize("direction", ["up", "down"]) +@pytest.mark.parametrize("wErrorThroughSurface", [True, False]) def test_advection_3D_outofbounds(mode, direction, wErrorThroughSurface): xdim = ydim = zdim = 2 - dimensions = {'lon': np.linspace(0., 1, xdim, dtype=np.float32), - 'lat': np.linspace(0., 1, ydim, dtype=np.float32), - 'depth': np.linspace(0., 1, zdim, dtype=np.float32)} - wfac = -1. if direction == 'up' else 1. - data = {'U': 0.01*np.ones((xdim, ydim, zdim), dtype=np.float32), - 'V': np.zeros((xdim, ydim, zdim), dtype=np.float32), - 'W': wfac * np.ones((xdim, ydim, zdim), dtype=np.float32)} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') + dimensions = { + "lon": np.linspace(0.0, 1, xdim, dtype=np.float32), + "lat": np.linspace(0.0, 1, ydim, dtype=np.float32), + "depth": np.linspace(0.0, 1, zdim, dtype=np.float32), + } + wfac = -1.0 if direction == "up" else 1.0 + data = { + "U": 0.01 * np.ones((xdim, ydim, zdim), dtype=np.float32), + "V": np.zeros((xdim, ydim, zdim), dtype=np.float32), + "W": wfac * np.ones((xdim, ydim, zdim), dtype=np.float32), + } + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") def DeleteParticle(particle, fieldset, time): if particle.state == StatusCode.ErrorOutOfBounds or particle.state == StatusCode.ErrorThroughSurface: @@ -147,7 +159,7 @@ def test_advection_3D_outofbounds(mode, direction, wErrorThroughSurface): (u, v) = fieldset.UV[particle] particle_dlon = u * particle.dt # noqa particle_dlat = v * particle.dt # noqa - particle_ddepth = 0. 
# noqa + particle_ddepth = 0.0 # noqa particle.depth = 0 particle.state = StatusCode.Evaluate @@ -157,42 +169,43 @@ def test_advection_3D_outofbounds(mode, direction, wErrorThroughSurface): kernels.append(DeleteParticle) pset = ParticleSet(fieldset=fieldset, pclass=ptype[mode], lon=0.5, lat=0.5, depth=0.9) - pset.execute(kernels, runtime=11., dt=1) + pset.execute(kernels, runtime=11.0, dt=1) - if direction == 'up' and wErrorThroughSurface: + if direction == "up" and wErrorThroughSurface: assert np.allclose(pset.lon[0], 0.6) assert np.allclose(pset.depth[0], 0) else: assert len(pset) == 0 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('rk45_tol', [10, 100]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("rk45_tol", [10, 100]) def test_advection_RK45(lon, lat, mode, rk45_tol, npart=10): - data2D = {'U': np.ones((lon.size, lat.size), dtype=np.float32), - 'V': np.zeros((lon.size, lat.size), dtype=np.float32)} - dimensions = {'lon': lon, 'lat': lat} - fieldset = FieldSet.from_data(data2D, dimensions, mesh='spherical', transpose=True) - fieldset.add_constant('RK45_tol', rk45_tol) + data2D = { + "U": np.ones((lon.size, lat.size), dtype=np.float32), + "V": np.zeros((lon.size, lat.size), dtype=np.float32), + } + dimensions = {"lon": lon, "lat": lat} + fieldset = FieldSet.from_data(data2D, dimensions, mesh="spherical", transpose=True) + fieldset.add_constant("RK45_tol", rk45_tol) dt = timedelta(seconds=30).total_seconds() - RK45Particles = ptype[mode].add_variable('next_dt', dtype=np.float32, initial=dt) - pset = ParticleSet(fieldset, pclass=RK45Particles, - lon=np.zeros(npart) + 20., - lat=np.linspace(0, 80, npart)) + RK45Particles = ptype[mode].add_variable("next_dt", dtype=np.float32, initial=dt) + pset = ParticleSet(fieldset, pclass=RK45Particles, lon=np.zeros(npart) + 20.0, lat=np.linspace(0, 80, npart)) pset.execute(AdvectionRK45, runtime=timedelta(hours=2), dt=dt) - assert (np.diff(pset.lon) > 1.e-4).all() - assert np.isclose(fieldset.RK45_tol, rk45_tol/(1852*60)) + assert (np.diff(pset.lon) > 1.0e-4).all() + assert np.isclose(fieldset.RK45_tol, rk45_tol / (1852 * 60)) print(fieldset.RK45_tol) def periodicfields(xdim, ydim, uvel, vvel): - dimensions = {'lon': np.linspace(0., 1., xdim+1, dtype=np.float32)[1:], # don't include both 0 and 1, for periodic b.c. - 'lat': np.linspace(0., 1., ydim+1, dtype=np.float32)[1:]} + dimensions = { + "lon": np.linspace(0.0, 1.0, xdim + 1, dtype=np.float32)[1:], # don't include both 0 and 1, for periodic b.c. + "lat": np.linspace(0.0, 1.0, ydim + 1, dtype=np.float32)[1:], + } - data = {'U': uvel * np.ones((xdim, ydim), dtype=np.float32), - 'V': vvel * np.ones((xdim, ydim), dtype=np.float32)} - return FieldSet.from_data(data, dimensions, mesh='spherical', transpose=True) + data = {"U": uvel * np.ones((xdim, ydim), dtype=np.float32), "V": vvel * np.ones((xdim, ydim), dtype=np.float32)} + return FieldSet.from_data(data, dimensions, mesh="spherical", transpose=True) def periodicBC(particle, fieldset, time): @@ -200,9 +213,9 @@ def periodicBC(particle, fieldset, time): particle.lat = math.fmod(particle.lat, 1) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_advection_periodic_zonal(mode, xdim=100, ydim=100, halosize=3): - fieldset = periodicfields(xdim, ydim, uvel=1., vvel=0.) 
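The `periodicBC` kernel above wraps positions back into the unit domain with `math.fmod` after every step; the halo added by `add_periodic_halo` is what keeps field interpolation valid near the seam. Note that `fmod` keeps the sign of its first argument, so westward-exiting particles stay slightly negative until the halo covers them:

import math

def wrap(x, width=1.0):
    # Same wrap as the periodicBC kernel, for one coordinate
    return math.fmod(x, width)

print(wrap(1.15))   # -> ~0.15 (particle re-enters from the west)
print(wrap(-0.05))  # -> -0.05 (fmod preserves the negative sign)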
+ fieldset = periodicfields(xdim, ydim, uvel=1.0, vvel=0.0) fieldset.add_periodic_halo(zonal=True, halosize=halosize) assert len(fieldset.U.lon) == xdim + 2 * halosize @@ -211,9 +224,9 @@ def test_advection_periodic_zonal(mode, xdim=100, ydim=100, halosize=3): assert abs(pset.lon[0] - 0.15) < 0.1 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_advection_periodic_meridional(mode, xdim=100, ydim=100): - fieldset = periodicfields(xdim, ydim, uvel=0., vvel=1.) + fieldset = periodicfields(xdim, ydim, uvel=0.0, vvel=1.0) fieldset.add_periodic_halo(meridional=True) assert len(fieldset.U.lat) == ydim + 10 # default halo size is 5 grid points @@ -222,14 +235,14 @@ def test_advection_periodic_meridional(mode, xdim=100, ydim=100): assert abs(pset.lat[0] - 0.15) < 0.1 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_advection_periodic_zonal_meridional(mode, xdim=100, ydim=100): - fieldset = periodicfields(xdim, ydim, uvel=1., vvel=1.) + fieldset = periodicfields(xdim, ydim, uvel=1.0, vvel=1.0) fieldset.add_periodic_halo(zonal=True, meridional=True) assert len(fieldset.U.lat) == ydim + 10 # default halo size is 5 grid points assert len(fieldset.U.lon) == xdim + 10 # default halo size is 5 grid points - assert np.allclose(np.diff(fieldset.U.lat), fieldset.U.lat[1]-fieldset.U.lat[0], rtol=0.001) - assert np.allclose(np.diff(fieldset.U.lon), fieldset.U.lon[1]-fieldset.U.lon[0], rtol=0.001) + assert np.allclose(np.diff(fieldset.U.lat), fieldset.U.lat[1] - fieldset.U.lat[0], rtol=0.001) + assert np.allclose(np.diff(fieldset.U.lon), fieldset.U.lon[1] - fieldset.U.lon[0], rtol=0.001) pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.4], lat=[0.5]) pset.execute(AdvectionRK4 + pset.Kernel(periodicBC), runtime=timedelta(hours=20), dt=timedelta(seconds=30)) @@ -237,15 +250,15 @@ def test_advection_periodic_zonal_meridional(mode, xdim=100, ydim=100): assert abs(pset.lat[0] - 0.15) < 0.1 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('u', [-0.3, np.array(0.2)]) -@pytest.mark.parametrize('v', [0.2, np.array(1)]) -@pytest.mark.parametrize('w', [None, -0.2, np.array(0.7)]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("u", [-0.3, np.array(0.2)]) +@pytest.mark.parametrize("v", [0.2, np.array(1)]) +@pytest.mark.parametrize("w", [None, -0.2, np.array(0.7)]) def test_length1dimensions(mode, u, v, w): (lon, xdim) = (np.linspace(-10, 10, 21), 21) if isinstance(u, np.ndarray) else (0, 1) (lat, ydim) = (np.linspace(-15, 15, 31), 31) if isinstance(v, np.ndarray) else (-4, 1) (depth, zdim) = (np.linspace(-5, 5, 11), 11) if (isinstance(w, np.ndarray) and w is not None) else (3, 1) - dimensions = {'lon': lon, 'lat': lat, 'depth': depth} + dimensions = {"lon": lon, "lat": lat, "depth": depth} dims = [] if zdim > 1: @@ -262,10 +275,10 @@ def test_length1dimensions(mode, u, v, w): else: U, V, W = u, v, w - data = {'U': U, 'V': V} + data = {"U": U, "V": V} if w is not None: - data['W'] = W - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') + data["W"] = W + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") x0, y0, z0 = 2, 8, -4 pset = ParticleSet(fieldset, pclass=ptype[mode], lon=x0, lat=y0, depth=z0) @@ -273,7 +286,7 @@ def test_length1dimensions(mode, u, v, w): kernel = pset.Kernel(pfunc) pset.execute(kernel, runtime=5, dt=1) - assert (len(pset.lon) == len([p.lon for p in pset])) + assert len(pset.lon) == 
len([p.lon for p in pset]) assert ((np.array([p.lon - x0 for p in pset]) - 4 * u) < 1e-6).all() assert ((np.array([p.lat - y0 for p in pset]) - 4 * v) < 1e-6).all() if w: @@ -293,13 +306,17 @@ def fieldset_stationary(xdim=100, ydim=100, maxtime=timedelta(hours=6)): Reference: N. Fabbroni, 2009, "Numerical simulations of passive tracers dispersion in the sea" """ - time = np.arange(0., maxtime.total_seconds()+1e-5, 60., dtype=np.float64) - dimensions = {'lon': np.linspace(0, 25000, xdim, dtype=np.float32), - 'lat': np.linspace(0, 25000, ydim, dtype=np.float32), - 'time': time} - data = {'U': np.ones((xdim, ydim, 1), dtype=np.float32) * u_0 * np.cos(f * time), - 'V': np.ones((xdim, ydim, 1), dtype=np.float32) * -u_0 * np.sin(f * time)} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) + time = np.arange(0.0, maxtime.total_seconds() + 1e-5, 60.0, dtype=np.float64) + dimensions = { + "lon": np.linspace(0, 25000, xdim, dtype=np.float32), + "lat": np.linspace(0, 25000, ydim, dtype=np.float32), + "time": time, + } + data = { + "U": np.ones((xdim, ydim, 1), dtype=np.float32) * u_0 * np.cos(f * time), + "V": np.ones((xdim, ydim, 1), dtype=np.float32) * -u_0 * np.sin(f * time), + } + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) # setting some constants for AdvectionRK45 kernel fieldset.RK45_min_dt = 1e-3 fieldset.RK45_max_dt = 1e2 @@ -307,37 +324,41 @@ def fieldset_stationary(xdim=100, ydim=100, maxtime=timedelta(hours=6)): return fieldset -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('method, rtol, diffField', [ - ('EE', 1e-2, False), - ('AdvDiffEM', 1e-2, True), - ('AdvDiffM1', 1e-2, True), - ('RK4', 1e-5, False), - ('RK45', 1e-5, False)]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize( + "method, rtol, diffField", + [ + ("EE", 1e-2, False), + ("AdvDiffEM", 1e-2, True), + ("AdvDiffM1", 1e-2, True), + ("RK4", 1e-5, False), + ("RK45", 1e-5, False), + ], +) def test_stationary_eddy(fieldset_stationary, mode, method, rtol, diffField, npart=1): fieldset = fieldset_stationary if diffField: - fieldset.add_field(Field('Kh_zonal', np.zeros(fieldset.U.data.shape), grid=fieldset.U.grid)) - fieldset.add_field(Field('Kh_meridional', np.zeros(fieldset.V.data.shape), grid=fieldset.V.grid)) - fieldset.add_constant('dres', 0.1) + fieldset.add_field(Field("Kh_zonal", np.zeros(fieldset.U.data.shape), grid=fieldset.U.grid)) + fieldset.add_field(Field("Kh_meridional", np.zeros(fieldset.V.data.shape), grid=fieldset.V.grid)) + fieldset.add_constant("dres", 0.1) lon = np.linspace(12000, 21000, npart) lat = np.linspace(12500, 12500, npart) dt = timedelta(minutes=3).total_seconds() endtime = timedelta(hours=6).total_seconds() - RK45Particles = ptype[mode].add_variable('next_dt', dtype=np.float32, initial=dt) + RK45Particles = ptype[mode].add_variable("next_dt", dtype=np.float32, initial=dt) - pclass = RK45Particles if method == 'RK45' else ptype[mode] + pclass = RK45Particles if method == "RK45" else ptype[mode] pset = ParticleSet(fieldset, pclass=pclass, lon=lon, lat=lat) pset.execute(kernel[method], dt=dt, endtime=endtime) - exp_lon = [truth_stationary(x, y, pset[0].time)[0] for x, y, in zip(lon, lat)] - exp_lat = [truth_stationary(x, y, pset[0].time)[1] for x, y, in zip(lon, lat)] + exp_lon = [truth_stationary(x, y, pset[0].time)[0] for x, y in zip(lon, lat)] + exp_lat = [truth_stationary(x, y, pset[0].time)[1] for x, y in zip(lon, lat)] assert np.allclose(pset.lon, exp_lon, rtol=rtol) assert 
np.allclose(pset.lat, exp_lat, rtol=rtol) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_stationary_eddy_vertical(mode, npart=1): lon = np.linspace(12000, 21000, npart) lat = np.linspace(10000, 20000, npart) @@ -348,31 +369,31 @@ def test_stationary_eddy_vertical(mode, npart=1): xdim = ydim = 100 lon_data = np.linspace(0, 25000, xdim, dtype=np.float32) lat_data = np.linspace(0, 25000, ydim, dtype=np.float32) - time_data = np.arange(0., 6*3600+1e-5, 60., dtype=np.float64) + time_data = np.arange(0.0, 6 * 3600 + 1e-5, 60.0, dtype=np.float64) fld1 = np.ones((xdim, ydim, 1), dtype=np.float32) * u_0 * np.cos(f * time_data) fld2 = np.ones((xdim, ydim, 1), dtype=np.float32) * -u_0 * np.sin(f * time_data) fldzero = np.zeros((xdim, ydim, 1), dtype=np.float32) * time_data - dimensions = {'lon': lon_data, 'lat': lat_data, 'time': time_data} - data = {'U': fld1, 'V': fldzero, 'W': fld2} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) + dimensions = {"lon": lon_data, "lat": lat_data, "time": time_data} + data = {"U": fld1, "V": fldzero, "W": fld2} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lon, lat=lat, depth=depth) pset.execute(AdvectionRK4_3D, dt=dt, endtime=endtime) - exp_lon = [truth_stationary(x, z, pset[0].time)[0] for x, z, in zip(lon, depth)] - exp_depth = [truth_stationary(x, z, pset[0].time)[1] for x, z, in zip(lon, depth)] + exp_lon = [truth_stationary(x, z, pset[0].time)[0] for x, z in zip(lon, depth)] + exp_depth = [truth_stationary(x, z, pset[0].time)[1] for x, z in zip(lon, depth)] print(pset, exp_lon) assert np.allclose(pset.lon, exp_lon, rtol=1e-5) assert np.allclose(pset.lat, lat, rtol=1e-5) assert np.allclose(pset.depth, exp_depth, rtol=1e-5) - data = {'U': fldzero, 'V': fld2, 'W': fld1} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) + data = {"U": fldzero, "V": fld2, "W": fld1} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lon, lat=lat, depth=depth) pset.execute(AdvectionRK4_3D, dt=dt, endtime=endtime) - exp_depth = [truth_stationary(z, y, pset[0].time)[0] for z, y, in zip(depth, lat)] - exp_lat = [truth_stationary(z, y, pset[0].time)[1] for z, y, in zip(depth, lat)] + exp_depth = [truth_stationary(z, y, pset[0].time)[0] for z, y in zip(depth, lat)] + exp_lat = [truth_stationary(z, y, pset[0].time)[1] for z, y in zip(depth, lat)] assert np.allclose(pset.lon, lon, rtol=1e-5) assert np.allclose(pset.lat, exp_lat, rtol=1e-5) assert np.allclose(pset.depth, exp_depth, rtol=1e-5) @@ -391,36 +412,44 @@ def fieldset_moving(xdim=100, ydim=100, maxtime=timedelta(hours=6)): Reference: N. 
Fabbroni, 2009, "Numerical simulations of passive tracers dispersion in the sea" """ - time = np.arange(0., maxtime.total_seconds()+1e-5, 60., dtype=np.float64) - dimensions = {'lon': np.linspace(0, 25000, xdim, dtype=np.float32), - 'lat': np.linspace(0, 25000, ydim, dtype=np.float32), - 'time': time} - data = {'U': np.ones((xdim, ydim, 1), dtype=np.float32) * u_g + (u_0 - u_g) * np.cos(f * time), - 'V': np.ones((xdim, ydim, 1), dtype=np.float32) * -(u_0 - u_g) * np.sin(f * time)} - return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) - - -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('method, rtol, diffField', [ - ('EE', 1e-2, False), - ('AdvDiffEM', 1e-2, True), - ('AdvDiffM1', 1e-2, True), - ('RK4', 1e-5, False), - ('RK45', 1e-5, False)]) + time = np.arange(0.0, maxtime.total_seconds() + 1e-5, 60.0, dtype=np.float64) + dimensions = { + "lon": np.linspace(0, 25000, xdim, dtype=np.float32), + "lat": np.linspace(0, 25000, ydim, dtype=np.float32), + "time": time, + } + data = { + "U": np.ones((xdim, ydim, 1), dtype=np.float32) * u_g + (u_0 - u_g) * np.cos(f * time), + "V": np.ones((xdim, ydim, 1), dtype=np.float32) * -(u_0 - u_g) * np.sin(f * time), + } + return FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) + + +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize( + "method, rtol, diffField", + [ + ("EE", 1e-2, False), + ("AdvDiffEM", 1e-2, True), + ("AdvDiffM1", 1e-2, True), + ("RK4", 1e-5, False), + ("RK45", 1e-5, False), + ], +) def test_moving_eddy(fieldset_moving, mode, method, rtol, diffField, npart=1): fieldset = fieldset_moving if diffField: - fieldset.add_field(Field('Kh_zonal', np.zeros(fieldset.U.data.shape), grid=fieldset.U.grid)) - fieldset.add_field(Field('Kh_meridional', np.zeros(fieldset.V.data.shape), grid=fieldset.V.grid)) - fieldset.add_constant('dres', 0.1) + fieldset.add_field(Field("Kh_zonal", np.zeros(fieldset.U.data.shape), grid=fieldset.U.grid)) + fieldset.add_field(Field("Kh_meridional", np.zeros(fieldset.V.data.shape), grid=fieldset.V.grid)) + fieldset.add_constant("dres", 0.1) lon = np.linspace(12000, 21000, npart) lat = np.linspace(12500, 12500, npart) dt = timedelta(minutes=3).total_seconds() endtime = timedelta(hours=6).total_seconds() - RK45Particles = ptype[mode].add_variable('next_dt', dtype=np.float32, initial=dt) + RK45Particles = ptype[mode].add_variable("next_dt", dtype=np.float32, initial=dt) - pclass = RK45Particles if method == 'RK45' else ptype[mode] + pclass = RK45Particles if method == "RK45" else ptype[mode] pset = ParticleSet(fieldset, pclass=pclass, lon=lon, lat=lat) pset.execute(kernel[method], dt=dt, endtime=endtime) @@ -431,12 +460,16 @@ def test_moving_eddy(fieldset_moving, mode, method, rtol, diffField, npart=1): def truth_decaying(x_0, y_0, t): - lat = y_0 - ((u_0 - u_g) * f / (f ** 2 + gamma ** 2) - * (1 - np.exp(-gamma * t) * (np.cos(f * t) + gamma / f * np.sin(f * t)))) - lon = x_0 + (u_g / gamma_g * (1 - np.exp(-gamma_g * t)) - + (u_0 - u_g) * f / (f ** 2 + gamma ** 2) - * (gamma / f + np.exp(-gamma * t) - * (math.sin(f * t) - gamma / f * math.cos(f * t)))) + lat = y_0 - ( + (u_0 - u_g) * f / (f**2 + gamma**2) * (1 - np.exp(-gamma * t) * (np.cos(f * t) + gamma / f * np.sin(f * t))) + ) + lon = x_0 + ( + u_g / gamma_g * (1 - np.exp(-gamma_g * t)) + + (u_0 - u_g) + * f + / (f**2 + gamma**2) + * (gamma / f + np.exp(-gamma * t) * (math.sin(f * t) - gamma / f * math.cos(f * t))) + ) return lon, lat @@ -447,45 +480,54 @@ def 
fieldset_decaying(xdim=100, ydim=100, maxtime=timedelta(hours=6)): Reference: N. Fabbroni, 2009, "Numerical simulations of passive tracers dispersion in the sea" """ - time = np.arange(0., maxtime.total_seconds()+1e-5, 60., dtype=np.float64) - dimensions = {'lon': np.linspace(0, 25000, xdim, dtype=np.float32), - 'lat': np.linspace(0, 25000, ydim, dtype=np.float32), - 'time': time} - data = {'U': np.ones((xdim, ydim, 1), dtype=np.float32) * u_g * np.exp(-gamma_g * time) + (u_0 - u_g) * np.exp(-gamma * time) * np.cos(f * time), - 'V': np.ones((xdim, ydim, 1), dtype=np.float32) * -(u_0 - u_g) * np.exp(-gamma * time) * np.sin(f * time)} - return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) - - -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('method, rtol, diffField', [ - ('EE', 1e-2, False), - ('AdvDiffEM', 1e-2, True), - ('AdvDiffM1', 1e-2, True), - ('RK4', 1e-5, False), - ('RK45', 1e-5, False), - ('AA', 1e-3, False)]) + time = np.arange(0.0, maxtime.total_seconds() + 1e-5, 60.0, dtype=np.float64) + dimensions = { + "lon": np.linspace(0, 25000, xdim, dtype=np.float32), + "lat": np.linspace(0, 25000, ydim, dtype=np.float32), + "time": time, + } + data = { + "U": np.ones((xdim, ydim, 1), dtype=np.float32) * u_g * np.exp(-gamma_g * time) + + (u_0 - u_g) * np.exp(-gamma * time) * np.cos(f * time), + "V": np.ones((xdim, ydim, 1), dtype=np.float32) * -(u_0 - u_g) * np.exp(-gamma * time) * np.sin(f * time), + } + return FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) + + +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize( + "method, rtol, diffField", + [ + ("EE", 1e-2, False), + ("AdvDiffEM", 1e-2, True), + ("AdvDiffM1", 1e-2, True), + ("RK4", 1e-5, False), + ("RK45", 1e-5, False), + ("AA", 1e-3, False), + ], +) def test_decaying_eddy(fieldset_decaying, mode, method, rtol, diffField, npart=1): fieldset = fieldset_decaying - if method == 'AA': - if mode == 'jit': + if method == "AA": + if mode == "jit": return # AnalyticalAdvection not implemented in JIT else: # needed for AnalyticalAdvection to work, but comes at expense of accuracy - fieldset.U.interp_method = 'cgrid_velocity' - fieldset.V.interp_method = 'cgrid_velocity' + fieldset.U.interp_method = "cgrid_velocity" + fieldset.V.interp_method = "cgrid_velocity" if diffField: - fieldset.add_field(Field('Kh_zonal', np.zeros(fieldset.U.data.shape), grid=fieldset.U.grid)) - fieldset.add_field(Field('Kh_meridional', np.zeros(fieldset.V.data.shape), grid=fieldset.V.grid)) - fieldset.add_constant('dres', 0.1) + fieldset.add_field(Field("Kh_zonal", np.zeros(fieldset.U.data.shape), grid=fieldset.U.grid)) + fieldset.add_field(Field("Kh_meridional", np.zeros(fieldset.V.data.shape), grid=fieldset.V.grid)) + fieldset.add_constant("dres", 0.1) lon = np.linspace(12000, 21000, npart) lat = np.linspace(12500, 12500, npart) dt = timedelta(minutes=3).total_seconds() endtime = timedelta(hours=6).total_seconds() - RK45Particles = ptype[mode].add_variable('next_dt', dtype=np.float32, initial=dt) + RK45Particles = ptype[mode].add_variable("next_dt", dtype=np.float32, initial=dt) - pclass = RK45Particles if method == 'RK45' else ptype[mode] + pclass = RK45Particles if method == "RK45" else ptype[mode] pset = ParticleSet(fieldset, pclass=pclass, lon=lon, lat=lat) pset.execute(kernel[method], dt=dt, endtime=endtime) @@ -495,24 +537,24 @@ def test_decaying_eddy(fieldset_decaying, mode, method, rtol, diffField, npart=1 assert np.allclose(pset.lat, exp_lat, rtol=rtol) 
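Reviewer note: the rtol values parametrized above encode how closely each integrator is expected to track Fabbroni's (2009) closed-form solution — 1e-2 for explicit Euler ("EE") versus 1e-5 for RK4/RK45. The sketch below reproduces the decaying-eddy reference trajectory outside Parcels. The constants u_0, u_g, f, gamma and gamma_g are illustrative placeholders only; the real values are module-level constants defined earlier in tests/test_advection.py and are not part of this diff.

# Standalone sketch of the decaying-eddy reference used by test_decaying_eddy.
import math

import numpy as np

u_0, u_g = 0.3, 0.04        # eddy / geostrophic speeds [m/s] (assumed values)
f = 1.0e-4                  # Coriolis parameter [1/s] (assumed value)
gamma, gamma_g = 1.0 / 2.89e4, 1.0 / 2.89e5  # decay rates [1/s] (assumed)


def truth_decaying(x_0, y_0, t):
    # Same closed form as in the test file (Fabbroni, 2009).
    lat = y_0 - (u_0 - u_g) * f / (f**2 + gamma**2) * (
        1 - np.exp(-gamma * t) * (np.cos(f * t) + gamma / f * np.sin(f * t))
    )
    lon = x_0 + (
        u_g / gamma_g * (1 - np.exp(-gamma_g * t))
        + (u_0 - u_g) * f / (f**2 + gamma**2)
        * (gamma / f + np.exp(-gamma * t) * (math.sin(f * t) - gamma / f * math.cos(f * t)))
    )
    return lon, lat


# Forward-Euler integration of the same decaying velocity field: the O(dt)
# error of this scheme is why "EE" only earns rtol=1e-2 in the parametrization.
x, y, dt = 12000.0, 12500.0, 180.0          # dt = 3 minutes, as in the test
for step in range(int(6 * 3600 / dt)):
    t = step * dt
    u = u_g * np.exp(-gamma_g * t) + (u_0 - u_g) * np.exp(-gamma * t) * np.cos(f * t)
    v = -(u_0 - u_g) * np.exp(-gamma * t) * np.sin(f * t)
    x, y = x + u * dt, y + v * dt

exp_lon, exp_lat = truth_decaying(12000.0, 12500.0, 6 * 3600)
print(abs(x - exp_lon), abs(y - exp_lat))   # Euler drift, well above RK4's error

The U and V expressions in the loop mirror the data dict built by fieldset_decaying above, so the sketch exercises exactly the flow the test integrates through Parcels kernels.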
-@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_analyticalAgrid(mode): lon = np.arange(0, 15, dtype=np.float32) lat = np.arange(0, 15, dtype=np.float32) U = np.ones((lat.size, lon.size), dtype=np.float32) V = np.ones((lat.size, lon.size), dtype=np.float32) - fieldset = FieldSet.from_data({'U': U, 'V': V}, {'lon': lon, 'lat': lat}, mesh='flat') + fieldset = FieldSet.from_data({"U": U, "V": V}, {"lon": lon, "lat": lat}, mesh="flat") pset = ParticleSet(fieldset, pclass=ptype[mode], lon=1, lat=1) with pytest.raises(NotImplementedError): pset.execute(AdvectionAnalytical, runtime=1) -@pytest.mark.parametrize('mode', ['scipy']) # JIT not implemented -@pytest.mark.parametrize('u', [1, -0.2, -0.3, 0]) -@pytest.mark.parametrize('v', [1, -0.3, 0, -1]) -@pytest.mark.parametrize('w', [None, 1, -0.3, 0, -1]) -@pytest.mark.parametrize('direction', [1, -1]) +@pytest.mark.parametrize("mode", ["scipy"]) # JIT not implemented +@pytest.mark.parametrize("u", [1, -0.2, -0.3, 0]) +@pytest.mark.parametrize("v", [1, -0.3, 0, -1]) +@pytest.mark.parametrize("w", [None, 1, -0.3, 0, -1]) +@pytest.mark.parametrize("direction", [1, -1]) def test_uniform_analytical(mode, u, v, w, direction, tmpdir): lon = np.arange(0, 15, dtype=np.float32) lat = np.arange(0, 15, dtype=np.float32) @@ -521,30 +563,29 @@ def test_uniform_analytical(mode, u, v, w, direction, tmpdir): U = u * np.ones((depth.size, lat.size, lon.size), dtype=np.float32) V = v * np.ones((depth.size, lat.size, lon.size), dtype=np.float32) W = w * np.ones((depth.size, lat.size, lon.size), dtype=np.float32) - fieldset = FieldSet.from_data({'U': U, 'V': V, 'W': W}, {'lon': lon, 'lat': lat, 'depth': depth}, mesh='flat') - fieldset.W.interp_method = 'cgrid_velocity' + fieldset = FieldSet.from_data({"U": U, "V": V, "W": W}, {"lon": lon, "lat": lat, "depth": depth}, mesh="flat") + fieldset.W.interp_method = "cgrid_velocity" else: U = u * np.ones((lat.size, lon.size), dtype=np.float32) V = v * np.ones((lat.size, lon.size), dtype=np.float32) - fieldset = FieldSet.from_data({'U': U, 'V': V}, {'lon': lon, 'lat': lat}, mesh='flat') - fieldset.U.interp_method = 'cgrid_velocity' - fieldset.V.interp_method = 'cgrid_velocity' + fieldset = FieldSet.from_data({"U": U, "V": V}, {"lon": lon, "lat": lat}, mesh="flat") + fieldset.U.interp_method = "cgrid_velocity" + fieldset.V.interp_method = "cgrid_velocity" x0, y0, z0 = 6.1, 6.2, 20 pset = ParticleSet(fieldset, pclass=ptype[mode], lon=x0, lat=y0, depth=z0) outfile_path = tmpdir.join("uniformanalytical.zarr") outfile = pset.ParticleFile(name=outfile_path, outputdt=1, chunks=(1, 1)) - pset.execute(AdvectionAnalytical, runtime=4, dt=direction, - output_file=outfile) + pset.execute(AdvectionAnalytical, runtime=4, dt=direction, output_file=outfile) assert np.abs(pset.lon - x0 - pset.time * u) < 1e-6 assert np.abs(pset.lat - y0 - pset.time * v) < 1e-6 if w: assert np.abs(pset.depth - z0 - pset.time * w) < 1e-4 ds = xr.open_zarr(outfile_path) - times = (direction*ds['time'][:]).values.astype('timedelta64[s]')[0] - timeref = np.arange(1, 5).astype('timedelta64[s]') - assert np.allclose(times, timeref, atol=np.timedelta64(1, 'ms')) - lons = ds['lon'][:].values - assert np.allclose(lons, x0+direction*u*np.arange(1, 5)) + times = (direction * ds["time"][:]).values.astype("timedelta64[s]")[0] + timeref = np.arange(1, 5).astype("timedelta64[s]") + assert np.allclose(times, timeref, atol=np.timedelta64(1, "ms")) + lons = ds["lon"][:].values + assert np.allclose(lons, x0 + direction 
* u * np.arange(1, 5)) diff --git a/tests/test_data/create_testfields.py b/tests/test_data/create_testfields.py index ce759432..3c7e783f 100644 --- a/tests/test_data/create_testfields.py +++ b/tests/test_data/create_testfields.py @@ -25,21 +25,24 @@ scalefac = 2.0 def generate_testfieldset(xdim, ydim, zdim, tdim): - lon = np.linspace(0., 2., xdim, dtype=np.float32) - lat = np.linspace(0., 1., ydim, dtype=np.float32) - depth = np.linspace(0., 0.5, zdim, dtype=np.float32) - time = np.linspace(0., tdim, tdim, dtype=np.float64) + lon = np.linspace(0.0, 2.0, xdim, dtype=np.float32) + lat = np.linspace(0.0, 1.0, ydim, dtype=np.float32) + depth = np.linspace(0.0, 0.5, zdim, dtype=np.float32) + time = np.linspace(0.0, tdim, tdim, dtype=np.float64) U = np.ones((xdim, ydim, zdim, tdim), dtype=np.float32) V = np.zeros((xdim, ydim, zdim, tdim), dtype=np.float32) - P = 2.*np.ones((xdim, ydim, zdim, tdim), dtype=np.float32) - data = {'U': U, 'V': V, 'P': P} - dimensions = {'lon': lon, 'lat': lat, 'depth': depth, 'time': time} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) - fieldset.write('testfields') + P = 2.0 * np.ones((xdim, ydim, zdim, tdim), dtype=np.float32) + data = {"U": U, "V": V, "P": P} + dimensions = {"lon": lon, "lat": lat, "depth": depth, "time": time} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) + fieldset.write("testfields") def generate_perlin_testfield(): - img_shape = (int(math.pow(2, noctaves)) * perlinres[0] * shapescale[0], int(math.pow(2, noctaves)) * perlinres[1] * shapescale[1]) + img_shape = ( + int(math.pow(2, noctaves)) * perlinres[0] * shapescale[0], + int(math.pow(2, noctaves)) * perlinres[1] * shapescale[1], + ) # Coordinates of the test fieldset (on A-grid in deg) lon = np.linspace(-180.0, 180.0, img_shape[0], dtype=np.float32) @@ -53,21 +56,21 @@ def generate_perlin_testfield(): U = PERLIN.generate_fractal_noise_2d(img_shape, perlinres, noctaves, perlin_persistence) * scalefac V = PERLIN.generate_fractal_noise_2d(img_shape, perlinres, noctaves, perlin_persistence) * scalefac else: - U = np.ones(img_shape, dtype=np.float32)*scalefac - V = np.ones(img_shape, dtype=np.float32)*scalefac + U = np.ones(img_shape, dtype=np.float32) * scalefac + V = np.ones(img_shape, dtype=np.float32) * scalefac U = np.transpose(U, (1, 0)) U = np.expand_dims(U, 0) V = np.transpose(V, (1, 0)) V = np.expand_dims(V, 0) - data = {'U': U, 'V': V} - dimensions = {'time': time, 'lon': lon, 'lat': lat} + data = {"U": U, "V": V} + dimensions = {"time": time, "lon": lon, "lat": lat} if asizeof is not None: print(f"Perlin U-field requires {U.size * U.itemsize} bytes of memory.") print(f"Perlin V-field requires {V.size * V.itemsize} bytes of memory.") - fieldset = FieldSet.from_data(data, dimensions, mesh='spherical', transpose=False) + fieldset = FieldSet.from_data(data, dimensions, mesh="spherical", transpose=False) # fieldset.write("perlinfields") # can also be used, but then has a ghost depth dimension - write_simple_2Dt(fieldset.U, os.path.join(os.path.dirname(__file__), 'perlinfields'), varname='vozocrtx') - write_simple_2Dt(fieldset.V, os.path.join(os.path.dirname(__file__), 'perlinfields'), varname='vomecrty') + write_simple_2Dt(fieldset.U, os.path.join(os.path.dirname(__file__), "perlinfields"), varname="vozocrtx") + write_simple_2Dt(fieldset.V, os.path.join(os.path.dirname(__file__), "perlinfields"), varname="vomecrty") def write_simple_2Dt(field, filename, varname=None): @@ -82,33 +85,36 @@ def write_simple_2Dt(field, filename, 
varname=None): varname : str, optional Name of the variable to write to file. If None, defaults to field.name """ - filepath = str(f'{filename}{field.name}.nc') + filepath = str(f"{filename}{field.name}.nc") if varname is None: varname = field.name # Create DataArray objects for file I/O if field.grid.gtype == GridType.RectilinearZGrid: - nav_lon = xr.DataArray(field.grid.lon + np.zeros((field.grid.ydim, field.grid.xdim), dtype=np.float32), - coords=[('y', field.grid.lat), ('x', field.grid.lon)]) - nav_lat = xr.DataArray(field.grid.lat.reshape(field.grid.ydim, 1) + np.zeros(field.grid.xdim, dtype=np.float32), - coords=[('y', field.grid.lat), ('x', field.grid.lon)]) + nav_lon = xr.DataArray( + field.grid.lon + np.zeros((field.grid.ydim, field.grid.xdim), dtype=np.float32), + coords=[("y", field.grid.lat), ("x", field.grid.lon)], + ) + nav_lat = xr.DataArray( + field.grid.lat.reshape(field.grid.ydim, 1) + np.zeros(field.grid.xdim, dtype=np.float32), + coords=[("y", field.grid.lat), ("x", field.grid.lon)], + ) elif field.grid.gtype == GridType.CurvilinearZGrid: - nav_lon = xr.DataArray(field.grid.lon, coords=[('y', range(field.grid.ydim)), ('x', range(field.grid.xdim))]) - nav_lat = xr.DataArray(field.grid.lat, coords=[('y', range(field.grid.ydim)), ('x', range(field.grid.xdim))]) + nav_lon = xr.DataArray(field.grid.lon, coords=[("y", range(field.grid.ydim)), ("x", range(field.grid.xdim))]) + nav_lat = xr.DataArray(field.grid.lat, coords=[("y", range(field.grid.ydim)), ("x", range(field.grid.xdim))]) else: - raise NotImplementedError('Field.write only implemented for RectilinearZGrid and CurvilinearZGrid') - - attrs = {'units': 'seconds since ' + str(field.grid.time_origin)} if field.grid.time_origin.calendar else {} - time_counter = xr.DataArray(field.grid.time, - dims=['time_counter'], - attrs=attrs) - vardata = xr.DataArray(field.data.reshape((field.grid.tdim, field.grid.ydim, field.grid.xdim)), - dims=['time_counter', 'y', 'x']) + raise NotImplementedError("Field.write only implemented for RectilinearZGrid and CurvilinearZGrid") + + attrs = {"units": "seconds since " + str(field.grid.time_origin)} if field.grid.time_origin.calendar else {} + time_counter = xr.DataArray(field.grid.time, dims=["time_counter"], attrs=attrs) + vardata = xr.DataArray( + field.data.reshape((field.grid.tdim, field.grid.ydim, field.grid.xdim)), dims=["time_counter", "y", "x"] + ) # Create xarray Dataset and output to netCDF format - attrs = {'parcels_mesh': field.grid.mesh} - dset = xr.Dataset({varname: vardata}, coords={'nav_lon': nav_lon, - 'nav_lat': nav_lat, - 'time_counter': time_counter}, attrs=attrs) + attrs = {"parcels_mesh": field.grid.mesh} + dset = xr.Dataset( + {varname: vardata}, coords={"nav_lon": nav_lon, "nav_lat": nav_lat, "time_counter": time_counter}, attrs=attrs + ) dset.to_netcdf(filepath) if asizeof is not None: mem = 0 diff --git a/tests/test_diffusion.py b/tests/test_diffusion.py index 132df3c0..da67dd1e 100644 --- a/tests/test_diffusion.py +++ b/tests/test_diffusion.py @@ -17,24 +17,23 @@ from parcels import ( ScipyParticle, ) -ptype = {'scipy': ScipyParticle, 'jit': JITParticle} +ptype = {"scipy": ScipyParticle, "jit": JITParticle} -def zeros_fieldset(mesh='spherical', xdim=200, ydim=100, mesh_conversion=1): +def zeros_fieldset(mesh="spherical", xdim=200, ydim=100, mesh_conversion=1): """Generates a zero velocity field.""" - lon = np.linspace(-1e5*mesh_conversion, 1e5*mesh_conversion, xdim, dtype=np.float32) - lat = np.linspace(-1e5*mesh_conversion, 1e5*mesh_conversion, ydim, 
dtype=np.float32) + lon = np.linspace(-1e5 * mesh_conversion, 1e5 * mesh_conversion, xdim, dtype=np.float32) + lat = np.linspace(-1e5 * mesh_conversion, 1e5 * mesh_conversion, ydim, dtype=np.float32) - dimensions = {'lon': lon, 'lat': lat} - data = {'U': np.zeros((ydim, xdim), dtype=np.float32), - 'V': np.zeros((ydim, xdim), dtype=np.float32)} + dimensions = {"lon": lon, "lat": lat} + data = {"U": np.zeros((ydim, xdim), dtype=np.float32), "V": np.zeros((ydim, xdim), dtype=np.float32)} return FieldSet.from_data(data, dimensions, mesh=mesh) -@pytest.mark.parametrize('mesh', ['spherical', 'flat']) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mesh", ["spherical", "flat"]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_fieldKh_Brownian(mesh, mode, xdim=200, ydim=100, kh_zonal=100, kh_meridional=50): - mesh_conversion = 1/1852./60 if mesh == 'spherical' else 1 + mesh_conversion = 1 / 1852.0 / 60 if mesh == "spherical" else 1 fieldset = zeros_fieldset(mesh=mesh, xdim=xdim, ydim=ydim, mesh_conversion=mesh_conversion) fieldset.add_constant_field("Kh_zonal", kh_zonal, mesh=mesh) @@ -47,36 +46,35 @@ def test_fieldKh_Brownian(mesh, mode, xdim=200, ydim=100, kh_zonal=100, kh_merid pset = ParticleSet(fieldset=fieldset, pclass=ptype[mode], lon=np.zeros(npart), lat=np.zeros(npart)) pset.execute(pset.Kernel(DiffusionUniformKh), runtime=runtime, dt=timedelta(hours=1)) - expected_std_lon = np.sqrt(2*kh_zonal*mesh_conversion**2*runtime.total_seconds()) - expected_std_lat = np.sqrt(2*kh_meridional*mesh_conversion**2*runtime.total_seconds()) + expected_std_lon = np.sqrt(2 * kh_zonal * mesh_conversion**2 * runtime.total_seconds()) + expected_std_lat = np.sqrt(2 * kh_meridional * mesh_conversion**2 * runtime.total_seconds()) lats = pset.lat lons = pset.lon - tol = 200*mesh_conversion # effectively 200 m errors + tol = 200 * mesh_conversion # effectively 200 m errors assert np.allclose(np.std(lats), expected_std_lat, atol=tol) assert np.allclose(np.std(lons), expected_std_lon, atol=tol) assert np.allclose(np.mean(lons), 0, atol=tol) assert np.allclose(np.mean(lats), 0, atol=tol) -@pytest.mark.parametrize('mesh', ['spherical', 'flat']) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('kernel', [AdvectionDiffusionM1, - AdvectionDiffusionEM]) +@pytest.mark.parametrize("mesh", ["spherical", "flat"]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("kernel", [AdvectionDiffusionM1, AdvectionDiffusionEM]) def test_fieldKh_SpatiallyVaryingDiffusion(mesh, mode, kernel, xdim=200, ydim=100): """Test advection-diffusion kernels on a non-uniform diffusivity field with a linear gradient in one direction.""" - mesh_conversion = 1/1852./60 if mesh == 'spherical' else 1 + mesh_conversion = 1 / 1852.0 / 60 if mesh == "spherical" else 1 fieldset = zeros_fieldset(mesh=mesh, xdim=xdim, ydim=ydim, mesh_conversion=mesh_conversion) Kh = np.zeros((ydim, xdim), dtype=np.float32) for x in range(xdim): - Kh[:, x] = np.tanh(fieldset.U.lon[x]/fieldset.U.lon[-1]*10.)*xdim/2.+xdim/2. + 100. 
+ Kh[:, x] = np.tanh(fieldset.U.lon[x] / fieldset.U.lon[-1] * 10.0) * xdim / 2.0 + xdim / 2.0 + 100.0 grid = RectilinearZGrid(lon=fieldset.U.lon, lat=fieldset.U.lat, mesh=mesh) - fieldset.add_field(Field('Kh_zonal', Kh, grid=grid)) - fieldset.add_field(Field('Kh_meridional', Kh, grid=grid)) - fieldset.add_constant('dres', fieldset.U.lon[1]-fieldset.U.lon[0]) + fieldset.add_field(Field("Kh_zonal", Kh, grid=grid)) + fieldset.add_field(Field("Kh_meridional", Kh, grid=grid)) + fieldset.add_constant("dres", fieldset.U.lon[1] - fieldset.U.lon[0]) npart = 100 runtime = timedelta(days=1) @@ -87,14 +85,14 @@ def test_fieldKh_SpatiallyVaryingDiffusion(mesh, mode, kernel, xdim=200, ydim=10 lats = pset.lat lons = pset.lon - tol = 2000*mesh_conversion # effectively 2000 m errors (because of low numbers of particles) + tol = 2000 * mesh_conversion # effectively 2000 m errors (because of low numbers of particles) assert np.allclose(np.mean(lons), 0, atol=tol) assert np.allclose(np.mean(lats), 0, atol=tol) assert stats.skew(lons) > stats.skew(lats) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('lambd', [1, 5]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("lambd", [1, 5]) def test_randomexponential(mode, lambd, npart=1000): fieldset = zeros_fieldset() @@ -104,8 +102,9 @@ def test_randomexponential(mode, lambd, npart=1000): # Set random seed ParcelsRandom.seed(1234) - pset = ParticleSet(fieldset=fieldset, pclass=ptype[mode], - lon=np.zeros(npart), lat=np.zeros(npart), depth=np.zeros(npart)) + pset = ParticleSet( + fieldset=fieldset, pclass=ptype[mode], lon=np.zeros(npart), lat=np.zeros(npart), depth=np.zeros(npart) + ) def vertical_randomexponential(particle, fieldset, time): # Kernel for random exponential variable in depth direction @@ -114,13 +113,13 @@ def test_randomexponential(mode, lambd, npart=1000): pset.execute(vertical_randomexponential, runtime=1, dt=1) depth = pset.depth - expected_mean = 1./fieldset.lambd - assert np.allclose(np.mean(depth), expected_mean, rtol=.1) + expected_mean = 1.0 / fieldset.lambd + assert np.allclose(np.mean(depth), expected_mean, rtol=0.1) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('mu', [0.8*np.pi, np.pi]) -@pytest.mark.parametrize('kappa', [2, 4]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("mu", [0.8 * np.pi, np.pi]) +@pytest.mark.parametrize("kappa", [2, 4]) def test_randomvonmises(mode, mu, kappa, npart=10000): fieldset = zeros_fieldset() @@ -131,8 +130,10 @@ def test_randomvonmises(mode, mu, kappa, npart=10000): # Set random seed ParcelsRandom.seed(1234) - AngleParticle = ptype[mode].add_variable('angle') - pset = ParticleSet(fieldset=fieldset, pclass=AngleParticle, lon=np.zeros(npart), lat=np.zeros(npart), depth=np.zeros(npart)) + AngleParticle = ptype[mode].add_variable("angle") + pset = ParticleSet( + fieldset=fieldset, pclass=AngleParticle, lon=np.zeros(npart), lat=np.zeros(npart), depth=np.zeros(npart) + ) def vonmises(particle, fieldset, time): particle.angle = ParcelsRandom.vonmisesvariate(fieldset.mu, fieldset.kappa) @@ -141,8 +142,8 @@ def test_randomvonmises(mode, mu, kappa, npart=10000): angles = np.array([p.angle for p in pset]) - assert np.allclose(np.mean(angles), mu, atol=.1) + assert np.allclose(np.mean(angles), mu, atol=0.1) vonmises_mean = stats.vonmises.mean(kappa=kappa, loc=mu) - assert np.allclose(np.mean(angles), vonmises_mean, atol=.1) + assert np.allclose(np.mean(angles), vonmises_mean, atol=0.1) 
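Reviewer note: angle wrap-around is worth keeping in mind when reading the assertions in test_randomvonmises. Below is a self-contained version of the same statistical check, with numpy's von Mises sampler standing in for ParcelsRandom.vonmisesvariate (an assumption made purely for illustration); circular statistics are used so that samples wrapping past pi do not bias the estimates.

# Sketch of the statistical check in test_randomvonmises, without Parcels.
import numpy as np
from scipy import stats

mu, kappa, npart = 0.8 * np.pi, 4, 10000
rng = np.random.default_rng(1234)
angles = rng.vonmises(mu, kappa, size=npart)    # numpy samples live in [-pi, pi)

# Circular mean: wrap-safe, unlike a plain np.mean of wrapped samples.
resultant = np.mean(np.exp(1j * angles))
circ_mean = np.angle(resultant)
assert np.allclose(circ_mean, mu, atol=0.1)

# scipy's vonmises.var gives the circular variance 1 - I1(kappa)/I0(kappa);
# the wrap-safe sample estimate 1 - |resultant| should reproduce it up to
# sampling noise, which is what the test's variance assertion relies on.
vonmises_var = stats.vonmises.var(kappa=kappa, loc=mu)
assert np.allclose(1 - np.abs(resultant), vonmises_var, atol=0.1)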
vonmises_var = stats.vonmises.var(kappa=kappa, loc=mu) - assert np.allclose(np.var(angles), vonmises_var, atol=.1) + assert np.allclose(np.var(angles), vonmises_var, atol=0.1) diff --git a/tests/test_fieldset.py b/tests/test_fieldset.py index b15d3028..974c0261 100644 --- a/tests/test_fieldset.py +++ b/tests/test_fieldset.py @@ -33,36 +33,36 @@ from parcels.tools.converters import ( _get_cftime_datetimes, ) -ptype = {'scipy': ScipyParticle, 'jit': JITParticle} +ptype = {"scipy": ScipyParticle, "jit": JITParticle} def generate_fieldset(xdim, ydim, zdim=1, tdim=1): - lon = np.linspace(0., 10., xdim, dtype=np.float32) - lat = np.linspace(0., 10., ydim, dtype=np.float32) + lon = np.linspace(0.0, 10.0, xdim, dtype=np.float32) + lat = np.linspace(0.0, 10.0, ydim, dtype=np.float32) depth = np.zeros(zdim, dtype=np.float32) time = np.zeros(tdim, dtype=np.float64) if zdim == 1 and tdim == 1: U, V = np.meshgrid(lon, lat) - dimensions = {'lat': lat, 'lon': lon} + dimensions = {"lat": lat, "lon": lon} else: U = np.ones((tdim, zdim, ydim, xdim)) V = np.ones((tdim, zdim, ydim, xdim)) - dimensions = {'lat': lat, 'lon': lon, 'depth': depth, 'time': time} - data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)} + dimensions = {"lat": lat, "lon": lon, "depth": depth, "time": time} + data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32)} return (data, dimensions) -@pytest.mark.parametrize('xdim', [100, 200]) -@pytest.mark.parametrize('ydim', [100, 200]) +@pytest.mark.parametrize("xdim", [100, 200]) +@pytest.mark.parametrize("ydim", [100, 200]) def test_fieldset_from_data(xdim, ydim): """Simple test for fieldset initialisation from data.""" data, dimensions = generate_fieldset(xdim, ydim) fieldset = FieldSet.from_data(data, dimensions) assert len(fieldset.U.data.shape) == 3 assert len(fieldset.V.data.shape) == 3 - assert np.allclose(fieldset.U.data[0, :], data['U'], rtol=1e-12) - assert np.allclose(fieldset.V.data[0, :], data['V'], rtol=1e-12) + assert np.allclose(fieldset.U.data[0, :], data["U"], rtol=1e-12) + assert np.allclose(fieldset.V.data[0, :], data["V"], rtol=1e-12) def test_fieldset_extra_syntax(): @@ -76,52 +76,54 @@ def test_fieldset_extra_syntax(): def test_fieldset_vmin_vmax(): data, dimensions = generate_fieldset(11, 11) fieldset = FieldSet.from_data(data, dimensions, vmin=3, vmax=7) - assert np.isclose(np.amin(fieldset.U.data[fieldset.U.data > 0.]), 3) + assert np.isclose(np.amin(fieldset.U.data[fieldset.U.data > 0.0]), 3) assert np.isclose(np.amax(fieldset.U.data), 7) -@pytest.mark.parametrize('ttype', ['float', 'datetime64']) -@pytest.mark.parametrize('tdim', [1, 20]) +@pytest.mark.parametrize("ttype", ["float", "datetime64"]) +@pytest.mark.parametrize("tdim", [1, 20]) def test_fieldset_from_data_timedims(ttype, tdim): data, dimensions = generate_fieldset(10, 10, tdim=tdim) - if ttype == 'float': - dimensions['time'] = np.linspace(0, 5, tdim) + if ttype == "float": + dimensions["time"] = np.linspace(0, 5, tdim) else: - dimensions['time'] = [np.datetime64('2018-01-01') + np.timedelta64(t, 'D') for t in range(tdim)] + dimensions["time"] = [np.datetime64("2018-01-01") + np.timedelta64(t, "D") for t in range(tdim)] fieldset = FieldSet.from_data(data, dimensions) - for i, dtime in enumerate(dimensions['time']): + for i, dtime in enumerate(dimensions["time"]): assert fieldset.U.grid.time_origin.fulltime(fieldset.U.grid.time[i]) == dtime -@pytest.mark.parametrize('xdim', [100, 200]) -@pytest.mark.parametrize('ydim', [100, 50]) 
+@pytest.mark.parametrize("xdim", [100, 200]) +@pytest.mark.parametrize("ydim", [100, 50]) def test_fieldset_from_data_different_dimensions(xdim, ydim, zdim=4, tdim=2): """Test for fieldset initialisation from data using dict-of-dict for dimensions.""" - lon = np.linspace(0., 1., xdim, dtype=np.float32) - lat = np.linspace(0., 1., ydim, dtype=np.float32) + lon = np.linspace(0.0, 1.0, xdim, dtype=np.float32) + lat = np.linspace(0.0, 1.0, ydim, dtype=np.float32) depth = np.zeros(zdim, dtype=np.float32) time = np.zeros(tdim, dtype=np.float64) U = np.zeros((xdim, ydim), dtype=np.float32) V = np.ones((xdim, ydim), dtype=np.float32) - P = 2 * np.ones((int(xdim/2), int(ydim/2), zdim, tdim), dtype=np.float32) - data = {'U': U, 'V': V, 'P': P} - dimensions = {'U': {'lat': lat, 'lon': lon}, - 'V': {'lat': lat, 'lon': lon}, - 'P': {'lat': lat[0::2], 'lon': lon[0::2], 'depth': depth, 'time': time}} + P = 2 * np.ones((int(xdim / 2), int(ydim / 2), zdim, tdim), dtype=np.float32) + data = {"U": U, "V": V, "P": P} + dimensions = { + "U": {"lat": lat, "lon": lon}, + "V": {"lat": lat, "lon": lon}, + "P": {"lat": lat[0::2], "lon": lon[0::2], "depth": depth, "time": time}, + } fieldset = FieldSet.from_data(data, dimensions, transpose=True) assert len(fieldset.U.data.shape) == 3 assert len(fieldset.V.data.shape) == 3 assert len(fieldset.P.data.shape) == 4 - assert fieldset.P.data.shape == (tdim, zdim, ydim/2, xdim/2) - assert np.allclose(fieldset.U.data, 0., rtol=1e-12) - assert np.allclose(fieldset.V.data, 1., rtol=1e-12) - assert np.allclose(fieldset.P.data, 2., rtol=1e-12) + assert fieldset.P.data.shape == (tdim, zdim, ydim / 2, xdim / 2) + assert np.allclose(fieldset.U.data, 0.0, rtol=1e-12) + assert np.allclose(fieldset.V.data, 1.0, rtol=1e-12) + assert np.allclose(fieldset.P.data, 2.0, rtol=1e-12) -@pytest.mark.parametrize('xdim', [100, 200]) -@pytest.mark.parametrize('ydim', [100, 200]) -def test_fieldset_from_parcels(xdim, ydim, tmpdir, filename='test_parcels'): +@pytest.mark.parametrize("xdim", [100, 200]) +@pytest.mark.parametrize("ydim", [100, 200]) +def test_fieldset_from_parcels(xdim, ydim, tmpdir, filename="test_parcels"): """Simple test for fieldset initialisation from Parcels FieldSet file format.""" filepath = tmpdir.join(filename) data, dimensions = generate_fieldset(xdim, ydim) @@ -130,44 +132,44 @@ def test_fieldset_from_parcels(xdim, ydim, tmpdir, filename='test_parcels'): fieldset = FieldSet.from_parcels(filepath) assert len(fieldset.U.data.shape) == 3 # Will be 4 once we use depth assert len(fieldset.V.data.shape) == 3 - assert np.allclose(fieldset.U.data[0, :], data['U'], rtol=1e-12) - assert np.allclose(fieldset.V.data[0, :], data['V'], rtol=1e-12) + assert np.allclose(fieldset.U.data[0, :], data["U"], rtol=1e-12) + assert np.allclose(fieldset.V.data[0, :], data["V"], rtol=1e-12) def test_field_from_netcdf_variables(): - data_path = os.path.join(os.path.dirname(__file__), 'test_data/') - filename = data_path + 'perlinfieldsU.nc' - dims = {'lon': 'x', 'lat': 'y'} + data_path = os.path.join(os.path.dirname(__file__), "test_data/") + filename = data_path + "perlinfieldsU.nc" + dims = {"lon": "x", "lat": "y"} - variable = 'vozocrtx' + variable = "vozocrtx" f1 = Field.from_netcdf(filename, variable, dims) - variable = ('U', 'vozocrtx') + variable = ("U", "vozocrtx") f2 = Field.from_netcdf(filename, variable, dims) - variable = {'U': 'vozocrtx'} + variable = {"U": "vozocrtx"} f3 = Field.from_netcdf(filename, variable, dims) assert np.allclose(f1.data, f2.data, atol=1e-12) assert 
np.allclose(f1.data, f3.data, atol=1e-12) with pytest.raises(AssertionError): - variable = {'U': 'vozocrtx', 'nav_lat': 'nav_lat'} # multiple variables will fail + variable = {"U": "vozocrtx", "nav_lat": "nav_lat"} # multiple variables will fail f3 = Field.from_netcdf(filename, variable, dims) -@pytest.mark.parametrize('calendar, cftime_datetime', - zip(_get_cftime_calendars(), - _get_cftime_datetimes())) -def test_fieldset_nonstandardtime(calendar, cftime_datetime, tmpdir, filename='test_nonstandardtime.nc', xdim=4, ydim=6): +@pytest.mark.parametrize("calendar, cftime_datetime", zip(_get_cftime_calendars(), _get_cftime_datetimes())) +def test_fieldset_nonstandardtime( + calendar, cftime_datetime, tmpdir, filename="test_nonstandardtime.nc", xdim=4, ydim=6 +): filepath = tmpdir.join(filename) dates = [getattr(cftime, cftime_datetime)(1, m, 1) for m in range(1, 13)] - da = xr.DataArray(np.random.rand(12, xdim, ydim), - coords=[dates, range(xdim), range(ydim)], - dims=['time', 'lon', 'lat'], name='U') + da = xr.DataArray( + np.random.rand(12, xdim, ydim), coords=[dates, range(xdim), range(ydim)], dims=["time", "lon", "lat"], name="U" + ) da.to_netcdf(str(filepath)) - dims = {'lon': 'lon', 'lat': 'lat', 'time': 'time'} + dims = {"lon": "lon", "lat": "lat", "time": "time"} try: - field = Field.from_netcdf(filepath, 'U', dims) + field = Field.from_netcdf(filepath, "U", dims) except NotImplementedError: field = None @@ -175,98 +177,110 @@ def test_fieldset_nonstandardtime(calendar, cftime_datetime, tmpdir, filename='t assert field.grid.time_origin.calendar == calendar -@pytest.mark.parametrize('with_timestamps', [True, False]) +@pytest.mark.parametrize("with_timestamps", [True, False]) def test_field_from_netcdf(with_timestamps): - data_path = os.path.join(os.path.dirname(__file__), 'test_data/') + data_path = os.path.join(os.path.dirname(__file__), "test_data/") - filenames = {'lon': data_path + 'mask_nemo_cross_180lon.nc', - 'lat': data_path + 'mask_nemo_cross_180lon.nc', - 'data': data_path + 'Uu_eastward_nemo_cross_180lon.nc'} - variable = 'U' - dimensions = {'lon': 'glamf', 'lat': 'gphif'} + filenames = { + "lon": data_path + "mask_nemo_cross_180lon.nc", + "lat": data_path + "mask_nemo_cross_180lon.nc", + "data": data_path + "Uu_eastward_nemo_cross_180lon.nc", + } + variable = "U" + dimensions = {"lon": "glamf", "lat": "gphif"} if with_timestamps: - timestamp_types = [[[2]], [[np.datetime64('2000-01-01')]]] + timestamp_types = [[[2]], [[np.datetime64("2000-01-01")]]] for timestamps in timestamp_types: - Field.from_netcdf(filenames, variable, dimensions, interp_method='cgrid_velocity', timestamps=timestamps) + Field.from_netcdf(filenames, variable, dimensions, interp_method="cgrid_velocity", timestamps=timestamps) else: - Field.from_netcdf(filenames, variable, dimensions, interp_method='cgrid_velocity') + Field.from_netcdf(filenames, variable, dimensions, interp_method="cgrid_velocity") def test_fieldset_from_modulefile(): - data_path = os.path.join(os.path.dirname(__file__), 'test_data/') - fieldset = FieldSet.from_modulefile(data_path + 'fieldset_nemo.py') - assert fieldset.U.creation_log == 'from_nemo' + data_path = os.path.join(os.path.dirname(__file__), "test_data/") + fieldset = FieldSet.from_modulefile(data_path + "fieldset_nemo.py") + assert fieldset.U.creation_log == "from_nemo" - indices = {'lon': range(6, 10)} - fieldset = FieldSet.from_modulefile(data_path + 'fieldset_nemo.py', indices=indices) + indices = {"lon": range(6, 10)} + fieldset = FieldSet.from_modulefile(data_path + 
"fieldset_nemo.py", indices=indices) assert fieldset.U.grid.lon.shape[1] == 4 with pytest.raises(IOError): - FieldSet.from_modulefile(data_path + 'fieldset_nemo_error.py') + FieldSet.from_modulefile(data_path + "fieldset_nemo_error.py") - FieldSet.from_modulefile(data_path + 'fieldset_nemo_error.py', modulename='random_function_name') + FieldSet.from_modulefile(data_path + "fieldset_nemo_error.py", modulename="random_function_name") with pytest.raises(IOError): - FieldSet.from_modulefile(data_path + 'fieldset_nemo_error.py', modulename='none_returning_function') + FieldSet.from_modulefile(data_path + "fieldset_nemo_error.py", modulename="none_returning_function") def test_field_from_netcdf_fieldtypes(): - data_path = os.path.join(os.path.dirname(__file__), 'test_data/') + data_path = os.path.join(os.path.dirname(__file__), "test_data/") - filenames = {'varU': {'lon': data_path + 'mask_nemo_cross_180lon.nc', - 'lat': data_path + 'mask_nemo_cross_180lon.nc', - 'data': data_path + 'Uu_eastward_nemo_cross_180lon.nc'}, - 'varV': {'lon': data_path + 'mask_nemo_cross_180lon.nc', - 'lat': data_path + 'mask_nemo_cross_180lon.nc', - 'data': data_path + 'Vv_eastward_nemo_cross_180lon.nc'}} - variables = {'varU': 'U', 'varV': 'V'} - dimensions = {'lon': 'glamf', 'lat': 'gphif'} + filenames = { + "varU": { + "lon": data_path + "mask_nemo_cross_180lon.nc", + "lat": data_path + "mask_nemo_cross_180lon.nc", + "data": data_path + "Uu_eastward_nemo_cross_180lon.nc", + }, + "varV": { + "lon": data_path + "mask_nemo_cross_180lon.nc", + "lat": data_path + "mask_nemo_cross_180lon.nc", + "data": data_path + "Vv_eastward_nemo_cross_180lon.nc", + }, + } + variables = {"varU": "U", "varV": "V"} + dimensions = {"lon": "glamf", "lat": "gphif"} # first try without setting fieldtype fset = FieldSet.from_nemo(filenames, variables, dimensions) assert isinstance(fset.varU.units, UnitConverter) # now try with setting fieldtype - fset = FieldSet.from_nemo(filenames, variables, dimensions, fieldtype={'varU': 'U', 'varV': 'V'}) + fset = FieldSet.from_nemo(filenames, variables, dimensions, fieldtype={"varU": "U", "varV": "V"}) assert isinstance(fset.varU.units, GeographicPolar) + def test_fieldset_from_agrid_dataset(): - data_path = os.path.join(os.path.dirname(__file__), 'test_data/') + data_path = os.path.join(os.path.dirname(__file__), "test_data/") filenames = { - 'lon': data_path + 'mask_nemo_cross_180lon.nc', - 'lat': data_path + 'mask_nemo_cross_180lon.nc', - 'data': data_path + 'Uu_eastward_nemo_cross_180lon.nc' + "lon": data_path + "mask_nemo_cross_180lon.nc", + "lat": data_path + "mask_nemo_cross_180lon.nc", + "data": data_path + "Uu_eastward_nemo_cross_180lon.nc", } - variable = {'U': 'U'} - dimensions = {'lon': 'glamf', 'lat': 'gphif'} + variable = {"U": "U"} + dimensions = {"lon": "glamf", "lat": "gphif"} FieldSet.from_a_grid_dataset(filenames, variable, dimensions) + def test_fieldset_from_cgrid_interpmethod(): - data_path = os.path.join(os.path.dirname(__file__), 'test_data/') + data_path = os.path.join(os.path.dirname(__file__), "test_data/") - filenames = {'lon': data_path + 'mask_nemo_cross_180lon.nc', - 'lat': data_path + 'mask_nemo_cross_180lon.nc', - 'data': data_path + 'Uu_eastward_nemo_cross_180lon.nc'} - variable = 'U' - dimensions = {'lon': 'glamf', 'lat': 'gphif'} + filenames = { + "lon": data_path + "mask_nemo_cross_180lon.nc", + "lat": data_path + "mask_nemo_cross_180lon.nc", + "data": data_path + "Uu_eastward_nemo_cross_180lon.nc", + } + variable = "U" + dimensions = {"lon": "glamf", "lat": 
"gphif"} with pytest.raises(TypeError): # should fail because FieldSet.from_c_grid_dataset does not support interp_method - FieldSet.from_c_grid_dataset(filenames, variable, dimensions, interp_method='partialslip') + FieldSet.from_c_grid_dataset(filenames, variable, dimensions, interp_method="partialslip") -@pytest.mark.parametrize('cast_data_dtype', ['float32', 'float64']) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("cast_data_dtype", ["float32", "float64"]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_fieldset_float64(cast_data_dtype, mode, tmpdir, xdim=10, ydim=5): - lon = np.linspace(0., 10., xdim, dtype=np.float64) - lat = np.linspace(0., 10., ydim, dtype=np.float64) + lon = np.linspace(0.0, 10.0, xdim, dtype=np.float64) + lat = np.linspace(0.0, 10.0, ydim, dtype=np.float64) U, V = np.meshgrid(lon, lat) - dimensions = {'lat': lat, 'lon': lon} - data = {'U': np.array(U, dtype=np.float64), 'V': np.array(V, dtype=np.float64)} + dimensions = {"lat": lat, "lon": lon} + data = {"U": np.array(U, dtype=np.float64), "V": np.array(V, dtype=np.float64)} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', cast_data_dtype=cast_data_dtype) - if cast_data_dtype == 'float32': + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", cast_data_dtype=cast_data_dtype) + if cast_data_dtype == "float32": assert fieldset.U.data.dtype == np.float32 else: assert fieldset.U.data.dtype == np.float64 @@ -277,124 +291,130 @@ def test_fieldset_float64(cast_data_dtype, mode, tmpdir, xdim=10, ydim=5): pset.execute(AdvectionRK4, runtime=2) except RuntimeError: failed = True - if mode == 'jit' and cast_data_dtype == 'float64': + if mode == "jit" and cast_data_dtype == "float64": assert failed else: assert np.isclose(pset[0].lon, 2.70833) assert np.isclose(pset[0].lat, 5.41667) - filepath = tmpdir.join('test_fieldset_float64') + filepath = tmpdir.join("test_fieldset_float64") fieldset.U.write(filepath) - da = xr.open_dataset(str(filepath)+'U.nc') - if cast_data_dtype == 'float32': - assert da['U'].dtype == np.float32 + da = xr.open_dataset(str(filepath) + "U.nc") + if cast_data_dtype == "float32": + assert da["U"].dtype == np.float32 else: - assert da['U'].dtype == np.float64 + assert da["U"].dtype == np.float64 -@pytest.mark.parametrize('indslon', [range(10, 20), [1]]) -@pytest.mark.parametrize('indslat', [range(30, 60), [22]]) -def test_fieldset_from_file_subsets(indslon, indslat, tmpdir, filename='test_subsets'): +@pytest.mark.parametrize("indslon", [range(10, 20), [1]]) +@pytest.mark.parametrize("indslat", [range(30, 60), [22]]) +def test_fieldset_from_file_subsets(indslon, indslat, tmpdir, filename="test_subsets"): """Test for subsetting fieldset from file using indices dict.""" data, dimensions = generate_fieldset(100, 100) filepath = tmpdir.join(filename) fieldsetfull = FieldSet.from_data(data, dimensions) fieldsetfull.write(filepath) - indices = {'lon': indslon, 'lat': indslat} + indices = {"lon": indslon, "lat": indslat} indices_back = indices.copy() fieldsetsub = FieldSet.from_parcels(filepath, indices=indices, chunksize=None) assert indices == indices_back - assert np.allclose(fieldsetsub.U.lon, fieldsetfull.U.grid.lon[indices['lon']]) - assert np.allclose(fieldsetsub.U.lat, fieldsetfull.U.grid.lat[indices['lat']]) - assert np.allclose(fieldsetsub.V.lon, fieldsetfull.V.grid.lon[indices['lon']]) - assert np.allclose(fieldsetsub.V.lat, fieldsetfull.V.grid.lat[indices['lat']]) + assert np.allclose(fieldsetsub.U.lon, 
fieldsetfull.U.grid.lon[indices["lon"]]) + assert np.allclose(fieldsetsub.U.lat, fieldsetfull.U.grid.lat[indices["lat"]]) + assert np.allclose(fieldsetsub.V.lon, fieldsetfull.V.grid.lon[indices["lon"]]) + assert np.allclose(fieldsetsub.V.lat, fieldsetfull.V.grid.lat[indices["lat"]]) - ixgrid = np.ix_([0], indices['lat'], indices['lon']) + ixgrid = np.ix_([0], indices["lat"], indices["lon"]) assert np.allclose(fieldsetsub.U.data, fieldsetfull.U.data[ixgrid]) assert np.allclose(fieldsetsub.V.data, fieldsetfull.V.data[ixgrid]) -def test_empty_indices(tmpdir, filename='test_subsets'): +def test_empty_indices(tmpdir, filename="test_subsets"): data, dimensions = generate_fieldset(100, 100) filepath = tmpdir.join(filename) FieldSet.from_data(data, dimensions).write(filepath) with pytest.raises(RuntimeError): - FieldSet.from_parcels(filepath, indices={'lon': []}) + FieldSet.from_parcels(filepath, indices={"lon": []}) -@pytest.mark.parametrize('calltype', ['from_data', 'from_nemo']) +@pytest.mark.parametrize("calltype", ["from_data", "from_nemo"]) def test_illegal_dimensionsdict(calltype): with pytest.raises(NameError): - if calltype == 'from_data': + if calltype == "from_data": data, dimensions = generate_fieldset(10, 10) - dimensions['test'] = None + dimensions["test"] = None FieldSet.from_data(data, dimensions) - elif calltype == 'from_nemo': - fname = os.path.join(os.path.dirname(__file__), 'test_data', 'mask_nemo_cross_180lon.nc') - filenames = {'dx': fname, 'mesh_mask': fname} - variables = {'dx': 'e1u'} - dimensions = {'lon': 'glamu', 'lat': 'gphiu', 'test': 'test'} + elif calltype == "from_nemo": + fname = os.path.join(os.path.dirname(__file__), "test_data", "mask_nemo_cross_180lon.nc") + filenames = {"dx": fname, "mesh_mask": fname} + variables = {"dx": "e1u"} + dimensions = {"lon": "glamu", "lat": "gphiu", "test": "test"} FieldSet.from_nemo(filenames, variables, dimensions) -@pytest.mark.parametrize('xdim', [100, 200]) -@pytest.mark.parametrize('ydim', [100, 200]) -def test_add_field(xdim, ydim, tmpdir, filename='test_add'): +@pytest.mark.parametrize("xdim", [100, 200]) +@pytest.mark.parametrize("ydim", [100, 200]) +def test_add_field(xdim, ydim, tmpdir, filename="test_add"): filepath = tmpdir.join(filename) data, dimensions = generate_fieldset(xdim, ydim) fieldset = FieldSet.from_data(data, dimensions) - field = Field('newfld', fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat) + field = Field("newfld", fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat) fieldset.add_field(field) assert fieldset.newfld.data.shape == fieldset.U.data.shape fieldset.write(filepath) -@pytest.mark.parametrize('dupobject', ['same', 'new']) +@pytest.mark.parametrize("dupobject", ["same", "new"]) def test_add_duplicate_field(dupobject): data, dimensions = generate_fieldset(100, 100) fieldset = FieldSet.from_data(data, dimensions) - field = Field('newfld', fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat) + field = Field("newfld", fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat) fieldset.add_field(field) with pytest.raises(RuntimeError): - if dupobject == 'same': + if dupobject == "same": fieldset.add_field(field) - elif dupobject == 'new': - field2 = Field('newfld', np.ones((2, 2)), lon=np.array([0, 1]), lat=np.array([0, 2])) + elif dupobject == "new": + field2 = Field("newfld", np.ones((2, 2)), lon=np.array([0, 1]), lat=np.array([0, 2])) fieldset.add_field(field2) -@pytest.mark.parametrize('fieldtype', ['normal', 'vector']) +@pytest.mark.parametrize("fieldtype", ["normal", "vector"]) 
def test_add_field_after_pset(fieldtype): data, dimensions = generate_fieldset(100, 100) fieldset = FieldSet.from_data(data, dimensions) pset = ParticleSet(fieldset, ScipyParticle, lon=0, lat=0) # noqa ; to trigger fieldset.check_complete - field1 = Field('field1', fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat) - field2 = Field('field2', fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat) - vfield = VectorField('vfield', field1, field2) + field1 = Field("field1", fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat) + field2 = Field("field2", fieldset.U.data, lon=fieldset.U.lon, lat=fieldset.U.lat) + vfield = VectorField("vfield", field1, field2) with pytest.raises(RuntimeError): - if fieldtype == 'normal': + if fieldtype == "normal": fieldset.add_field(field1) - elif fieldtype == 'vector': + elif fieldtype == "vector": fieldset.add_vector_field(vfield) -@pytest.mark.parametrize('chunksize', ['auto', None]) -def test_fieldset_samegrids_from_file(tmpdir, chunksize, filename='test_subsets'): +@pytest.mark.parametrize("chunksize", ["auto", None]) +def test_fieldset_samegrids_from_file(tmpdir, chunksize, filename="test_subsets"): """Test for subsetting fieldset from file using indices dict.""" data, dimensions = generate_fieldset(100, 100) - filepath1 = tmpdir.join(filename+'_1') + filepath1 = tmpdir.join(filename + "_1") fieldset1 = FieldSet.from_data(data, dimensions) fieldset1.write(filepath1) - ufiles = [filepath1+'U.nc', ] * 4 - vfiles = [filepath1+'V.nc', ] * 4 + ufiles = [ + filepath1 + "U.nc", + ] * 4 + vfiles = [ + filepath1 + "V.nc", + ] * 4 timestamps = np.arange(0, 4, 1) * 86400.0 timestamps = np.expand_dims(timestamps, 1) - files = {'U': ufiles, 'V': vfiles} - variables = {'U': 'vozocrtx', 'V': 'vomecrty'} - dimensions = {'lon': 'nav_lon', 'lat': 'nav_lat'} - fieldset = FieldSet.from_netcdf(files, variables, dimensions, timestamps=timestamps, allow_time_extrapolation=True, chunksize=chunksize) - - if chunksize == 'auto': + files = {"U": ufiles, "V": vfiles} + variables = {"U": "vozocrtx", "V": "vomecrty"} + dimensions = {"lon": "nav_lon", "lat": "nav_lat"} + fieldset = FieldSet.from_netcdf( + files, variables, dimensions, timestamps=timestamps, allow_time_extrapolation=True, chunksize=chunksize + ) + + if chunksize == "auto": assert fieldset.gridset.size == 2 assert fieldset.U.grid != fieldset.V.grid else: @@ -403,47 +423,53 @@ def test_fieldset_samegrids_from_file(tmpdir, chunksize, filename='test_subsets' assert fieldset.U.chunksize == fieldset.V.chunksize -@pytest.mark.parametrize('gridtype', ['A', 'C']) +@pytest.mark.parametrize("gridtype", ["A", "C"]) def test_fieldset_dimlength1_cgrid(gridtype): - fieldset = FieldSet.from_data({'U': 0, 'V': 0}, {'lon': 0, 'lat': 0}) - if gridtype == 'C': - fieldset.U.interp_method = 'cgrid_velocity' - fieldset.V.interp_method = 'cgrid_velocity' + fieldset = FieldSet.from_data({"U": 0, "V": 0}, {"lon": 0, "lat": 0}) + if gridtype == "C": + fieldset.U.interp_method = "cgrid_velocity" + fieldset.V.interp_method = "cgrid_velocity" try: fieldset.check_complete() - success = True if gridtype == 'A' else False + success = True if gridtype == "A" else False except NotImplementedError: - success = True if gridtype == 'C' else False + success = True if gridtype == "C" else False assert success -@pytest.mark.parametrize('chunksize', ['auto', None]) -def test_fieldset_diffgrids_from_file(tmpdir, chunksize, filename='test_subsets'): +@pytest.mark.parametrize("chunksize", ["auto", None]) +def test_fieldset_diffgrids_from_file(tmpdir, 
chunksize, filename="test_subsets"): """Test for subsetting fieldset from file using indices dict.""" data, dimensions = generate_fieldset(100, 100) - filepath1 = tmpdir.join(filename+'_1') + filepath1 = tmpdir.join(filename + "_1") fieldset1 = FieldSet.from_data(data, dimensions) fieldset1.write(filepath1) data, dimensions = generate_fieldset(50, 50) - filepath2 = tmpdir.join(filename + '_2') + filepath2 = tmpdir.join(filename + "_2") fieldset2 = FieldSet.from_data(data, dimensions) fieldset2.write(filepath2) - ufiles = [filepath1+'U.nc', ] * 4 - vfiles = [filepath2+'V.nc', ] * 4 + ufiles = [ + filepath1 + "U.nc", + ] * 4 + vfiles = [ + filepath2 + "V.nc", + ] * 4 timestamps = np.arange(0, 4, 1) * 86400.0 timestamps = np.expand_dims(timestamps, 1) - files = {'U': ufiles, 'V': vfiles} - variables = {'U': 'vozocrtx', 'V': 'vomecrty'} - dimensions = {'lon': 'nav_lon', 'lat': 'nav_lat'} + files = {"U": ufiles, "V": vfiles} + variables = {"U": "vozocrtx", "V": "vomecrty"} + dimensions = {"lon": "nav_lon", "lat": "nav_lat"} - fieldset = FieldSet.from_netcdf(files, variables, dimensions, timestamps=timestamps, allow_time_extrapolation=True, chunksize=chunksize) + fieldset = FieldSet.from_netcdf( + files, variables, dimensions, timestamps=timestamps, allow_time_extrapolation=True, chunksize=chunksize + ) assert fieldset.gridset.size == 2 assert fieldset.U.grid != fieldset.V.grid -@pytest.mark.parametrize('chunksize', ['auto', None]) -def test_fieldset_diffgrids_from_file_data(tmpdir, chunksize, filename='test_subsets'): +@pytest.mark.parametrize("chunksize", ["auto", None]) +def test_fieldset_diffgrids_from_file_data(tmpdir, chunksize, filename="test_subsets"): """Test for subsetting fieldset from file using indices dict.""" data, dimensions = generate_fieldset(100, 100) filepath = tmpdir.join(filename) @@ -452,26 +478,32 @@ def test_fieldset_diffgrids_from_file_data(tmpdir, chunksize, filename='test_sub field_data = fieldset_data.U field_data.name = "B" - ufiles = [filepath+'U.nc', ] * 4 - vfiles = [filepath+'V.nc', ] * 4 + ufiles = [ + filepath + "U.nc", + ] * 4 + vfiles = [ + filepath + "V.nc", + ] * 4 timestamps = np.arange(0, 4, 1) * 86400.0 timestamps = np.expand_dims(timestamps, 1) - files = {'U': ufiles, 'V': vfiles} - variables = {'U': 'vozocrtx', 'V': 'vomecrty'} - dimensions = {'lon': 'nav_lon', 'lat': 'nav_lat'} - fieldset_file = FieldSet.from_netcdf(files, variables, dimensions, timestamps=timestamps, allow_time_extrapolation=True, chunksize=chunksize) + files = {"U": ufiles, "V": vfiles} + variables = {"U": "vozocrtx", "V": "vomecrty"} + dimensions = {"lon": "nav_lon", "lat": "nav_lat"} + fieldset_file = FieldSet.from_netcdf( + files, variables, dimensions, timestamps=timestamps, allow_time_extrapolation=True, chunksize=chunksize + ) fieldset_file.add_field(field_data, "B") fields = [f for f in fieldset_file.get_fields() if isinstance(f, Field)] assert len(fields) == 3 - if chunksize == 'auto': + if chunksize == "auto": assert fieldset_file.gridset.size == 3 else: assert fieldset_file.gridset.size == 2 assert fieldset_file.U.grid != fieldset_file.B.grid -def test_fieldset_samegrids_from_data(tmpdir, filename='test_subsets'): +def test_fieldset_samegrids_from_data(tmpdir, filename="test_subsets"): """Test for subsetting fieldset from file using indices dict.""" data, dimensions = generate_fieldset(100, 100) fieldset1 = FieldSet.from_data(data, dimensions) @@ -482,79 +514,83 @@ def test_fieldset_samegrids_from_data(tmpdir, filename='test_subsets'): assert fieldset1.U.grid == 
fieldset1.B.grid -@pytest.mark.parametrize('mesh', ['flat', 'spherical']) +@pytest.mark.parametrize("mesh", ["flat", "spherical"]) def test_fieldset_celledgesizes(mesh): data, dimensions = generate_fieldset(10, 7) fieldset = FieldSet.from_data(data, dimensions, mesh=mesh) fieldset.U.calc_cell_edge_sizes() - D_meridional = fieldset.U.cell_edge_sizes['y'] - D_zonal = fieldset.U.cell_edge_sizes['x'] + D_meridional = fieldset.U.cell_edge_sizes["y"] + D_zonal = fieldset.U.cell_edge_sizes["x"] - assert np.allclose(D_meridional.flatten(), D_meridional[0, 0]) # all meridional distances should be the same in either mesh - if mesh == 'flat': + assert np.allclose( + D_meridional.flatten(), D_meridional[0, 0] + ) # all meridional distances should be the same in either mesh + if mesh == "flat": assert np.allclose(D_zonal.flatten(), D_zonal[0, 0]) # all zonal distances should be the same in flat mesh else: assert all((np.gradient(D_zonal, axis=0) < 0).flatten()) # zonal distances should decrease in spherical mesh -@pytest.mark.parametrize('dx, dy', [('e1u', 'e2u'), ('e1v', 'e2v')]) +@pytest.mark.parametrize("dx, dy", [("e1u", "e2u"), ("e1v", "e2v")]) def test_fieldset_celledgesizes_curvilinear(dx, dy): - fname = os.path.join(os.path.dirname(__file__), 'test_data', 'mask_nemo_cross_180lon.nc') - filenames = {'dx': fname, 'dy': fname, 'mesh_mask': fname} - variables = {'dx': dx, 'dy': dy} - dimensions = {'dx': {'lon': 'glamu', 'lat': 'gphiu'}, - 'dy': {'lon': 'glamu', 'lat': 'gphiu'}} + fname = os.path.join(os.path.dirname(__file__), "test_data", "mask_nemo_cross_180lon.nc") + filenames = {"dx": fname, "dy": fname, "mesh_mask": fname} + variables = {"dx": dx, "dy": dy} + dimensions = {"dx": {"lon": "glamu", "lat": "gphiu"}, "dy": {"lon": "glamu", "lat": "gphiu"}} fieldset = FieldSet.from_nemo(filenames, variables, dimensions) # explicitly setting cell_edge_sizes from e1u and e2u etc - fieldset.dx.grid.cell_edge_sizes['x'] = fieldset.dx.data - fieldset.dx.grid.cell_edge_sizes['y'] = fieldset.dy.data + fieldset.dx.grid.cell_edge_sizes["x"] = fieldset.dx.data + fieldset.dx.grid.cell_edge_sizes["y"] = fieldset.dy.data A = fieldset.dx.cell_areas() assert np.allclose(A, fieldset.dx.data * fieldset.dy.data) def test_fieldset_write_curvilinear(tmpdir): - fname = os.path.join(os.path.dirname(__file__), 'test_data', 'mask_nemo_cross_180lon.nc') - filenames = {'dx': fname, 'mesh_mask': fname} - variables = {'dx': 'e1u'} - dimensions = {'lon': 'glamu', 'lat': 'gphiu'} + fname = os.path.join(os.path.dirname(__file__), "test_data", "mask_nemo_cross_180lon.nc") + filenames = {"dx": fname, "mesh_mask": fname} + variables = {"dx": "e1u"} + dimensions = {"lon": "glamu", "lat": "gphiu"} fieldset = FieldSet.from_nemo(filenames, variables, dimensions) - assert fieldset.dx.creation_log == 'from_nemo' + assert fieldset.dx.creation_log == "from_nemo" - newfile = tmpdir.join('curv_field') + newfile = tmpdir.join("curv_field") fieldset.write(newfile) - fieldset2 = FieldSet.from_netcdf(filenames=newfile+'dx.nc', variables={'dx': 'dx'}, - dimensions={'time': 'time_counter', 'depth': 'depthdx', - 'lon': 'nav_lon', 'lat': 'nav_lat'}) - assert fieldset2.dx.creation_log == 'from_netcdf' + fieldset2 = FieldSet.from_netcdf( + filenames=newfile + "dx.nc", + variables={"dx": "dx"}, + dimensions={"time": "time_counter", "depth": "depthdx", "lon": "nav_lon", "lat": "nav_lat"}, + ) + assert fieldset2.dx.creation_log == "from_netcdf" - for var in ['lon', 'lat', 'data']: + for var in ["lon", "lat", "data"]: assert 
np.allclose(getattr(fieldset2.dx, var), getattr(fieldset.dx, var)) def test_curv_fieldset_add_periodic_halo(): - fname = os.path.join(os.path.dirname(__file__), 'test_data', 'mask_nemo_cross_180lon.nc') - filenames = {'dx': fname, 'dy': fname, 'mesh_mask': fname} - variables = {'dx': 'e1u', 'dy': 'e1v'} - dimensions = {'dx': {'lon': 'glamu', 'lat': 'gphiu'}, - 'dy': {'lon': 'glamu', 'lat': 'gphiu'}} + fname = os.path.join(os.path.dirname(__file__), "test_data", "mask_nemo_cross_180lon.nc") + filenames = {"dx": fname, "dy": fname, "mesh_mask": fname} + variables = {"dx": "e1u", "dy": "e1v"} + dimensions = {"dx": {"lon": "glamu", "lat": "gphiu"}, "dy": {"lon": "glamu", "lat": "gphiu"}} fieldset = FieldSet.from_nemo(filenames, variables, dimensions) fieldset.add_periodic_halo(zonal=3, meridional=2) -@pytest.mark.parametrize('mesh', ['flat', 'spherical']) +@pytest.mark.parametrize("mesh", ["flat", "spherical"]) def test_fieldset_cellareas(mesh): data, dimensions = generate_fieldset(10, 7) fieldset = FieldSet.from_data(data, dimensions, mesh=mesh) cell_areas = fieldset.V.cell_areas() - if mesh == 'flat': + if mesh == "flat": assert np.allclose(cell_areas.flatten(), cell_areas[0, 0], rtol=1e-3) else: - assert all((np.gradient(cell_areas, axis=0) < 0).flatten()) # areas should decrease with latitude in spherical mesh + assert all( + (np.gradient(cell_areas, axis=0) < 0).flatten() + ) # areas should decrease with latitude in spherical mesh for y in range(cell_areas.shape[0]): assert np.allclose(cell_areas[y, :], cell_areas[y, 0], rtol=1e-3) @@ -563,14 +599,14 @@ def addConst(particle, fieldset, time): particle.lon = particle.lon + fieldset.movewest + fieldset.moveeast -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_fieldset_constant(mode): data, dimensions = generate_fieldset(100, 100) fieldset = FieldSet.from_data(data, dimensions) westval = -0.2 eastval = 0.3 - fieldset.add_constant('movewest', westval) - fieldset.add_constant('moveeast', eastval) + fieldset.add_constant("movewest", westval) + fieldset.add_constant("moveeast", eastval) assert fieldset.movewest == westval pset = ParticleSet.from_line(fieldset, size=1, pclass=ptype[mode], start=(0.5, 0.5), finish=(0.5, 0.5)) @@ -578,47 +614,45 @@ def test_fieldset_constant(mode): assert abs(pset.lon[0] - (0.5 + westval + eastval)) < 1e-4 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('swapUV', [False, True]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("swapUV", [False, True]) def test_vector_fields(mode, swapUV): - lon = np.linspace(0., 10., 12, dtype=np.float32) - lat = np.linspace(0., 10., 10, dtype=np.float32) + lon = np.linspace(0.0, 10.0, 12, dtype=np.float32) + lat = np.linspace(0.0, 10.0, 10, dtype=np.float32) U = np.ones((10, 12), dtype=np.float32) V = np.zeros((10, 12), dtype=np.float32) - data = {'U': U, 'V': V} - dimensions = {'U': {'lat': lat, 'lon': lon}, - 'V': {'lat': lat, 'lon': lon}} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') + data = {"U": U, "V": V} + dimensions = {"U": {"lat": lat, "lon": lon}, "V": {"lat": lat, "lon": lon}} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") if swapUV: # we test that we can freely edit whatever UV field - UV = VectorField('UV', fieldset.V, fieldset.U) + UV = VectorField("UV", fieldset.V, fieldset.U) fieldset.add_vector_field(UV) pset = ParticleSet.from_line(fieldset, size=1, pclass=ptype[mode], start=(0.5, 0.5), finish=(0.5, 0.5)) 
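Reviewer note: the swapUV branch above registers VectorField("UV", fieldset.V, fieldset.U), i.e. with its components deliberately exchanged, and the assertions that follow confirm the particle then drifts north instead of east. A dependency-free sketch of the expected end positions (the unit net displacement matches the asserts; the actual stepping is done by pset.execute):

# What the swapUV assertions encode: exchanging the (U, V) components of a
# uniform eastward flow turns zonal transport into meridional transport.
u_field, v_field = 1.0, 0.0                      # U=1, V=0, as in the test
for swapUV in (False, True):
    u, v = (v_field, u_field) if swapUV else (u_field, v_field)
    lon, lat = 0.5, 0.5                          # release position in the test
    lon, lat = lon + u * 1.0, lat + v * 1.0      # net displacement of 1.0
    expected = (0.5, 1.5) if swapUV else (1.5, 0.5)
    assert (lon, lat) == expected                # mirrors the 1e-9 asserts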
pset.execute(AdvectionRK4, dt=1, runtime=2) if swapUV: - assert abs(pset.lon[0] - .5) < 1e-9 + assert abs(pset.lon[0] - 0.5) < 1e-9 assert abs(pset.lat[0] - 1.5) < 1e-9 else: assert abs(pset.lon[0] - 1.5) < 1e-9 - assert abs(pset.lat[0] - .5) < 1e-9 + assert abs(pset.lat[0] - 0.5) < 1e-9 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_add_second_vector_field(mode): - lon = np.linspace(0., 10., 12, dtype=np.float32) - lat = np.linspace(0., 10., 10, dtype=np.float32) + lon = np.linspace(0.0, 10.0, 12, dtype=np.float32) + lat = np.linspace(0.0, 10.0, 10, dtype=np.float32) U = np.ones((10, 12), dtype=np.float32) V = np.zeros((10, 12), dtype=np.float32) - data = {'U': U, 'V': V} - dimensions = {'U': {'lat': lat, 'lon': lon}, - 'V': {'lat': lat, 'lon': lon}} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') + data = {"U": U, "V": V} + dimensions = {"U": {"lat": lat, "lon": lon}, "V": {"lat": lat, "lon": lon}} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") - data2 = {'U2': U, 'V2': V} - dimensions2 = {'lon': [ln + 0.1 for ln in lon], 'lat': [lt - 0.1 for lt in lat]} - fieldset2 = FieldSet.from_data(data2, dimensions2, mesh='flat') + data2 = {"U2": U, "V2": V} + dimensions2 = {"lon": [ln + 0.1 for ln in lon], "lat": [lt - 0.1 for lt in lat]} + fieldset2 = FieldSet.from_data(data2, dimensions2, mesh="flat") - UV2 = VectorField('UV2', fieldset2.U2, fieldset2.V2) + UV2 = VectorField("UV2", fieldset2.U2, fieldset2.V2) fieldset.add_vector_field(UV2) def SampleUV2(particle, fieldset, time): @@ -627,23 +661,22 @@ def test_add_second_vector_field(mode): particle_dlat += v * particle.dt # noqa pset = ParticleSet(fieldset, pclass=ptype[mode], lon=0.5, lat=0.5) - pset.execute(AdvectionRK4+pset.Kernel(SampleUV2), dt=1, runtime=2) + pset.execute(AdvectionRK4 + pset.Kernel(SampleUV2), dt=1, runtime=2) assert abs(pset.lon[0] - 2.5) < 1e-9 - assert abs(pset.lat[0] - .5) < 1e-9 + assert abs(pset.lat[0] - 0.5) < 1e-9 def test_fieldset_write(tmpdir): filepath = tmpdir.join("fieldset_write.zarr") xdim, ydim = 3, 4 - lon = np.linspace(0., 10., xdim, dtype=np.float32) - lat = np.linspace(0., 10., ydim, dtype=np.float32) + lon = np.linspace(0.0, 10.0, xdim, dtype=np.float32) + lat = np.linspace(0.0, 10.0, ydim, dtype=np.float32) U = np.ones((ydim, xdim), dtype=np.float32) V = np.zeros((ydim, xdim), dtype=np.float32) - data = {'U': U, 'V': V} - dimensions = {'U': {'lat': lat, 'lon': lon}, - 'V': {'lat': lat, 'lon': lon}} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') + data = {"U": U, "V": V} + dimensions = {"U": {"lat": lat, "lon": lon}, "V": {"lat": lat, "lon": lon}} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") fieldset.U.to_write = True @@ -653,30 +686,32 @@ def test_fieldset_write(tmpdir): fieldset.U.grid.time[0] = time pset = ParticleSet(fieldset, pclass=ScipyParticle, lon=5, lat=5) - ofile = pset.ParticleFile(name=filepath, outputdt=2.) + ofile = pset.ParticleFile(name=filepath, outputdt=2.0) pset.execute(UpdateU, dt=1, runtime=10, output_file=ofile) assert fieldset.U.data[0, 1, 0] == 11 - da = xr.open_dataset(str(filepath).replace('.zarr', '_0005U.nc')) - assert np.allclose(fieldset.U.data, da['U'].values, atol=1.) 
+ da = xr.open_dataset(str(filepath).replace(".zarr", "_0005U.nc")) + assert np.allclose(fieldset.U.data, da["U"].values, atol=1.0) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('time_periodic', [4*86400.0, False]) -@pytest.mark.parametrize('dt', [-3600, 3600]) -@pytest.mark.parametrize('chunksize', [False, 'auto', {'time': ('time_counter', 1), 'lat': ('y', 32), 'lon': ('x', 32)}]) -@pytest.mark.parametrize('with_GC', [False, True]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("time_periodic", [4 * 86400.0, False]) +@pytest.mark.parametrize("dt", [-3600, 3600]) +@pytest.mark.parametrize( + "chunksize", [False, "auto", {"time": ("time_counter", 1), "lat": ("y", 32), "lon": ("x", 32)}] +) +@pytest.mark.parametrize("with_GC", [False, True]) @pytest.mark.skipif(sys.platform.startswith("win"), reason="skipping windows test as windows memory leaks (#787)") def test_from_netcdf_memory_containment(mode, time_periodic, dt, chunksize, with_GC): if time_periodic and dt < 0: return # time_periodic does not work in backward-time mode - if chunksize == 'auto': - dask.config.set({'array.chunk-size': '2MiB'}) + if chunksize == "auto": + dask.config.set({"array.chunk-size": "2MiB"}) else: - dask.config.set({'array.chunk-size': '128MiB'}) + dask.config.set({"array.chunk-size": "128MiB"}) - class PerformanceLog(): + class PerformanceLog: samples = [] memory_steps = [] _iter = 0 @@ -691,111 +726,162 @@ def test_from_netcdf_memory_containment(mode, time_periodic, dt, chunksize, with gc.collect() def periodicBoundaryConditions(particle, fieldset, time): - while particle.lon > 180.: - particle_dlon -= 360. # noqa - while particle.lon < -180.: - particle_dlon += 360. - while particle.lat > 90.: - particle_dlat -= 180. # noqa - while particle.lat < -90.: - particle_dlat += 180. 
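        # Wrap the per-step displacements (particle_dlon / particle_dlat) so
        # that positions stay within [-180, 180] degrees longitude and
        # [-90, 90] degrees latitude.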
+ while particle.lon > 180.0: + particle_dlon -= 360.0 # noqa + while particle.lon < -180.0: + particle_dlon += 360.0 + while particle.lat > 90.0: + particle_dlat -= 180.0 # noqa + while particle.lat < -90.0: + particle_dlat += 180.0 process = psutil.Process(os.getpid()) mem_0 = process.memory_info().rss - fnameU = os.path.join(os.path.dirname(__file__), 'test_data', 'perlinfieldsU.nc') - fnameV = os.path.join(os.path.dirname(__file__), 'test_data', 'perlinfieldsV.nc') - ufiles = [fnameU, ] * 4 - vfiles = [fnameV, ] * 4 + fnameU = os.path.join(os.path.dirname(__file__), "test_data", "perlinfieldsU.nc") + fnameV = os.path.join(os.path.dirname(__file__), "test_data", "perlinfieldsV.nc") + ufiles = [ + fnameU, + ] * 4 + vfiles = [ + fnameV, + ] * 4 timestamps = np.arange(0, 4, 1) * 86400.0 timestamps = np.expand_dims(timestamps, 1) - files = {'U': ufiles, 'V': vfiles} - variables = {'U': 'vozocrtx', 'V': 'vomecrty'} - dimensions = {'lon': 'nav_lon', 'lat': 'nav_lat'} - - fieldset = FieldSet.from_netcdf(files, variables, dimensions, timestamps=timestamps, time_periodic=time_periodic, allow_time_extrapolation=True if time_periodic in [False, None] else False, chunksize=chunksize) + files = {"U": ufiles, "V": vfiles} + variables = {"U": "vozocrtx", "V": "vomecrty"} + dimensions = {"lon": "nav_lon", "lat": "nav_lat"} + + fieldset = FieldSet.from_netcdf( + files, + variables, + dimensions, + timestamps=timestamps, + time_periodic=time_periodic, + allow_time_extrapolation=True if time_periodic in [False, None] else False, + chunksize=chunksize, + ) perflog = PerformanceLog() - postProcessFuncs = [perflog.advance, ] + postProcessFuncs = [ + perflog.advance, + ] if with_GC: postProcessFuncs.append(perIterGC) - pset = ParticleSet(fieldset=fieldset, pclass=ptype[mode], lon=[0.5, ], lat=[0.5, ]) + pset = ParticleSet( + fieldset=fieldset, + pclass=ptype[mode], + lon=[ + 0.5, + ], + lat=[ + 0.5, + ], + ) mem_0 = process.memory_info().rss mem_exhausted = False try: - pset.execute(pset.Kernel(AdvectionRK4)+periodicBoundaryConditions, dt=dt, runtime=timedelta(days=7), postIterationCallbacks=postProcessFuncs, callbackdt=timedelta(hours=12)) + pset.execute( + pset.Kernel(AdvectionRK4) + periodicBoundaryConditions, + dt=dt, + runtime=timedelta(days=7), + postIterationCallbacks=postProcessFuncs, + callbackdt=timedelta(hours=12), + ) except MemoryError: mem_exhausted = True mem_steps_np = np.array(perflog.memory_steps) if with_GC: assert np.allclose(mem_steps_np[8:], perflog.memory_steps[-1], rtol=0.01) - if (chunksize is not False or with_GC) and mode != 'scipy': - assert np.all((mem_steps_np-mem_0) <= 5275648) # represents 4 x [U|V] * sizeof(field data) + 562816 + if (chunksize is not False or with_GC) and mode != "scipy": + assert np.all((mem_steps_np - mem_0) <= 5275648) # represents 4 x [U|V] * sizeof(field data) + 562816 assert not mem_exhausted -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('time_periodic', [4*86400.0, False]) -@pytest.mark.parametrize('chunksize', [False, 'auto', {'lat': ('y', 32), 'lon': ('x', 32)}, {'time': ('time_counter', 1), 'lat': ('y', 32), 'lon': ('x', 32)}]) -@pytest.mark.parametrize('deferLoad', [True, False]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("time_periodic", [4 * 86400.0, False]) +@pytest.mark.parametrize( + "chunksize", + [ + False, + "auto", + {"lat": ("y", 32), "lon": ("x", 32)}, + {"time": ("time_counter", 1), "lat": ("y", 32), "lon": ("x", 32)}, + ], +) +@pytest.mark.parametrize("deferLoad", [True, 
False]) def test_from_netcdf_chunking(mode, time_periodic, chunksize, deferLoad): - fnameU = os.path.join(os.path.dirname(__file__), 'test_data', 'perlinfieldsU.nc') - fnameV = os.path.join(os.path.dirname(__file__), 'test_data', 'perlinfieldsV.nc') - ufiles = [fnameU, ] * 4 - vfiles = [fnameV, ] * 4 + fnameU = os.path.join(os.path.dirname(__file__), "test_data", "perlinfieldsU.nc") + fnameV = os.path.join(os.path.dirname(__file__), "test_data", "perlinfieldsV.nc") + ufiles = [ + fnameU, + ] * 4 + vfiles = [ + fnameV, + ] * 4 timestamps = np.arange(0, 4, 1) * 86400.0 timestamps = np.expand_dims(timestamps, 1) - files = {'U': ufiles, 'V': vfiles} - variables = {'U': 'vozocrtx', 'V': 'vomecrty'} - dimensions = {'lon': 'nav_lon', 'lat': 'nav_lat'} - - fieldset = FieldSet.from_netcdf(files, variables, dimensions, timestamps=timestamps, time_periodic=time_periodic, deferred_load=deferLoad, allow_time_extrapolation=True if time_periodic in [False, None] else False, chunksize=chunksize) + files = {"U": ufiles, "V": vfiles} + variables = {"U": "vozocrtx", "V": "vomecrty"} + dimensions = {"lon": "nav_lon", "lat": "nav_lat"} + + fieldset = FieldSet.from_netcdf( + files, + variables, + dimensions, + timestamps=timestamps, + time_periodic=time_periodic, + deferred_load=deferLoad, + allow_time_extrapolation=True if time_periodic in [False, None] else False, + chunksize=chunksize, + ) pset = ParticleSet.from_line(fieldset, size=1, pclass=ptype[mode], start=(0.5, 0.5), finish=(0.5, 0.5)) pset.execute(AdvectionRK4, dt=1, runtime=1) -@pytest.mark.parametrize('datetype', ['float', 'datetime64']) +@pytest.mark.parametrize("datetype", ["float", "datetime64"]) def test_timestamps(datetype, tmpdir): data1, dims1 = generate_fieldset(10, 10, 1, 10) data2, dims2 = generate_fieldset(10, 10, 1, 4) - if datetype == 'float': - dims1['time'] = np.arange(0, 10, 1) * 86400 - dims2['time'] = np.arange(10, 14, 1) * 86400 + if datetype == "float": + dims1["time"] = np.arange(0, 10, 1) * 86400 + dims2["time"] = np.arange(10, 14, 1) * 86400 else: - dims1['time'] = np.arange('2005-02-01', '2005-02-11', dtype='datetime64[D]') - dims2['time'] = np.arange('2005-02-11', '2005-02-15', dtype='datetime64[D]') + dims1["time"] = np.arange("2005-02-01", "2005-02-11", dtype="datetime64[D]") + dims2["time"] = np.arange("2005-02-11", "2005-02-15", dtype="datetime64[D]") fieldset1 = FieldSet.from_data(data1, dims1) - fieldset1.U.data[0, :, :] = 2. - fieldset1.write(tmpdir.join('file1')) + fieldset1.U.data[0, :, :] = 2.0 + fieldset1.write(tmpdir.join("file1")) fieldset2 = FieldSet.from_data(data2, dims2) - fieldset2.U.data[0, :, :] = 0. - fieldset2.write(tmpdir.join('file2')) + fieldset2.U.data[0, :, :] = 0.0 + fieldset2.write(tmpdir.join("file2")) - fieldset3 = FieldSet.from_parcels(tmpdir.join('file*'), time_periodic=timedelta(days=14)) - timestamps = [dims1['time'], dims2['time']] - fieldset4 = FieldSet.from_parcels(tmpdir.join('file*'), timestamps=timestamps, time_periodic=timedelta(days=14)) + fieldset3 = FieldSet.from_parcels(tmpdir.join("file*"), time_periodic=timedelta(days=14)) + timestamps = [dims1["time"], dims2["time"]] + fieldset4 = FieldSet.from_parcels(tmpdir.join("file*"), timestamps=timestamps, time_periodic=timedelta(days=14)) assert np.allclose(fieldset3.U.grid.time_full, fieldset4.U.grid.time_full) for d in [0, 8, 10, 13]: - fieldset3.computeTimeChunk(d*86400., 1.) - fieldset4.computeTimeChunk(d*86400., 1.) 
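        # Whether the timestamps are inferred from the files (fieldset3) or
        # passed explicitly (fieldset4), computeTimeChunk should load the
        # same field data at every requested time, as asserted below.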
+ fieldset3.computeTimeChunk(d * 86400.0, 1.0) + fieldset4.computeTimeChunk(d * 86400.0, 1.0) assert np.allclose(fieldset3.U.data, fieldset4.U.data) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('use_xarray', [True, False]) -@pytest.mark.parametrize('time_periodic', [86400., False]) -@pytest.mark.parametrize('dt_sign', [-1, 1]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("use_xarray", [True, False]) +@pytest.mark.parametrize("time_periodic", [86400.0, False]) +@pytest.mark.parametrize("dt_sign", [-1, 1]) def test_periodic(mode, use_xarray, time_periodic, dt_sign): lon = np.array([0, 1], dtype=np.float32) lat = np.array([0, 1], dtype=np.float32) depth = np.array([0, 1], dtype=np.float32) - tsize = 24*60+1 + tsize = 24 * 60 + 1 period = 86400 time = np.linspace(0, period, tsize, dtype=np.float64) def temp_func(time): - return 20 + 2 * np.sin(time*2*np.pi/period) + return 20 + 2 * np.sin(time * 2 * np.pi / period) + temp_vec = temp_func(time) U = np.zeros((2, 2, 2, tsize), dtype=np.float32) @@ -806,24 +892,34 @@ def test_periodic(mode, use_xarray, time_periodic, dt_sign): temp[:, :, :, :] = temp_vec D = np.ones((2, 2), dtype=np.float32) # adding non-timevarying field - full_dims = {'lon': lon, 'lat': lat, 'depth': depth, 'time': time} - dimensions = {'U': full_dims, 'V': full_dims, 'W': full_dims, 'temp': full_dims, 'D': {'lon': lon, 'lat': lat}} + full_dims = {"lon": lon, "lat": lat, "depth": depth, "time": time} + dimensions = {"U": full_dims, "V": full_dims, "W": full_dims, "temp": full_dims, "D": {"lon": lon, "lat": lat}} if use_xarray: - coords = {'lat': lat, 'lon': lon, 'depth': depth, 'time': time} - variables = {'U': 'Uxr', 'V': 'Vxr', 'W': 'Wxr', 'temp': 'Txr', 'D': 'Dxr'} - dimnames = {'lon': 'lon', 'lat': 'lat', 'depth': 'depth', 'time': 'time'} - ds = xr.Dataset({'Uxr': xr.DataArray(U, coords=coords, dims=('lon', 'lat', 'depth', 'time')), - 'Vxr': xr.DataArray(V, coords=coords, dims=('lon', 'lat', 'depth', 'time')), - 'Wxr': xr.DataArray(W, coords=coords, dims=('lon', 'lat', 'depth', 'time')), - 'Txr': xr.DataArray(temp, coords=coords, dims=('lon', 'lat', 'depth', 'time')), - 'Dxr': xr.DataArray(D, coords={'lat': lat, 'lon': lon}, dims=('lon', 'lat'))}) - fieldset = FieldSet.from_xarray_dataset(ds, variables, - {'U': dimnames, 'V': dimnames, 'W': dimnames, 'temp': dimnames, - 'D': {'lon': 'lon', 'lat': 'lat'}}, - time_periodic=time_periodic, transpose=True, allow_time_extrapolation=True) + coords = {"lat": lat, "lon": lon, "depth": depth, "time": time} + variables = {"U": "Uxr", "V": "Vxr", "W": "Wxr", "temp": "Txr", "D": "Dxr"} + dimnames = {"lon": "lon", "lat": "lat", "depth": "depth", "time": "time"} + ds = xr.Dataset( + { + "Uxr": xr.DataArray(U, coords=coords, dims=("lon", "lat", "depth", "time")), + "Vxr": xr.DataArray(V, coords=coords, dims=("lon", "lat", "depth", "time")), + "Wxr": xr.DataArray(W, coords=coords, dims=("lon", "lat", "depth", "time")), + "Txr": xr.DataArray(temp, coords=coords, dims=("lon", "lat", "depth", "time")), + "Dxr": xr.DataArray(D, coords={"lat": lat, "lon": lon}, dims=("lon", "lat")), + } + ) + fieldset = FieldSet.from_xarray_dataset( + ds, + variables, + {"U": dimnames, "V": dimnames, "W": dimnames, "temp": dimnames, "D": {"lon": "lon", "lat": "lat"}}, + time_periodic=time_periodic, + transpose=True, + allow_time_extrapolation=True, + ) else: - data = {'U': U, 'V': V, 'W': W, 'temp': temp, 'D': D} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', 
time_periodic=time_periodic, transpose=True, allow_time_extrapolation=True) + data = {"U": U, "V": V, "W": W, "temp": temp, "D": D} + fieldset = FieldSet.from_data( + data, dimensions, mesh="flat", time_periodic=time_periodic, transpose=True, allow_time_extrapolation=True + ) def sampleTemp(particle, fieldset, time): particle.temp = fieldset.temp[time, particle.depth, particle.lat, particle.lon] @@ -833,17 +929,21 @@ def test_periodic(mode, use_xarray, time_periodic, dt_sign): # test if we can sample a non-timevarying field too particle.d = fieldset.D[0, 0, particle.lat, particle.lon] - MyParticle = ptype[mode].add_variables([ - Variable('temp', dtype=np.float32, initial=20.), - Variable('u1', dtype=np.float32, initial=0.), - Variable('u2', dtype=np.float32, initial=0.), - Variable('v1', dtype=np.float32, initial=0.), - Variable('v2', dtype=np.float32, initial=0.), - Variable('d', dtype=np.float32, initial=0.), - ]) + MyParticle = ptype[mode].add_variables( + [ + Variable("temp", dtype=np.float32, initial=20.0), + Variable("u1", dtype=np.float32, initial=0.0), + Variable("u2", dtype=np.float32, initial=0.0), + Variable("v1", dtype=np.float32, initial=0.0), + Variable("v2", dtype=np.float32, initial=0.0), + Variable("d", dtype=np.float32, initial=0.0), + ] + ) pset = ParticleSet.from_list(fieldset, pclass=MyParticle, lon=[0.5], lat=[0.5], depth=[0.5]) - pset.execute(AdvectionRK4_3D + pset.Kernel(sampleTemp), runtime=timedelta(hours=51), dt=timedelta(hours=dt_sign*1)) + pset.execute( + AdvectionRK4_3D + pset.Kernel(sampleTemp), runtime=timedelta(hours=51), dt=timedelta(hours=dt_sign * 1) + ) if time_periodic is not False: t = pset.time[0] @@ -855,52 +955,57 @@ def test_periodic(mode, use_xarray, time_periodic, dt_sign): assert np.allclose(temp_theo, pset.temp[0], atol=1e-5) assert np.allclose(pset.u1[0], pset.u2[0]) assert np.allclose(pset.v1[0], pset.v2[0]) - assert np.allclose(pset.d[0], 1.) 
+ assert np.allclose(pset.d[0], 1.0) -@pytest.mark.parametrize('fail', [False, pytest.param(True, marks=pytest.mark.xfail(strict=True))]) -def test_fieldset_defer_loading_with_diff_time_origin(tmpdir, fail, filename='test_parcels_defer_loading'): +@pytest.mark.parametrize("fail", [False, pytest.param(True, marks=pytest.mark.xfail(strict=True))]) +def test_fieldset_defer_loading_with_diff_time_origin(tmpdir, fail, filename="test_parcels_defer_loading"): filepath = tmpdir.join(filename) data0, dims0 = generate_fieldset(10, 10, 1, 10) - dims0['time'] = np.arange(0, 10, 1) * 3600 + dims0["time"] = np.arange(0, 10, 1) * 3600 fieldset_out = FieldSet.from_data(data0, dims0) - fieldset_out.U.grid.time_origin = TimeConverter(np.datetime64('2018-04-20')) - fieldset_out.V.grid.time_origin = TimeConverter(np.datetime64('2018-04-20')) + fieldset_out.U.grid.time_origin = TimeConverter(np.datetime64("2018-04-20")) + fieldset_out.V.grid.time_origin = TimeConverter(np.datetime64("2018-04-20")) data1, dims1 = generate_fieldset(10, 10, 1, 10) if fail: - dims1['time'] = np.arange(0, 10, 1) * 3600 + dims1["time"] = np.arange(0, 10, 1) * 3600 else: - dims1['time'] = np.arange(0, 10, 1) * 1800 + (24+25)*3600 + dims1["time"] = np.arange(0, 10, 1) * 1800 + (24 + 25) * 3600 if fail: - Wtime_origin = TimeConverter(np.datetime64('2018-04-22')) + Wtime_origin = TimeConverter(np.datetime64("2018-04-22")) else: - Wtime_origin = TimeConverter(np.datetime64('2018-04-18')) - gridW = RectilinearZGrid(dims1['lon'], dims1['lat'], dims1['depth'], dims1['time'], time_origin=Wtime_origin) - fieldW = Field('W', np.zeros(data1['U'].shape), grid=gridW) + Wtime_origin = TimeConverter(np.datetime64("2018-04-18")) + gridW = RectilinearZGrid(dims1["lon"], dims1["lat"], dims1["depth"], dims1["time"], time_origin=Wtime_origin) + fieldW = Field("W", np.zeros(data1["U"].shape), grid=gridW) fieldset_out.add_field(fieldW) fieldset_out.write(filepath) - fieldset = FieldSet.from_parcels(filepath, extra_fields={'W': 'W'}) - assert fieldset.U.creation_log == 'from_parcels' - pset = ParticleSet.from_list(fieldset, pclass=JITParticle, lon=[0.5], lat=[0.5], depth=[0.5], - time=[datetime.datetime(2018, 4, 20, 1)]) + fieldset = FieldSet.from_parcels(filepath, extra_fields={"W": "W"}) + assert fieldset.U.creation_log == "from_parcels" + pset = ParticleSet.from_list( + fieldset, pclass=JITParticle, lon=[0.5], lat=[0.5], depth=[0.5], time=[datetime.datetime(2018, 4, 20, 1)] + ) pset.execute(AdvectionRK4_3D, runtime=timedelta(hours=4), dt=timedelta(hours=1)) -@pytest.mark.parametrize('zdim', [2, 8]) -@pytest.mark.parametrize('scale_fac', [0.2, 4, 1]) -def test_fieldset_defer_loading_function(zdim, scale_fac, tmpdir, filename='test_parcels_defer_loading'): +@pytest.mark.parametrize("zdim", [2, 8]) +@pytest.mark.parametrize("scale_fac", [0.2, 4, 1]) +def test_fieldset_defer_loading_function(zdim, scale_fac, tmpdir, filename="test_parcels_defer_loading"): filepath = tmpdir.join(filename) data0, dims0 = generate_fieldset(3, 3, zdim, 10) - data0['U'][:, 0, :, :] = np.nan # setting first layer to nan, which will be changed to zero (and all other layers to 1) - dims0['time'] = np.arange(0, 10, 1) * 3600 - dims0['depth'] = np.arange(0, zdim, 1) + data0["U"][:, 0, :, :] = ( + np.nan + ) # setting first layer to nan, which will be changed to zero (and all other layers to 1) + dims0["time"] = np.arange(0, 10, 1) * 3600 + dims0["depth"] = np.arange(0, zdim, 1) fieldset_out = FieldSet.from_data(data0, dims0) fieldset_out.write(filepath) - fieldset = 
FieldSet.from_parcels(filepath, chunksize={'time': ('time_counter', 1), 'depth': ('depthu', 1), 'lat': ('y', 2), 'lon': ('x', 2)}) + fieldset = FieldSet.from_parcels( + filepath, chunksize={"time": ("time_counter", 1), "depth": ("depthu", 1), "lat": ("y", 2), "lon": ("x", 2)} + ) # testing for combination of deferred-loaded and numpy Fields with pytest.raises(ValueError): - fieldset.add_field(Field('numpyfield', np.zeros((10, zdim, 3, 3)), grid=fieldset.U.grid)) + fieldset.add_field(Field("numpyfield", np.zeros((10, zdim, 3, 3)), grid=fieldset.U.grid)) # testing for scaling factors fieldset.U.set_scaling_factor(scale_fac) @@ -919,7 +1024,7 @@ def test_fieldset_defer_loading_function(zdim, scale_fac, tmpdir, filename='test fieldset.compute_on_defer = compute fieldset.computeTimeChunk(1, 1) assert isinstance(fieldset.U.data, da.core.Array) - assert np.allclose(fieldset.U.data, scale_fac*(zdim-1.)/zdim) + assert np.allclose(fieldset.U.data, scale_fac * (zdim - 1.0) / zdim) pset = ParticleSet(fieldset, JITParticle, 0, 0) @@ -927,29 +1032,36 @@ def test_fieldset_defer_loading_function(zdim, scale_fac, tmpdir, filename='test pass pset.execute(DoNothing, dt=3600) - assert np.allclose(fieldset.U.data, scale_fac*(zdim-1.)/zdim) + assert np.allclose(fieldset.U.data, scale_fac * (zdim - 1.0) / zdim) -@pytest.mark.parametrize('time2', [1, 7]) -def test_fieldset_initialisation_kernel_dask(time2, tmpdir, filename='test_parcels_defer_loading'): +@pytest.mark.parametrize("time2", [1, 7]) +def test_fieldset_initialisation_kernel_dask(time2, tmpdir, filename="test_parcels_defer_loading"): filepath = tmpdir.join(filename) data0, dims0 = generate_fieldset(3, 3, 4, 10) - data0['U'] = np.random.rand(10, 4, 3, 3) - dims0['time'] = np.arange(0, 10, 1) - dims0['depth'] = np.arange(0, 4, 1) + data0["U"] = np.random.rand(10, 4, 3, 3) + dims0["time"] = np.arange(0, 10, 1) + dims0["depth"] = np.arange(0, 4, 1) fieldset_out = FieldSet.from_data(data0, dims0) fieldset_out.write(filepath) - fieldset = FieldSet.from_parcels(filepath, chunksize={'time': ('time_counter', 1), 'depth': ('depthu', 1), 'lat': ('y', 2), 'lon': ('x', 2)}) + fieldset = FieldSet.from_parcels( + filepath, chunksize={"time": ("time_counter", 1), "depth": ("depthu", 1), "lat": ("y", 2), "lon": ("x", 2)} + ) def SampleField(particle, fieldset, time): particle.u_kernel, particle.v_kernel = fieldset.UV[time, particle.depth, particle.lat, particle.lon] - SampleParticle = JITParticle.add_variables([ - Variable('u_kernel', dtype=np.float32, initial=0.), - Variable('v_kernel', dtype=np.float32, initial=0.), - Variable('u_scipy', dtype=np.float32, initial=0.)]) + SampleParticle = JITParticle.add_variables( + [ + Variable("u_kernel", dtype=np.float32, initial=0.0), + Variable("v_kernel", dtype=np.float32, initial=0.0), + Variable("u_scipy", dtype=np.float32, initial=0.0), + ] + ) - pset = ParticleSet(fieldset, pclass=SampleParticle, time=[0, time2], lon=[0.5, 0.5], lat=[0.5, 0.5], depth=[0.5, 0.5]) + pset = ParticleSet( + fieldset, pclass=SampleParticle, time=[0, time2], lon=[0.5, 0.5], lat=[0.5, 0.5], depth=[0.5, 0.5] + ) if time2 > 1: with pytest.raises(TimeExtrapolationError): @@ -960,38 +1072,39 @@ def test_fieldset_initialisation_kernel_dask(time2, tmpdir, filename='test_parce assert isinstance(fieldset.U.data, da.core.Array) -@pytest.mark.parametrize('tdim', [10, None]) +@pytest.mark.parametrize("tdim", [10, None]) def test_fieldset_from_xarray(tdim): def generate_dataset(xdim, ydim, zdim=1, tdim=1): - lon = np.linspace(0., 12, xdim, dtype=np.float32) - 
lat = np.linspace(0., 12, ydim, dtype=np.float32) - depth = np.linspace(0., 20., zdim, dtype=np.float32) + lon = np.linspace(0.0, 12, xdim, dtype=np.float32) + lat = np.linspace(0.0, 12, ydim, dtype=np.float32) + depth = np.linspace(0.0, 20.0, zdim, dtype=np.float32) if tdim: - time = np.linspace(0., 10, tdim, dtype=np.float64) + time = np.linspace(0.0, 10, tdim, dtype=np.float64) Uxr = np.ones((tdim, zdim, ydim, xdim), dtype=np.float32) Vxr = np.ones((tdim, zdim, ydim, xdim), dtype=np.float32) for t in range(Uxr.shape[0]): - Uxr[t, :, :, :] = t/10. - coords = {'lat': lat, 'lon': lon, 'depth': depth, 'time': time} - dims = ('time', 'depth', 'lat', 'lon') + Uxr[t, :, :, :] = t / 10.0 + coords = {"lat": lat, "lon": lon, "depth": depth, "time": time} + dims = ("time", "depth", "lat", "lon") else: Uxr = np.ones((zdim, ydim, xdim), dtype=np.float32) Vxr = np.ones((zdim, ydim, xdim), dtype=np.float32) for z in range(Uxr.shape[0]): - Uxr[z, :, :] = z/2. - coords = {'lat': lat, 'lon': lon, 'depth': depth} - dims = ('depth', 'lat', 'lon') - return xr.Dataset({'Uxr': xr.DataArray(Uxr, coords=coords, dims=dims), - 'Vxr': xr.DataArray(Vxr, coords=coords, dims=dims)}) + Uxr[z, :, :] = z / 2.0 + coords = {"lat": lat, "lon": lon, "depth": depth} + dims = ("depth", "lat", "lon") + return xr.Dataset( + {"Uxr": xr.DataArray(Uxr, coords=coords, dims=dims), "Vxr": xr.DataArray(Vxr, coords=coords, dims=dims)} + ) ds = generate_dataset(3, 3, 2, tdim) - variables = {'U': 'Uxr', 'V': 'Vxr'} + variables = {"U": "Uxr", "V": "Vxr"} if tdim: - dimensions = {'lat': 'lat', 'lon': 'lon', 'depth': 'depth', 'time': 'time'} + dimensions = {"lat": "lat", "lon": "lon", "depth": "depth", "time": "time"} else: - dimensions = {'lat': 'lat', 'lon': 'lon', 'depth': 'depth'} - fieldset = FieldSet.from_xarray_dataset(ds, variables, dimensions, mesh='flat') - assert fieldset.U.creation_log == 'from_xarray_dataset' + dimensions = {"lat": "lat", "lon": "lon", "depth": "depth"} + fieldset = FieldSet.from_xarray_dataset(ds, variables, dimensions, mesh="flat") + assert fieldset.U.creation_log == "from_xarray_dataset" pset = ParticleSet(fieldset, JITParticle, 0, 0, depth=20) @@ -1002,36 +1115,36 @@ def test_fieldset_from_xarray(tdim): assert np.allclose(pset.lon_nextloop[0], 5.0) and np.allclose(pset.lat_nextloop[0], 10) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_fieldset_frompop(mode): - filenames = os.path.join(os.path.join(os.path.dirname(__file__), 'test_data'), 'POPtestdata_time.nc') - variables = {'U': 'U', 'V': 'V', 'W': 'W', 'T': 'T'} - dimensions = {'lon': 'lon', 'lat': 'lat', 'time': 'time'} + filenames = os.path.join(os.path.join(os.path.dirname(__file__), "test_data"), "POPtestdata_time.nc") + variables = {"U": "U", "V": "V", "W": "W", "T": "T"} + dimensions = {"lon": "lon", "lat": "lat", "time": "time"} - fieldset = FieldSet.from_pop(filenames, variables, dimensions, mesh='flat') + fieldset = FieldSet.from_pop(filenames, variables, dimensions, mesh="flat") pset = ParticleSet.from_list(fieldset, ptype[mode], lon=[3, 5, 1], lat=[3, 5, 1]) pset.execute(AdvectionRK4, runtime=3, dt=1) def test_fieldset_from_data_gridtypes(xdim=20, ydim=10, zdim=4): """Simple test for fieldset initialisation from data.""" - lon = np.linspace(0., 10., xdim, dtype=np.float32) - lat = np.linspace(0., 10., ydim, dtype=np.float32) - depth = np.linspace(0., 1., zdim, dtype=np.float32) + lon = np.linspace(0.0, 10.0, xdim, dtype=np.float32) + lat = np.linspace(0.0, 10.0, ydim, 
dtype=np.float32) + depth = np.linspace(0.0, 1.0, zdim, dtype=np.float32) depth_s = np.ones((zdim, ydim, xdim)) U = np.ones((zdim, ydim, xdim)) V = np.ones((zdim, ydim, xdim)) - dimensions = {'lat': lat, 'lon': lon, 'depth': depth} - data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)} + dimensions = {"lat": lat, "lon": lon, "depth": depth} + data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32)} lonm, latm = np.meshgrid(lon, lat) for k in range(zdim): - data['U'][k, :, :] = lonm * (depth[k]+1) + .1 + data["U"][k, :, :] = lonm * (depth[k] + 1) + 0.1 depth_s[k, :, :] = depth[k] # Rectilinear Z grid - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') - pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, .4]) - pset.execute(AdvectionRK4, runtime=1.5, dt=.5) + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") + pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, 0.4]) + pset.execute(AdvectionRK4, runtime=1.5, dt=0.5) plon = pset.lon plat = pset.lat # sol of dx/dt = (init_depth+1)*x+0.1; x(0)=0 @@ -1039,46 +1152,54 @@ def test_fieldset_from_data_gridtypes(xdim=20, ydim=10, zdim=4): assert np.allclose(plat, [1, 1]) # Rectilinear S grid - dimensions['depth'] = depth_s - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') - pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, .4]) - pset.execute(AdvectionRK4, runtime=1.5, dt=.5) + dimensions["depth"] = depth_s + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") + pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, 0.4]) + pset.execute(AdvectionRK4, runtime=1.5, dt=0.5) assert np.allclose(plon, pset.lon) assert np.allclose(plat, pset.lat) # Curvilinear Z grid - dimensions['lon'] = lonm - dimensions['lat'] = latm - dimensions['depth'] = depth - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') - pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, .4]) - pset.execute(AdvectionRK4, runtime=1.5, dt=.5) + dimensions["lon"] = lonm + dimensions["lat"] = latm + dimensions["depth"] = depth + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") + pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, 0.4]) + pset.execute(AdvectionRK4, runtime=1.5, dt=0.5) assert np.allclose(plon, pset.lon) assert np.allclose(plat, pset.lat) # Curvilinear S grid - dimensions['depth'] = depth_s - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') - pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, .4]) - pset.execute(AdvectionRK4, runtime=1.5, dt=.5) + dimensions["depth"] = depth_s + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") + pset = ParticleSet(fieldset, ScipyParticle, [0, 0], [0, 0], [0, 0.4]) + pset.execute(AdvectionRK4, runtime=1.5, dt=0.5) assert np.allclose(plon, pset.lon) assert np.allclose(plat, pset.lat) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('direction', [1, -1]) -@pytest.mark.parametrize('time_extrapolation', [True, False]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("direction", [1, -1]) +@pytest.mark.parametrize("time_extrapolation", [True, False]) def test_deferredload_simplefield(mode, direction, time_extrapolation, tmpdir, tdim=10): filename = tmpdir.join("simplefield_deferredload.nc") data = np.zeros((tdim, 2, 2)) for ti in range(tdim): - data[ti, :, :] = ti if direction == 1 else tdim-ti-1 - ds = xr.Dataset({"U": (("t", "y", "x"), data), "V": (("t", "y", "x"), data)}, 
- coords={"x": [0, 1], "y": [0, 1], "t": np.arange(tdim)}) + data[ti, :, :] = ti if direction == 1 else tdim - ti - 1 + ds = xr.Dataset( + {"U": (("t", "y", "x"), data), "V": (("t", "y", "x"), data)}, + coords={"x": [0, 1], "y": [0, 1], "t": np.arange(tdim)}, + ) ds.to_netcdf(filename) - fieldset = FieldSet.from_netcdf(filename, {'U': 'U', 'V': 'V'}, {'lon': 'x', 'lat': 'y', 'time': 't'}, - deferred_load=True, mesh='flat', allow_time_extrapolation=time_extrapolation) + fieldset = FieldSet.from_netcdf( + filename, + {"U": "U", "V": "V"}, + {"lon": "x", "lat": "y", "time": "t"}, + deferred_load=True, + mesh="flat", + allow_time_extrapolation=time_extrapolation, + ) SamplingParticle = ptype[mode].add_variable("p") pset = ParticleSet(fieldset, SamplingParticle, lon=0.5, lat=0.5) @@ -1086,20 +1207,20 @@ def test_deferredload_simplefield(mode, direction, time_extrapolation, tmpdir, t def SampleU(particle, fieldset, time): particle.p, tmp = fieldset.UV[particle] - runtime = tdim*2 if time_extrapolation else None + runtime = tdim * 2 if time_extrapolation else None pset.execute(SampleU, dt=direction, runtime=runtime) - assert pset.p == tdim-1 if time_extrapolation else tdim-2 + assert pset.p == tdim - 1 if time_extrapolation else tdim - 2 def test_daskfieldfilebuffer_dimnames(): - DaskFileBuffer.add_to_dimension_name_map_global({'lat': 'nydim', 'lon': 'nxdim'}) - fnameU = os.path.join(os.path.dirname(__file__), 'test_data', 'perlinfieldsU.nc') - dimensions = {'lon': 'nav_lon', 'lat': 'nav_lat'} + DaskFileBuffer.add_to_dimension_name_map_global({"lat": "nydim", "lon": "nxdim"}) + fnameU = os.path.join(os.path.dirname(__file__), "test_data", "perlinfieldsU.nc") + dimensions = {"lon": "nav_lon", "lat": "nav_lat"} fb = DaskFileBuffer(fnameU, dimensions, indices={}) - assert ('nxdim' in fb._static_name_maps['lon']) and ('ntdim' not in fb._static_name_maps['time']) - fb.add_to_dimension_name_map({'time': 'ntdim', 'depth': 'nddim'}) - assert ('nxdim' in fb._static_name_maps['lon']) and ('ntdim' in fb._static_name_maps['time']) - assert fb._get_available_dims_indices_by_request() == {'time': None, 'depth': None, 'lat': 0, 'lon': 1} - assert fb._get_available_dims_indices_by_namemap() == {'time': 0, 'depth': 1, 'lat': 2, 'lon': 3} - assert fb._is_dimension_chunked('lon') is False - assert fb._is_dimension_in_chunksize_request('lon') == (-1, '', 0) + assert ("nxdim" in fb._static_name_maps["lon"]) and ("ntdim" not in fb._static_name_maps["time"]) + fb.add_to_dimension_name_map({"time": "ntdim", "depth": "nddim"}) + assert ("nxdim" in fb._static_name_maps["lon"]) and ("ntdim" in fb._static_name_maps["time"]) + assert fb._get_available_dims_indices_by_request() == {"time": None, "depth": None, "lat": 0, "lon": 1} + assert fb._get_available_dims_indices_by_namemap() == {"time": 0, "depth": 1, "lat": 2, "lon": 3} + assert fb._is_dimension_chunked("lon") is False + assert fb._is_dimension_in_chunksize_request("lon") == (-1, "", 0) diff --git a/tests/test_fieldset_sampling.py b/tests/test_fieldset_sampling.py index f232f961..5884f3fd 100644 --- a/tests/test_fieldset_sampling.py +++ b/tests/test_fieldset_sampling.py @@ -19,19 +19,19 @@ from parcels import ( Variable, ) -ptype = {'scipy': ScipyParticle, 'jit': JITParticle} +ptype = {"scipy": ScipyParticle, "jit": JITParticle} def pclass(mode): - return ptype[mode].add_variables([ - Variable('u', dtype=np.float32), - Variable('v', dtype=np.float32), - Variable('p', dtype=np.float32)]) + return ptype[mode].add_variables( + [Variable("u", dtype=np.float32), 
Variable("v", dtype=np.float32), Variable("p", dtype=np.float32)] + ) def k_sample_uv(): def SampleUV(particle, fieldset, time): (particle.u, particle.v) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] + return SampleUV @@ -42,7 +42,10 @@ def k_sample_uv_fixture(): def k_sample_uv_noconvert(): def SampleUVNoConvert(particle, fieldset, time): - (particle.u, particle.v) = fieldset.UV.eval(time, particle.depth, particle.lat, particle.lon, applyConversion=False) + (particle.u, particle.v) = fieldset.UV.eval( + time, particle.depth, particle.lat, particle.lon, applyConversion=False + ) + return SampleUVNoConvert @@ -54,6 +57,7 @@ def k_sample_uv_noconvert_fixture(): def k_sample_p(): def SampleP(particle, fieldset, time): particle.p = fieldset.P[particle] + return SampleP @@ -67,10 +71,10 @@ def fieldset(xdim=200, ydim=100): lon = np.linspace(-180, 180, xdim, dtype=np.float32) lat = np.linspace(-90, 90, ydim, dtype=np.float32) U, V = np.meshgrid(lat, lon) - data = {'U': U, 'V': V} - dimensions = {'lon': lon, 'lat': lat} + data = {"U": U, "V": V} + dimensions = {"lon": lon, "lat": lat} - return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) + return FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) @pytest.fixture(name="fieldset") @@ -83,10 +87,10 @@ def fieldset_geometric(xdim=200, ydim=100): lon = np.linspace(-180, 180, xdim, dtype=np.float32) lat = np.linspace(-90, 90, ydim, dtype=np.float32) U, V = np.meshgrid(lat, lon) - U *= 1000. * 1.852 * 60. - V *= 1000. * 1.852 * 60. - data = {'U': U, 'V': V} - dimensions = {'lon': lon, 'lat': lat} + U *= 1000.0 * 1.852 * 60.0 + V *= 1000.0 * 1.852 * 60.0 + data = {"U": U, "V": V} + dimensions = {"lon": lon, "lat": lat} fieldset = FieldSet.from_data(data, dimensions, transpose=True) fieldset.U.units = Geographic() fieldset.V.units = Geographic() @@ -108,11 +112,11 @@ def fieldset_geometric_polar(xdim=200, ydim=100): # Apply inverse of pole correction to U for i, y in enumerate(lat): U[:, i] *= cos(y * pi / 180) - U *= 1000. * 1.852 * 60. - V *= 1000. * 1.852 * 60. 
- data = {'U': U, 'V': V} - dimensions = {'lon': lon, 'lat': lat} - return FieldSet.from_data(data, dimensions, mesh='spherical', transpose=True) + U *= 1000.0 * 1.852 * 60.0 + V *= 1000.0 * 1.852 * 60.0 + data = {"U": U, "V": V} + dimensions = {"lon": lon, "lat": lat} + return FieldSet.from_data(data, dimensions, mesh="spherical", transpose=True) @pytest.fixture(name="fieldset_geometric_polar") @@ -124,9 +128,11 @@ def test_fieldset_sample(fieldset, xdim=120, ydim=80): """Sample the fieldset using indexing notation.""" lon = np.linspace(-170, 170, xdim, dtype=np.float32) lat = np.linspace(-80, 80, ydim, dtype=np.float32) - v_s = np.array([fieldset.UV[0, 0., 70., x][1] for x in lon]) - u_s = np.array([fieldset.UV[0, 0., y, -45.][0] for y in lat]) - assert np.allclose(v_s, lon, rtol=1e-5) # Tolerances were rtol=1e-7, increased due to numpy v2 float32 changes (see #1603) + v_s = np.array([fieldset.UV[0, 0.0, 70.0, x][1] for x in lon]) + u_s = np.array([fieldset.UV[0, 0.0, y, -45.0][0] for y in lat]) + assert np.allclose( + v_s, lon, rtol=1e-5 + ) # Tolerances were rtol=1e-7, increased due to numpy v2 float32 changes (see #1603) assert np.allclose(u_s, lat, rtol=1e-5) @@ -134,160 +140,179 @@ def test_fieldset_sample_eval(fieldset, xdim=60, ydim=60): """Sample the fieldset using the explicit eval function.""" lon = np.linspace(-170, 170, xdim, dtype=np.float32) lat = np.linspace(-80, 80, ydim, dtype=np.float32) - v_s = np.array([fieldset.UV.eval(0, 0., 70., x)[1] for x in lon]) - u_s = np.array([fieldset.UV.eval(0, 0., y, 0.)[0] for y in lat]) - assert np.allclose(v_s, lon, rtol=1e-5) # Tolerances were rtol=1e-7, increased due to numpy v2 float32 changes (see #1603) + v_s = np.array([fieldset.UV.eval(0, 0.0, 70.0, x)[1] for x in lon]) + u_s = np.array([fieldset.UV.eval(0, 0.0, y, 0.0)[0] for y in lat]) + assert np.allclose( + v_s, lon, rtol=1e-5 + ) # Tolerances were rtol=1e-7, increased due to numpy v2 float32 changes (see #1603) assert np.allclose(u_s, lat, rtol=1e-5) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_fieldset_polar_with_halo(fieldset_geometric_polar, mode): fieldset_geometric_polar.add_periodic_halo(zonal=5) pset = ParticleSet(fieldset_geometric_polar, pclass=pclass(mode), lon=0, lat=0) pset.execute(runtime=1) - assert pset.lon[0] == 0. + assert pset.lon[0] == 0.0 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('zdir', [-1, 1]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("zdir", [-1, 1]) def test_verticalsampling(mode, zdir): dims = (4, 2, 2) - dimensions = {'lon': np.linspace(0., 1., dims[2], dtype=np.float32), - 'lat': np.linspace(0., 1., dims[1], dtype=np.float32), - 'depth': np.linspace(0., 1*zdir, dims[0], dtype=np.float32)} - data = {'U': np.zeros(dims, dtype=np.float32), - 'V': np.zeros(dims, dtype=np.float32)} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') - pset = ParticleSet(fieldset, pclass=pclass(mode), lon=0, lat=0, depth=0.7*zdir) - pset.execute(AdvectionRK4, dt=1., runtime=1.) 
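    # Zero velocities on a four-level depth axis running from 0 to 1 * zdir:
    # after one step, a particle released at depth 0.7 * zdir should sit in
    # vertical cell index 2, as asserted below.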
+ dimensions = { + "lon": np.linspace(0.0, 1.0, dims[2], dtype=np.float32), + "lat": np.linspace(0.0, 1.0, dims[1], dtype=np.float32), + "depth": np.linspace(0.0, 1 * zdir, dims[0], dtype=np.float32), + } + data = {"U": np.zeros(dims, dtype=np.float32), "V": np.zeros(dims, dtype=np.float32)} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") + pset = ParticleSet(fieldset, pclass=pclass(mode), lon=0, lat=0, depth=0.7 * zdir) + pset.execute(AdvectionRK4, dt=1.0, runtime=1.0) assert pset[0].zi == [2] -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_pset_from_field(mode, xdim=10, ydim=20, npart=10000): np.random.seed(123456) - dimensions = {'lon': np.linspace(0., 1., xdim, dtype=np.float32), - 'lat': np.linspace(0., 1., ydim, dtype=np.float32)} + dimensions = { + "lon": np.linspace(0.0, 1.0, xdim, dtype=np.float32), + "lat": np.linspace(0.0, 1.0, ydim, dtype=np.float32), + } startfield = np.ones((xdim, ydim), dtype=np.float32) for x in range(xdim): startfield[x, :] = x - data = {'U': np.zeros((xdim, ydim), dtype=np.float32), - 'V': np.zeros((xdim, ydim), dtype=np.float32), - 'start': startfield} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) + data = { + "U": np.zeros((xdim, ydim), dtype=np.float32), + "V": np.zeros((xdim, ydim), dtype=np.float32), + "start": startfield, + } + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) - densfield = Field(name='densfield', data=np.zeros((xdim+1, ydim+1), dtype=np.float32), - lon=np.linspace(-1./(xdim*2), 1.+1./(xdim*2), xdim+1, dtype=np.float32), - lat=np.linspace(-1./(ydim*2), 1.+1./(ydim*2), ydim+1, dtype=np.float32), transpose=True) + densfield = Field( + name="densfield", + data=np.zeros((xdim + 1, ydim + 1), dtype=np.float32), + lon=np.linspace(-1.0 / (xdim * 2), 1.0 + 1.0 / (xdim * 2), xdim + 1, dtype=np.float32), + lat=np.linspace(-1.0 / (ydim * 2), 1.0 + 1.0 / (ydim * 2), ydim + 1, dtype=np.float32), + transpose=True, + ) fieldset.add_field(densfield) pset = ParticleSet.from_field(fieldset, size=npart, pclass=pclass(mode), start_field=fieldset.start) - pdens = np.histogram2d(pset.lon, pset.lat, bins=[np.linspace(0., 1., xdim+1), np.linspace(0., 1., ydim+1)])[0] - assert np.allclose(pdens/sum(pdens.flatten()), startfield/sum(startfield.flatten()), atol=1e-2) + pdens = np.histogram2d(pset.lon, pset.lat, bins=[np.linspace(0.0, 1.0, xdim + 1), np.linspace(0.0, 1.0, ydim + 1)])[ + 0 + ] + assert np.allclose(pdens / sum(pdens.flatten()), startfield / sum(startfield.flatten()), atol=1e-2) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_nearest_neighbor_interpolation2D(mode, k_sample_p, npart=81): dims = (2, 2) - dimensions = {'lon': np.linspace(0., 1., dims[0], dtype=np.float32), - 'lat': np.linspace(0., 1., dims[1], dtype=np.float32)} - data = {'U': np.zeros(dims, dtype=np.float32), - 'V': np.zeros(dims, dtype=np.float32), - 'P': np.zeros(dims, dtype=np.float32)} - data['P'][0, 1] = 1. 
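    # With interp_method = "nearest", the single tracer value of 1.0 placed at
    # the (lon=0, lat=1) node should be returned for every particle in the
    # quadrant closest to that node, and 0.0 elsewhere.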
- fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) - fieldset.P.interp_method = 'nearest' - xv, yv = np.meshgrid(np.linspace(0., 1.0, int(np.sqrt(npart))), np.linspace(0., 1.0, int(np.sqrt(npart)))) + dimensions = { + "lon": np.linspace(0.0, 1.0, dims[0], dtype=np.float32), + "lat": np.linspace(0.0, 1.0, dims[1], dtype=np.float32), + } + data = { + "U": np.zeros(dims, dtype=np.float32), + "V": np.zeros(dims, dtype=np.float32), + "P": np.zeros(dims, dtype=np.float32), + } + data["P"][0, 1] = 1.0 + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) + fieldset.P.interp_method = "nearest" + xv, yv = np.meshgrid(np.linspace(0.0, 1.0, int(np.sqrt(npart))), np.linspace(0.0, 1.0, int(np.sqrt(npart)))) pset = ParticleSet(fieldset, pclass=pclass(mode), lon=xv.flatten(), lat=yv.flatten()) pset.execute(k_sample_p, endtime=1, dt=1) assert np.allclose(pset.p[(pset.lon < 0.5) & (pset.lat > 0.5)], 1.0, rtol=1e-5) assert np.allclose(pset.p[(pset.lon > 0.5) | (pset.lat < 0.5)], 0.0, rtol=1e-5) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_nearest_neighbor_interpolation3D(mode, k_sample_p, npart=81): dims = (2, 2, 2) - dimensions = {'lon': np.linspace(0., 1., dims[0], dtype=np.float32), - 'lat': np.linspace(0., 1., dims[1], dtype=np.float32), - 'depth': np.linspace(0., 1., dims[2], dtype=np.float32)} - data = {'U': np.zeros(dims, dtype=np.float32), - 'V': np.zeros(dims, dtype=np.float32), - 'P': np.zeros(dims, dtype=np.float32)} - data['P'][0, 1, 1] = 1. - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) - fieldset.P.interp_method = 'nearest' + dimensions = { + "lon": np.linspace(0.0, 1.0, dims[0], dtype=np.float32), + "lat": np.linspace(0.0, 1.0, dims[1], dtype=np.float32), + "depth": np.linspace(0.0, 1.0, dims[2], dtype=np.float32), + } + data = { + "U": np.zeros(dims, dtype=np.float32), + "V": np.zeros(dims, dtype=np.float32), + "P": np.zeros(dims, dtype=np.float32), + } + data["P"][0, 1, 1] = 1.0 + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) + fieldset.P.interp_method = "nearest" xv, yv = np.meshgrid(np.linspace(0, 1.0, int(np.sqrt(npart))), np.linspace(0, 1.0, int(np.sqrt(npart)))) # combine a pset at 0m with pset at 1m, as meshgrid does not do 3D - pset = ParticleSet(fieldset, pclass=pclass(mode), lon=xv.flatten(), lat=yv.flatten(), - depth=np.zeros(npart)) - pset2 = ParticleSet(fieldset, pclass=pclass(mode), lon=xv.flatten(), lat=yv.flatten(), - depth=np.ones(npart)) + pset = ParticleSet(fieldset, pclass=pclass(mode), lon=xv.flatten(), lat=yv.flatten(), depth=np.zeros(npart)) + pset2 = ParticleSet(fieldset, pclass=pclass(mode), lon=xv.flatten(), lat=yv.flatten(), depth=np.ones(npart)) pset.add(pset2) pset.execute(k_sample_p, endtime=1, dt=1) assert np.allclose(pset.p[(pset.lon < 0.5) & (pset.lat > 0.5) & (pset.depth > 0.5)], 1.0, rtol=1e-5) assert np.allclose(pset.p[(pset.lon > 0.5) | (pset.lat < 0.5) & (pset.depth < 0.5)], 0.0, rtol=1e-5) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('withDepth', [True, False]) -@pytest.mark.parametrize('arrtype', ['ones', 'rand']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("withDepth", [True, False]) +@pytest.mark.parametrize("arrtype", ["ones", "rand"]) def test_inversedistance_nearland(mode, withDepth, arrtype, k_sample_p, npart=81): dims = (4, 4, 6) if withDepth else (4, 6) - dimensions = {'lon': np.linspace(0., 1., 
dims[-1], dtype=np.float32), - 'lat': np.linspace(0., 1., dims[-2], dtype=np.float32)} + dimensions = { + "lon": np.linspace(0.0, 1.0, dims[-1], dtype=np.float32), + "lat": np.linspace(0.0, 1.0, dims[-2], dtype=np.float32), + } if withDepth: - dimensions['depth'] = np.linspace(0., 1., dims[0], dtype=np.float32) - P = np.random.rand(dims[0], dims[1], dims[2])+2 if arrtype == 'rand' else np.ones(dims, dtype=np.float32) + dimensions["depth"] = np.linspace(0.0, 1.0, dims[0], dtype=np.float32) + P = np.random.rand(dims[0], dims[1], dims[2]) + 2 if arrtype == "rand" else np.ones(dims, dtype=np.float32) P[1, 1:2, 1:6] = np.nan # setting some values to land (NaN) else: - P = np.random.rand(dims[0], dims[1])+2 if arrtype == 'rand' else np.ones(dims, dtype=np.float32) + P = np.random.rand(dims[0], dims[1]) + 2 if arrtype == "rand" else np.ones(dims, dtype=np.float32) P[1:2, 1:6] = np.nan # setting some values to land (NaN) - data = {'U': np.zeros(dims, dtype=np.float32), - 'V': np.zeros(dims, dtype=np.float32), - 'P': P} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') - fieldset.P.interp_method = 'linear_invdist_land_tracer' + data = {"U": np.zeros(dims, dtype=np.float32), "V": np.zeros(dims, dtype=np.float32), "P": P} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") + fieldset.P.interp_method = "linear_invdist_land_tracer" xv, yv = np.meshgrid(np.linspace(0.1, 0.9, int(np.sqrt(npart))), np.linspace(0.1, 0.9, int(np.sqrt(npart)))) - pset = ParticleSet(fieldset, pclass=pclass(mode), lon=xv.flatten(), lat=yv.flatten(), - depth=np.zeros(npart)) + pset = ParticleSet(fieldset, pclass=pclass(mode), lon=xv.flatten(), lat=yv.flatten(), depth=np.zeros(npart)) if withDepth: - pset2 = ParticleSet(fieldset, pclass=pclass(mode), lon=xv.flatten(), lat=yv.flatten(), - depth=np.ones(npart)) + pset2 = ParticleSet(fieldset, pclass=pclass(mode), lon=xv.flatten(), lat=yv.flatten(), depth=np.ones(npart)) pset.add(pset2) pset.execute(k_sample_p, endtime=1, dt=1) - if arrtype == 'rand': + if arrtype == "rand": assert np.all((pset.p > 2) & (pset.p < 3)) else: assert np.allclose(pset.p, 1.0, rtol=1e-5) success = False try: - fieldset.U.interp_method = 'linear_invdist_land_tracer' + fieldset.U.interp_method = "linear_invdist_land_tracer" fieldset.check_complete() except NotImplementedError: success = True assert success -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('boundaryslip', ['freeslip', 'partialslip']) -@pytest.mark.parametrize('withW', [False, True]) -@pytest.mark.parametrize('withT', [False, True]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("boundaryslip", ["freeslip", "partialslip"]) +@pytest.mark.parametrize("withW", [False, True]) +@pytest.mark.parametrize("withT", [False, True]) def test_partialslip_nearland_zonal(mode, boundaryslip, withW, withT, npart=20): dims = (3, 9, 3) - U = 0.1*np.ones(dims, dtype=np.float32) + U = 0.1 * np.ones(dims, dtype=np.float32) U[:, 0, :] = np.nan U[:, -1, :] = np.nan V = np.zeros(dims, dtype=np.float32) V[:, 0, :] = np.nan V[:, -1, :] = np.nan - dimensions = {'lon': np.linspace(-10, 10, dims[2]), - 'lat': np.linspace(0., 4., dims[1], dtype=np.float32), - 'depth': np.linspace(-10, 10, dims[0])} + dimensions = { + "lon": np.linspace(-10, 10, dims[2]), + "lat": np.linspace(0.0, 4.0, dims[1], dtype=np.float32), + "depth": np.linspace(-10, 10, dims[0]), + } if withT: - dimensions['time'] = [0, 2] + dimensions["time"] = [0, 2] U = np.tile(U, (2, 1, 1, 1)) V = np.tile(V, (2, 1, 1, 1)) if 
withW: @@ -296,16 +321,17 @@ def test_partialslip_nearland_zonal(mode, boundaryslip, withW, withT, npart=20): W[:, -1, :] = np.nan if withT: W = np.tile(W, (2, 1, 1, 1)) - data = {'U': U, 'V': V, 'W': W} + data = {"U": U, "V": V, "W": W} else: - data = {'U': U, 'V': V} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', interp_method=boundaryslip) + data = {"U": U, "V": V} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", interp_method=boundaryslip) - pset = ParticleSet(fieldset, pclass=pclass(mode), lon=np.zeros(npart), - lat=np.linspace(0.1, 3.9, npart), depth=np.zeros(npart)) + pset = ParticleSet( + fieldset, pclass=pclass(mode), lon=np.zeros(npart), lat=np.linspace(0.1, 3.9, npart), depth=np.zeros(npart) + ) kernel = AdvectionRK4_3D if withW else AdvectionRK4 pset.execute(kernel, endtime=2, dt=1) - if boundaryslip == 'partialslip': + if boundaryslip == "partialslip": assert np.allclose([p.lon for p in pset if p.lat >= 0.5 and p.lat <= 3.5], 0.1) assert np.allclose([pset[0].lon, pset[-1].lon], 0.06) assert np.allclose([pset[1].lon, pset[-2].lon], 0.08) @@ -319,34 +345,35 @@ def test_partialslip_nearland_zonal(mode, boundaryslip, withW, withT, npart=20): assert np.allclose([p.depth for p in pset], 0.1) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('boundaryslip', ['freeslip', 'partialslip']) -@pytest.mark.parametrize('withW', [False, True]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("boundaryslip", ["freeslip", "partialslip"]) +@pytest.mark.parametrize("withW", [False, True]) def test_partialslip_nearland_meridional(mode, boundaryslip, withW, npart=20): dims = (1, 1, 9) U = np.zeros(dims, dtype=np.float32) U[:, :, 0] = np.nan U[:, :, -1] = np.nan - V = 0.1*np.ones(dims, dtype=np.float32) + V = 0.1 * np.ones(dims, dtype=np.float32) V[:, :, 0] = np.nan V[:, :, -1] = np.nan - dimensions = {'lon': np.linspace(0., 4., dims[2], dtype=np.float32), 'lat': 0, 'depth': 0} + dimensions = {"lon": np.linspace(0.0, 4.0, dims[2], dtype=np.float32), "lat": 0, "depth": 0} if withW: W = 0.1 * np.ones(dims, dtype=np.float32) W[:, :, 0] = np.nan W[:, :, -1] = np.nan - data = {'U': U, 'V': V, 'W': W} - interp_method = {'U': boundaryslip, 'V': boundaryslip, 'W': boundaryslip} + data = {"U": U, "V": V, "W": W} + interp_method = {"U": boundaryslip, "V": boundaryslip, "W": boundaryslip} else: - data = {'U': U, 'V': V} - interp_method = {'U': boundaryslip, 'V': boundaryslip} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', interp_method=interp_method) + data = {"U": U, "V": V} + interp_method = {"U": boundaryslip, "V": boundaryslip} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", interp_method=interp_method) - pset = ParticleSet(fieldset, pclass=pclass(mode), lat=np.zeros(npart), - lon=np.linspace(0.1, 3.9, npart), depth=np.zeros(npart)) + pset = ParticleSet( + fieldset, pclass=pclass(mode), lat=np.zeros(npart), lon=np.linspace(0.1, 3.9, npart), depth=np.zeros(npart) + ) kernel = AdvectionRK4_3D if withW else AdvectionRK4 pset.execute(kernel, endtime=2, dt=1) - if boundaryslip == 'partialslip': + if boundaryslip == "partialslip": assert np.allclose([p.lat for p in pset if p.lon >= 0.5 and p.lon <= 3.5], 0.1) assert np.allclose([pset[0].lat, pset[-1].lat], 0.06) assert np.allclose([pset[1].lat, pset[-2].lat], 0.08) @@ -360,24 +387,25 @@ def test_partialslip_nearland_meridional(mode, boundaryslip, withW, npart=20): assert np.allclose([p.depth for p in pset], 0.1) -@pytest.mark.parametrize('mode', 
['scipy', 'jit']) -@pytest.mark.parametrize('boundaryslip', ['freeslip', 'partialslip']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("boundaryslip", ["freeslip", "partialslip"]) def test_partialslip_nearland_vertical(mode, boundaryslip, npart=20): dims = (9, 1, 1) - U = 0.1*np.ones(dims, dtype=np.float32) + U = 0.1 * np.ones(dims, dtype=np.float32) U[0, :, :] = np.nan U[-1, :, :] = np.nan - V = 0.1*np.ones(dims, dtype=np.float32) + V = 0.1 * np.ones(dims, dtype=np.float32) V[0, :, :] = np.nan V[-1, :, :] = np.nan - dimensions = {'lon': 0, 'lat': 0, 'depth': np.linspace(0., 4., dims[0], dtype=np.float32)} - data = {'U': U, 'V': V} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', interp_method={'U': boundaryslip, 'V': boundaryslip}) + dimensions = {"lon": 0, "lat": 0, "depth": np.linspace(0.0, 4.0, dims[0], dtype=np.float32)} + data = {"U": U, "V": V} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", interp_method={"U": boundaryslip, "V": boundaryslip}) - pset = ParticleSet(fieldset, pclass=pclass(mode), lon=np.zeros(npart), lat=np.zeros(npart), - depth=np.linspace(0.1, 3.9, npart)) + pset = ParticleSet( + fieldset, pclass=pclass(mode), lon=np.zeros(npart), lat=np.zeros(npart), depth=np.linspace(0.1, 3.9, npart) + ) pset.execute(AdvectionRK4, endtime=2, dt=1) - if boundaryslip == 'partialslip': + if boundaryslip == "partialslip": assert np.allclose([p.lon for p in pset if p.depth >= 0.5 and p.depth <= 3.5], 0.1) assert np.allclose([p.lat for p in pset if p.depth >= 0.5 and p.depth <= 3.5], 0.1) assert np.allclose([pset[0].lon, pset[-1].lon, pset[0].lat, pset[-1].lat], 0.06) @@ -387,8 +415,8 @@ def test_partialslip_nearland_vertical(mode, boundaryslip, npart=20): assert np.allclose([p.lat for p in pset], 0.1) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('lat_flip', [False, True]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("lat_flip", [False, True]) def test_fieldset_sample_particle(mode, k_sample_uv, lat_flip, npart=120): """Sample the fieldset using an array of particles. @@ -402,85 +430,86 @@ def test_fieldset_sample_particle(mode, k_sample_uv, lat_flip, npart=120): else: lat = np.linspace(-90, 90, 100, dtype=np.float32) U, V = np.meshgrid(lat, lon) - data = {'U': U, 'V': V} - dimensions = {'lon': lon, 'lat': lat} + data = {"U": U, "V": V} + dimensions = {"lon": lon, "lat": lat} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) lon = np.linspace(-170, 170, npart) lat = np.linspace(-80, 80, npart) - pset = ParticleSet(fieldset, pclass=pclass(mode), lon=lon, lat=np.zeros(npart) + 70.) - pset.execute(pset.Kernel(k_sample_uv), endtime=1., dt=1.) + pset = ParticleSet(fieldset, pclass=pclass(mode), lon=lon, lat=np.zeros(npart) + 70.0) + pset.execute(pset.Kernel(k_sample_uv), endtime=1.0, dt=1.0) assert np.allclose(pset.v, lon, rtol=1e-6) - pset = ParticleSet(fieldset, pclass=pclass(mode), lat=lat, lon=np.zeros(npart) - 45.) - pset.execute(pset.Kernel(k_sample_uv), endtime=1., dt=1.) 
+ pset = ParticleSet(fieldset, pclass=pclass(mode), lat=lat, lon=np.zeros(npart) - 45.0) + pset.execute(pset.Kernel(k_sample_uv), endtime=1.0, dt=1.0) assert np.allclose(pset.u, lat, rtol=1e-6) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_fieldset_sample_geographic(fieldset_geometric, mode, k_sample_uv, npart=120): """Sample a fieldset with conversion to geographic units (degrees).""" fieldset = fieldset_geometric lon = np.linspace(-170, 170, npart) lat = np.linspace(-80, 80, npart) - pset = ParticleSet(fieldset, pclass=pclass(mode), lon=lon, lat=np.zeros(npart) + 70.) - pset.execute(pset.Kernel(k_sample_uv), endtime=1., dt=1.) + pset = ParticleSet(fieldset, pclass=pclass(mode), lon=lon, lat=np.zeros(npart) + 70.0) + pset.execute(pset.Kernel(k_sample_uv), endtime=1.0, dt=1.0) assert np.allclose(pset.v, lon, rtol=1e-6) - pset = ParticleSet(fieldset, pclass=pclass(mode), lat=lat, lon=np.zeros(npart) - 45.) - pset.execute(pset.Kernel(k_sample_uv), endtime=1., dt=1.) + pset = ParticleSet(fieldset, pclass=pclass(mode), lat=lat, lon=np.zeros(npart) - 45.0) + pset.execute(pset.Kernel(k_sample_uv), endtime=1.0, dt=1.0) assert np.allclose(pset.u, lat, rtol=1e-6) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_fieldset_sample_geographic_noconvert(fieldset_geometric, mode, k_sample_uv_noconvert, npart=120): """Sample a fieldset without conversion to geographic units.""" fieldset = fieldset_geometric lon = np.linspace(-170, 170, npart) lat = np.linspace(-80, 80, npart) - pset = ParticleSet(fieldset, pclass=pclass(mode), lon=lon, lat=np.zeros(npart) + 70.) - pset.execute(pset.Kernel(k_sample_uv_noconvert), endtime=1., dt=1.) + pset = ParticleSet(fieldset, pclass=pclass(mode), lon=lon, lat=np.zeros(npart) + 70.0) + pset.execute(pset.Kernel(k_sample_uv_noconvert), endtime=1.0, dt=1.0) assert np.allclose(pset.v, lon * 1000 * 1.852 * 60, rtol=1e-6) - pset = ParticleSet(fieldset, pclass=pclass(mode), lat=lat, lon=np.zeros(npart) - 45.) - pset.execute(pset.Kernel(k_sample_uv_noconvert), endtime=1., dt=1.) + pset = ParticleSet(fieldset, pclass=pclass(mode), lat=lat, lon=np.zeros(npart) - 45.0) + pset.execute(pset.Kernel(k_sample_uv_noconvert), endtime=1.0, dt=1.0) assert np.allclose(pset.u, lat * 1000 * 1.852 * 60, rtol=1e-6) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_fieldset_sample_geographic_polar(fieldset_geometric_polar, mode, k_sample_uv, npart=120): """Sample a fieldset with conversion to geographic units and a pole correction.""" fieldset = fieldset_geometric_polar lon = np.linspace(-170, 170, npart) lat = np.linspace(-80, 80, npart) - pset = ParticleSet(fieldset, pclass=pclass(mode), lon=lon, lat=np.zeros(npart) + 70.) - pset.execute(pset.Kernel(k_sample_uv), endtime=1., dt=1.) + pset = ParticleSet(fieldset, pclass=pclass(mode), lon=lon, lat=np.zeros(npart) + 70.0) + pset.execute(pset.Kernel(k_sample_uv), endtime=1.0, dt=1.0) assert np.allclose(pset.v, lon, rtol=1e-6) - pset = ParticleSet(fieldset, pclass=pclass(mode), lat=lat, lon=np.zeros(npart) - 45.) - pset.execute(pset.Kernel(k_sample_uv), endtime=1., dt=1.) + pset = ParticleSet(fieldset, pclass=pclass(mode), lat=lat, lon=np.zeros(npart) - 45.0) + pset.execute(pset.Kernel(k_sample_uv), endtime=1.0, dt=1.0) # Note: 1.e-2 is a very low rtol, so there seems to be a rather # large sampling error for the JIT correction. 
 assert np.allclose(pset.u, lat, rtol=1e-2)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_meridionalflow_spherical(mode, xdim=100, ydim=200):
     """Create uniform NORTHWARD flow on spherical earth and advect particles.
 
     As the flow is so simple, it can be directly compared to the analytical solution.
     """
-    maxvel = 1.
-    dimensions = {'lon': np.linspace(-180, 180, xdim, dtype=np.float32),
-                  'lat': np.linspace(-90, 90, ydim, dtype=np.float32)}
-    data = {'U': np.zeros([xdim, ydim]),
-            'V': maxvel * np.ones([xdim, ydim])}
+    maxvel = 1.0
+    dimensions = {
+        "lon": np.linspace(-180, 180, xdim, dtype=np.float32),
+        "lat": np.linspace(-90, 90, ydim, dtype=np.float32),
+    }
+    data = {"U": np.zeros([xdim, ydim]), "V": maxvel * np.ones([xdim, ydim])}
 
-    fieldset = FieldSet.from_data(data, dimensions, mesh='spherical', transpose=True)
+    fieldset = FieldSet.from_data(data, dimensions, mesh="spherical", transpose=True)
 
     lonstart = [0, 45]
     latstart = [0, 45]
@@ -494,72 +523,80 @@ def test_meridionalflow_spherical(mode, xdim=100, ydim=200):
     assert pset.lon[1] - lonstart[1] < 1e-4
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_zonalflow_spherical(mode, k_sample_p, xdim=100, ydim=200):
     """Create uniform EASTWARD flow on spherical earth and advect particles.
 
     As the flow is so simple, it can be directly compared to the analytical solution.
     Note that in this case the cosine conversion is needed.
     """
-    maxvel = 1.
+    maxvel = 1.0
     p_fld = 10
-    dimensions = {'lon': np.linspace(-180, 180, xdim, dtype=np.float32),
-                  'lat': np.linspace(-90, 90, ydim, dtype=np.float32)}
-    data = {'U': maxvel * np.ones([xdim, ydim]),
-            'V': np.zeros([xdim, ydim]),
-            'P': p_fld * np.ones([xdim, ydim])}
+    dimensions = {
+        "lon": np.linspace(-180, 180, xdim, dtype=np.float32),
+        "lat": np.linspace(-90, 90, ydim, dtype=np.float32),
+    }
+    data = {"U": maxvel * np.ones([xdim, ydim]), "V": np.zeros([xdim, ydim]), "P": p_fld * np.ones([xdim, ydim])}
 
-    fieldset = FieldSet.from_data(data, dimensions, mesh='spherical', transpose=True)
+    fieldset = FieldSet.from_data(data, dimensions, mesh="spherical", transpose=True)
 
     lonstart = [0, 45]
     latstart = [0, 45]
     runtime = timedelta(hours=24)
     pset = ParticleSet(fieldset, pclass=pclass(mode), lon=lonstart, lat=latstart)
-    pset.execute(pset.Kernel(AdvectionRK4) + k_sample_p,
-                 runtime=runtime, dt=timedelta(hours=1))
+    pset.execute(pset.Kernel(AdvectionRK4) + k_sample_p, runtime=runtime, dt=timedelta(hours=1))
 
     assert pset.lat[0] - latstart[0] < 1e-4
-    assert pset.lon[0] - (lonstart[0] + runtime.total_seconds() * maxvel / 1852 / 60
-                          / cos(latstart[0] * pi / 180)) < 1e-4
+    assert (
+        pset.lon[0] - (lonstart[0] + runtime.total_seconds() * maxvel / 1852 / 60 / cos(latstart[0] * pi / 180)) < 1e-4
+    )
     assert abs(pset.p[0] - p_fld) < 1e-4
     assert pset.lat[1] - latstart[1] < 1e-4
-    assert pset.lon[1] - (lonstart[1] + runtime.total_seconds() * maxvel / 1852 / 60
-                          / cos(latstart[1] * pi / 180)) < 1e-4
+    assert (
+        pset.lon[1] - (lonstart[1] + runtime.total_seconds() * maxvel / 1852 / 60 / cos(latstart[1] * pi / 180)) < 1e-4
+    )
     assert abs(pset.p[1] - p_fld) < 1e-4
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_random_field(mode, k_sample_p, xdim=20, ydim=20, npart=100):
     """Sampling test that checks for overshoots by sampling a field of random numbers between 0 and 1."""
     np.random.seed(123456)
-    dimensions = {'lon': np.linspace(0., 1., xdim,
dtype=np.float32), - 'lat': np.linspace(0., 1., ydim, dtype=np.float32)} - data = {'U': np.zeros((xdim, ydim), dtype=np.float32), - 'V': np.zeros((xdim, ydim), dtype=np.float32), - 'P': np.random.uniform(0, 1., size=(xdim, ydim)), - 'start': np.ones((xdim, ydim), dtype=np.float32)} - - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) - pset = ParticleSet.from_field(fieldset, size=npart, pclass=pclass(mode), - start_field=fieldset.start) - pset.execute(k_sample_p, endtime=1., dt=1.0) + dimensions = { + "lon": np.linspace(0.0, 1.0, xdim, dtype=np.float32), + "lat": np.linspace(0.0, 1.0, ydim, dtype=np.float32), + } + data = { + "U": np.zeros((xdim, ydim), dtype=np.float32), + "V": np.zeros((xdim, ydim), dtype=np.float32), + "P": np.random.uniform(0, 1.0, size=(xdim, ydim)), + "start": np.ones((xdim, ydim), dtype=np.float32), + } + + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) + pset = ParticleSet.from_field(fieldset, size=npart, pclass=pclass(mode), start_field=fieldset.start) + pset.execute(k_sample_p, endtime=1.0, dt=1.0) sampled = pset.p - assert (sampled >= 0.).all() - - -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('allow_time_extrapolation', [True, False]) -def test_sampling_out_of_bounds_time(mode, allow_time_extrapolation, k_sample_p, - xdim=10, ydim=10, tdim=10): - dimensions = {'lon': np.linspace(0., 1., xdim, dtype=np.float32), - 'lat': np.linspace(0., 1., ydim, dtype=np.float32), - 'time': np.linspace(0., 1., tdim, dtype=np.float64)} - data = {'U': np.zeros((xdim, ydim, tdim), dtype=np.float32), - 'V': np.zeros((xdim, ydim, tdim), dtype=np.float32), - 'P': np.ones((xdim, ydim, 1), dtype=np.float32) * dimensions['time']} - - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', - allow_time_extrapolation=allow_time_extrapolation, transpose=True) + assert (sampled >= 0.0).all() + + +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("allow_time_extrapolation", [True, False]) +def test_sampling_out_of_bounds_time(mode, allow_time_extrapolation, k_sample_p, xdim=10, ydim=10, tdim=10): + dimensions = { + "lon": np.linspace(0.0, 1.0, xdim, dtype=np.float32), + "lat": np.linspace(0.0, 1.0, ydim, dtype=np.float32), + "time": np.linspace(0.0, 1.0, tdim, dtype=np.float64), + } + data = { + "U": np.zeros((xdim, ydim, tdim), dtype=np.float32), + "V": np.zeros((xdim, ydim, tdim), dtype=np.float32), + "P": np.ones((xdim, ydim, 1), dtype=np.float32) * dimensions["time"], + } + + fieldset = FieldSet.from_data( + data, dimensions, mesh="flat", allow_time_extrapolation=allow_time_extrapolation, transpose=True + ) pset = ParticleSet(fieldset, pclass=pclass(mode), lon=[0.5], lat=[0.5], time=-1.0) if allow_time_extrapolation: pset.execute(k_sample_p, endtime=-0.9, dt=0.1) @@ -589,45 +626,61 @@ def test_sampling_out_of_bounds_time(mode, allow_time_extrapolation, k_sample_p, pset.execute(k_sample_p, runtime=0.1, dt=0.1) -@pytest.mark.parametrize('mode', ['jit', 'scipy']) -@pytest.mark.parametrize('npart', [1, 10]) -@pytest.mark.parametrize('chs', [False, 'auto', {'lat': ('y', 10), 'lon': ('x', 10)}]) -def test_sampling_multigrids_non_vectorfield_from_file(mode, npart, tmpdir, chs, filename='test_subsets'): +@pytest.mark.parametrize("mode", ["jit", "scipy"]) +@pytest.mark.parametrize("npart", [1, 10]) +@pytest.mark.parametrize("chs", [False, "auto", {"lat": ("y", 10), "lon": ("x", 10)}]) +def test_sampling_multigrids_non_vectorfield_from_file(mode, npart, tmpdir, chs, 
filename="test_subsets"): xdim, ydim = 100, 200 filepath = tmpdir.join(filename) - U = Field('U', np.zeros((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 1., xdim, dtype=np.float32), - lat=np.linspace(0., 1., ydim, dtype=np.float32)) - V = Field('V', np.zeros((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 1., xdim, dtype=np.float32), - lat=np.linspace(0., 1., ydim, dtype=np.float32)) - B = Field('B', np.ones((3*ydim, 4*xdim), dtype=np.float32), - lon=np.linspace(0., 1., 4*xdim, dtype=np.float32), - lat=np.linspace(0., 1., 3*ydim, dtype=np.float32)) + U = Field( + "U", + np.zeros((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim, dtype=np.float32), + ) + V = Field( + "V", + np.zeros((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim, dtype=np.float32), + ) + B = Field( + "B", + np.ones((3 * ydim, 4 * xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, 4 * xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, 3 * ydim, dtype=np.float32), + ) fieldset = FieldSet(U, V) - fieldset.add_field(B, 'B') + fieldset.add_field(B, "B") fieldset.write(filepath) fieldset = None - ufiles = [filepath+'U.nc', ] * 4 - vfiles = [filepath+'V.nc', ] * 4 - bfiles = [filepath+'B.nc', ] * 4 + ufiles = [ + filepath + "U.nc", + ] * 4 + vfiles = [ + filepath + "V.nc", + ] * 4 + bfiles = [ + filepath + "B.nc", + ] * 4 timestamps = np.arange(0, 4, 1) * 86400.0 timestamps = np.expand_dims(timestamps, 1) - files = {'U': ufiles, 'V': vfiles, 'B': bfiles} - variables = {'U': 'vozocrtx', 'V': 'vomecrty', 'B': 'B'} - dimensions = {'lon': 'nav_lon', 'lat': 'nav_lat'} - fieldset = FieldSet.from_netcdf(files, variables, dimensions, timestamps=timestamps, allow_time_extrapolation=True, - chunksize=chs) - - fieldset.add_constant('sample_depth', 2.5) - if chs == 'auto': + files = {"U": ufiles, "V": vfiles, "B": bfiles} + variables = {"U": "vozocrtx", "V": "vomecrty", "B": "B"} + dimensions = {"lon": "nav_lon", "lat": "nav_lat"} + fieldset = FieldSet.from_netcdf( + files, variables, dimensions, timestamps=timestamps, allow_time_extrapolation=True, chunksize=chs + ) + + fieldset.add_constant("sample_depth", 2.5) + if chs == "auto": assert fieldset.U.grid != fieldset.V.grid else: assert fieldset.U.grid is fieldset.V.grid assert fieldset.U.grid is not fieldset.B.grid - TestParticle = ptype[mode].add_variable('sample_var', initial=0.) 
+ TestParticle = ptype[mode].add_variable("sample_var", initial=0.0) pset = ParticleSet.from_line(fieldset, pclass=TestParticle, start=[0.3, 0.3], finish=[0.7, 0.7], size=npart) @@ -637,7 +690,7 @@ def test_sampling_multigrids_non_vectorfield_from_file(mode, npart, tmpdir, chs, kernels = pset.Kernel(AdvectionRK4) + pset.Kernel(test_sample) pset.execute(kernels, runtime=10, dt=1) assert np.allclose(pset.sample_var, 10.0) - if mode == 'jit': + if mode == "jit": assert len(pset.xi.shape) == 2 assert pset.xi.shape[0] == len(pset.lon) assert pset.xi.shape[1] == fieldset.gridset.size @@ -651,26 +704,35 @@ def test_sampling_multigrids_non_vectorfield_from_file(mode, npart, tmpdir, chs, assert np.all(pset.yi[:, 0] < ydim) -@pytest.mark.parametrize('mode', ['jit', 'scipy']) -@pytest.mark.parametrize('npart', [1, 10]) +@pytest.mark.parametrize("mode", ["jit", "scipy"]) +@pytest.mark.parametrize("npart", [1, 10]) def test_sampling_multigrids_non_vectorfield(mode, npart): xdim, ydim = 100, 200 - U = Field('U', np.zeros((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 1., xdim, dtype=np.float32), - lat=np.linspace(0., 1., ydim, dtype=np.float32)) - V = Field('V', np.zeros((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 1., xdim, dtype=np.float32), - lat=np.linspace(0., 1., ydim, dtype=np.float32)) - B = Field('B', np.ones((3*ydim, 4*xdim), dtype=np.float32), - lon=np.linspace(0., 1., 4*xdim, dtype=np.float32), - lat=np.linspace(0., 1., 3*ydim, dtype=np.float32)) + U = Field( + "U", + np.zeros((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim, dtype=np.float32), + ) + V = Field( + "V", + np.zeros((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim, dtype=np.float32), + ) + B = Field( + "B", + np.ones((3 * ydim, 4 * xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, 4 * xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, 3 * ydim, dtype=np.float32), + ) fieldset = FieldSet(U, V) - fieldset.add_field(B, 'B') - fieldset.add_constant('sample_depth', 2.5) + fieldset.add_field(B, "B") + fieldset.add_constant("sample_depth", 2.5) assert fieldset.U.grid is fieldset.V.grid assert fieldset.U.grid is not fieldset.B.grid - TestParticle = ptype[mode].add_variable('sample_var', initial=0.) 
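    # -- editor's note (annotation, not part of the diff): the add_variable call
    # being reflowed here is how both multigrid tests attach a custom variable to
    # a built-in particle type before sampling field B in a kernel. A hedged,
    # self-contained sketch of the pattern (the kernel name sample_b is
    # illustrative only):
    #
    #     from parcels import JITParticle
    #
    #     SampleParticle = JITParticle.add_variable("sample_var", initial=0.0)
    #
    #     def sample_b(particle, fieldset, time):
    #         particle.sample_var += fieldset.B[
    #             time, fieldset.sample_depth, particle.lat, particle.lon
    #         ]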
+ TestParticle = ptype[mode].add_variable("sample_var", initial=0.0) pset = ParticleSet.from_line(fieldset, pclass=TestParticle, start=[0.3, 0.3], finish=[0.7, 0.7], size=npart) @@ -680,7 +742,7 @@ def test_sampling_multigrids_non_vectorfield(mode, npart): kernels = pset.Kernel(AdvectionRK4) + pset.Kernel(test_sample) pset.execute(kernels, runtime=10, dt=1) assert np.allclose(pset.sample_var, 10.0) - if mode == 'jit': + if mode == "jit": assert len(pset.xi.shape) == 2 assert pset.xi.shape[0] == len(pset.lon) assert pset.xi.shape[1] == fieldset.gridset.size @@ -694,16 +756,22 @@ def test_sampling_multigrids_non_vectorfield(mode, npart): assert np.all(pset.yi[:, 0] < ydim) -@pytest.mark.parametrize('mode', ['jit', 'scipy']) -@pytest.mark.parametrize('ugridfactor', [1, 10]) +@pytest.mark.parametrize("mode", ["jit", "scipy"]) +@pytest.mark.parametrize("ugridfactor", [1, 10]) def test_sampling_multiple_grid_sizes(mode, ugridfactor): xdim, ydim = 10, 20 - U = Field('U', np.zeros((ydim*ugridfactor, xdim*ugridfactor), dtype=np.float32), - lon=np.linspace(0., 1., xdim*ugridfactor, dtype=np.float32), - lat=np.linspace(0., 1., ydim*ugridfactor, dtype=np.float32)) - V = Field('V', np.zeros((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 1., xdim, dtype=np.float32), - lat=np.linspace(0., 1., ydim, dtype=np.float32)) + U = Field( + "U", + np.zeros((ydim * ugridfactor, xdim * ugridfactor), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim * ugridfactor, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim * ugridfactor, dtype=np.float32), + ) + V = Field( + "V", + np.zeros((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim, dtype=np.float32), + ) fieldset = FieldSet(U, V) pset = ParticleSet(fieldset, pclass=pclass(mode), lon=[0.8], lat=[0.9]) @@ -713,24 +781,33 @@ def test_sampling_multiple_grid_sizes(mode, ugridfactor): assert fieldset.U.grid is fieldset.V.grid pset.execute(AdvectionRK4, runtime=10, dt=1) assert np.isclose(pset.lon[0], 0.8) - assert np.all((0 <= pset.xi) & (pset.xi < xdim*ugridfactor)) + assert np.all((0 <= pset.xi) & (pset.xi < xdim * ugridfactor)) def test_multiple_grid_addlater_error(): xdim, ydim = 10, 20 - U = Field('U', np.zeros((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 1., xdim, dtype=np.float32), - lat=np.linspace(0., 1., ydim, dtype=np.float32)) - V = Field('V', np.zeros((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 1., xdim, dtype=np.float32), - lat=np.linspace(0., 1., ydim, dtype=np.float32)) + U = Field( + "U", + np.zeros((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim, dtype=np.float32), + ) + V = Field( + "V", + np.zeros((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim, dtype=np.float32), + ) fieldset = FieldSet(U, V) - pset = ParticleSet(fieldset, pclass=pclass('jit'), lon=[0.8], lat=[0.9]) # noqa ; to trigger fieldset.check_complete + pset = ParticleSet(fieldset, pclass=pclass("jit"), lon=[0.8], lat=[0.9]) # noqa ; to trigger fieldset.check_complete - P = Field('P', np.zeros((ydim*10, xdim*10), dtype=np.float32), - lon=np.linspace(0., 1., xdim*10, dtype=np.float32), - lat=np.linspace(0., 1., ydim*10, dtype=np.float32)) + P = Field( + "P", + np.zeros((ydim * 10, xdim * 10), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim * 10, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim * 10, dtype=np.float32), + ) fail = False try: @@ 
-740,34 +817,52 @@ def test_multiple_grid_addlater_error(): assert fail -@pytest.mark.parametrize('mode', ['jit', 'scipy']) +@pytest.mark.parametrize("mode", ["jit", "scipy"]) def test_nestedfields(mode, k_sample_p): xdim = 10 ydim = 20 - U1 = Field('U1', 0.1*np.ones((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 1., xdim, dtype=np.float32), - lat=np.linspace(0., 1., ydim, dtype=np.float32)) - V1 = Field('V1', 0.2*np.ones((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 1., xdim, dtype=np.float32), - lat=np.linspace(0., 1., ydim, dtype=np.float32)) - U2 = Field('U2', 0.3*np.ones((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 2., xdim, dtype=np.float32), - lat=np.linspace(0., 2., ydim, dtype=np.float32)) - V2 = Field('V2', 0.4*np.ones((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 2., xdim, dtype=np.float32), - lat=np.linspace(0., 2., ydim, dtype=np.float32)) - U = NestedField('U', [U1, U2]) - V = NestedField('V', [V1, V2]) + U1 = Field( + "U1", + 0.1 * np.ones((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim, dtype=np.float32), + ) + V1 = Field( + "V1", + 0.2 * np.ones((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim, dtype=np.float32), + ) + U2 = Field( + "U2", + 0.3 * np.ones((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 2.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 2.0, ydim, dtype=np.float32), + ) + V2 = Field( + "V2", + 0.4 * np.ones((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 2.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 2.0, ydim, dtype=np.float32), + ) + U = NestedField("U", [U1, U2]) + V = NestedField("V", [V1, V2]) fieldset = FieldSet(U, V) - P1 = Field('P1', 0.1*np.ones((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 1., xdim, dtype=np.float32), - lat=np.linspace(0., 1., ydim, dtype=np.float32)) - P2 = Field('P2', 0.2*np.ones((ydim, xdim), dtype=np.float32), - lon=np.linspace(0., 2., xdim, dtype=np.float32), - lat=np.linspace(0., 2., ydim, dtype=np.float32)) - P = NestedField('P', [P1, P2]) + P1 = Field( + "P1", + 0.1 * np.ones((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 1.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 1.0, ydim, dtype=np.float32), + ) + P2 = Field( + "P2", + 0.2 * np.ones((ydim, xdim), dtype=np.float32), + lon=np.linspace(0.0, 2.0, xdim, dtype=np.float32), + lat=np.linspace(0.0, 2.0, ydim, dtype=np.float32), + ) + P = NestedField("P", [P1, P2]) fieldset.add_field(P) def Recover(particle, fieldset, time): @@ -780,22 +875,22 @@ def test_nestedfields(mode, k_sample_p): particle.p = 999 particle.state = StatusCode.Evaluate - pset = ParticleSet(fieldset, pclass=pclass(mode), lon=[0], lat=[.3]) - pset.execute(AdvectionRK4+pset.Kernel(k_sample_p), runtime=2, dt=1) - assert np.isclose(pset.lat[0], .5) - assert np.isclose(pset.p[0], .1) + pset = ParticleSet(fieldset, pclass=pclass(mode), lon=[0], lat=[0.3]) + pset.execute(AdvectionRK4 + pset.Kernel(k_sample_p), runtime=2, dt=1) + assert np.isclose(pset.lat[0], 0.5) + assert np.isclose(pset.p[0], 0.1) pset = ParticleSet(fieldset, pclass=pclass(mode), lon=[0], lat=[1.1]) - pset.execute(AdvectionRK4+pset.Kernel(k_sample_p), runtime=2, dt=1) + pset.execute(AdvectionRK4 + pset.Kernel(k_sample_p), runtime=2, dt=1) assert np.isclose(pset.lat[0], 1.5) - assert np.isclose(pset.p[0], .2) + assert np.isclose(pset.p[0], 0.2) pset = ParticleSet(fieldset, pclass=pclass(mode), lon=[0], lat=[2.3]) 
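    # -- editor's note (annotation, not part of the diff): a NestedField tries its
    # member fields in list order and falls through to the next one when the sample
    # point lies outside the current field's domain. The three ParticleSets in this
    # test hit each case; schematically:
    #
    #     fieldset.P[0, 0, 0.3, 0]  # inside P1 (grid spans [0, 1]) -> p = 0.1
    #     fieldset.P[0, 0, 1.1, 0]  # outside P1, inside P2 ([0, 2]) -> p = 0.2
    #     fieldset.P[0, 0, 2.3, 0]  # outside both -> out-of-bounds error,
    #                               # handled here by the Recover kernel (p = 999)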
pset.execute(pset.Kernel(AdvectionRK4) + k_sample_p + Recover, runtime=1, dt=1) assert np.isclose(pset.lat[0], 0) assert np.isclose(pset.p[0], 999) - assert np.allclose(fieldset.UV[0][0, 0, 0, 0], [.1, .2]) + assert np.allclose(fieldset.UV[0][0, 0, 0, 0], [0.1, 0.2]) -@pytest.mark.parametrize('mode', ['jit', 'scipy']) +@pytest.mark.parametrize("mode", ["jit", "scipy"]) def fieldset_sampling_updating_order(mode, tmpdir): def calc_p(t, y, x): return 10 * t + x + 0.2 * y @@ -835,4 +930,7 @@ def fieldset_sampling_updating_order(mode, tmpdir): ds = xr.open_zarr(filename) for t in range(len(ds["obs"])): for i in range(len(ds["trajectory"])): - assert np.isclose(ds["p"].values[i, t], calc_p(float(ds["time"].values[i, t]) / 1e9, ds["lat"].values[i, t], ds["lon"].values[i, t])) + assert np.isclose( + ds["p"].values[i, t], + calc_p(float(ds["time"].values[i, t]) / 1e9, ds["lat"].values[i, t], ds["lon"].values[i, t]), + ) diff --git a/tests/test_grids.py b/tests/test_grids.py index 3a64f03d..6669a7fd 100644 --- a/tests/test_grids.py +++ b/tests/test_grids.py @@ -22,14 +22,13 @@ from parcels import ( Variable, ) -ptype = {'scipy': ScipyParticle, 'jit': JITParticle} +ptype = {"scipy": ScipyParticle, "jit": JITParticle} -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_multi_structured_grids(mode): - def temp_func(lon, lat): - return 20 + lat/1000. + 2 * np.sin(lon*2*np.pi/5000.) + return 20 + lat / 1000.0 + 2 * np.sin(lon * 2 * np.pi / 5000.0) a = 10000 b = 10000 @@ -40,7 +39,7 @@ def test_multi_structured_grids(mode): # Coordinates of the test fieldset (on A-grid in deg) lon_g0 = np.linspace(0, a, xdim_g0, dtype=np.float32) lat_g0 = np.linspace(0, b, ydim_g0, dtype=np.float32) - time_g0 = np.linspace(0., 1000., 2, dtype=np.float64) + time_g0 = np.linspace(0.0, 1000.0, 2, dtype=np.float64) grid_0 = RectilinearZGrid(lon_g0, lat_g0, time=time_g0) # Grid 1 @@ -49,31 +48,31 @@ def test_multi_structured_grids(mode): # Coordinates of the test fieldset (on A-grid in deg) lon_g1 = np.linspace(0, a, xdim_g1, dtype=np.float32) lat_g1 = np.linspace(0, b, ydim_g1, dtype=np.float32) - time_g1 = np.linspace(0., 1000., 2, dtype=np.float64) + time_g1 = np.linspace(0.0, 1000.0, 2, dtype=np.float64) grid_1 = RectilinearZGrid(lon_g1, lat_g1, time=time_g1) u_data = np.ones((lon_g0.size, lat_g0.size, time_g0.size), dtype=np.float32) - u_data = 2*u_data - u_field = Field('U', u_data, grid=grid_0, transpose=True) + u_data = 2 * u_data + u_field = Field("U", u_data, grid=grid_0, transpose=True) temp0_data = np.empty((lon_g0.size, lat_g0.size, time_g0.size), dtype=np.float32) for i in range(lon_g0.size): for j in range(lat_g0.size): temp0_data[i, j, :] = temp_func(lon_g0[i], lat_g0[j]) - temp0_field = Field('temp0', temp0_data, grid=grid_0, transpose=True) + temp0_field = Field("temp0", temp0_data, grid=grid_0, transpose=True) v_data = np.zeros((lon_g1.size, lat_g1.size, time_g1.size), dtype=np.float32) - v_field = Field('V', v_data, grid=grid_1, transpose=True) + v_field = Field("V", v_data, grid=grid_1, transpose=True) temp1_data = np.empty((lon_g1.size, lat_g1.size, time_g1.size), dtype=np.float32) for i in range(lon_g1.size): for j in range(lat_g1.size): temp1_data[i, j, :] = temp_func(lon_g1[i], lat_g1[j]) - temp1_field = Field('temp1', temp1_data, grid=grid_1, transpose=True) + temp1_field = Field("temp1", temp1_data, grid=grid_1, transpose=True) other_fields = {} - other_fields['temp0'] = temp0_field - other_fields['temp1'] = temp1_field + 
other_fields["temp0"] = temp0_field + other_fields["temp1"] = temp1_field fieldset = FieldSet(u_field, v_field, fields=other_fields) @@ -81,12 +80,12 @@ def test_multi_structured_grids(mode): # Note that fieldset.temp is interpolated at time=time+dt. # Indeed, sampleTemp is called at time=time, but the result is written # at time=time+dt, after the Kernel update - particle.temp0 = fieldset.temp0[time+particle.dt, particle.depth, particle.lat, particle.lon] - particle.temp1 = fieldset.temp1[time+particle.dt, particle.depth, particle.lat, particle.lon] + particle.temp0 = fieldset.temp0[time + particle.dt, particle.depth, particle.lat, particle.lon] + particle.temp1 = fieldset.temp1[time + particle.dt, particle.depth, particle.lat, particle.lon] - MyParticle = ptype[mode].add_variables([ - Variable('temp0', dtype=np.float32, initial=20.), - Variable('temp1', dtype=np.float32, initial=20.)]) + MyParticle = ptype[mode].add_variables( + [Variable("temp0", dtype=np.float32, initial=20.0), Variable("temp1", dtype=np.float32, initial=20.0)] + ) pset = ParticleSet.from_list(fieldset, MyParticle, lon=[3001], lat=[5001], repeatdt=1) @@ -109,12 +108,11 @@ def test_multi_structured_grids(mode): def test_time_format_in_grid(): lon = np.linspace(0, 1, 2, dtype=np.float32) lat = np.linspace(0, 1, 2, dtype=np.float32) - time = np.array([np.datetime64('2000-01-01')]*2) + time = np.array([np.datetime64("2000-01-01")] * 2) RectilinearZGrid(lon, lat, time=time) def test_avoid_repeated_grids(): - lon_g0 = np.linspace(0, 1000, 11, dtype=np.float32) lat_g0 = np.linspace(0, 1000, 11, dtype=np.float32) time_g0 = np.linspace(0, 1000, 2, dtype=np.float64) @@ -126,15 +124,15 @@ def test_avoid_repeated_grids(): grid_1 = RectilinearZGrid(lon_g1, lat_g1, time=time_g1) u_data = np.zeros((lon_g0.size, lat_g0.size, time_g0.size), dtype=np.float32) - u_field = Field('U', u_data, grid=grid_0, transpose=True) + u_field = Field("U", u_data, grid=grid_0, transpose=True) v_data = np.zeros((lon_g1.size, lat_g1.size, time_g1.size), dtype=np.float32) - v_field = Field('V', v_data, grid=grid_1, transpose=True) + v_field = Field("V", v_data, grid=grid_1, transpose=True) - temp0_field = Field('temp', u_data, lon=lon_g0, lat=lat_g0, time=time_g0, transpose=True) + temp0_field = Field("temp", u_data, lon=lon_g0, lat=lat_g0, time=time_g0, transpose=True) other_fields = {} - other_fields['temp'] = temp0_field + other_fields["temp"] = temp0_field fieldset = FieldSet(u_field, v_field, fields=other_fields) assert fieldset.gridset.size == 2 @@ -142,20 +140,21 @@ def test_avoid_repeated_grids(): assert fieldset.V.grid is not fieldset.U.grid -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_multigrids_pointer(mode): lon_g0 = np.linspace(0, 1e4, 21, dtype=np.float32) lat_g0 = np.linspace(0, 1000, 2, dtype=np.float32) depth_g0 = np.zeros((5, lat_g0.size, lon_g0.size), dtype=np.float32) def bath_func(lon): - return lon / 1000. 
+ 10 + return lon / 1000.0 + 10 + bath = bath_func(lon_g0) zdim = depth_g0.shape[0] for i in range(lon_g0.size): for k in range(zdim): - depth_g0[k, :, i] = bath[i] * k / (zdim-1) + depth_g0[k, :, i] = bath[i] * k / (zdim - 1) grid_0 = RectilinearSGrid(lon_g0, lat_g0, depth=depth_g0) grid_1 = RectilinearSGrid(lon_g0, lat_g0, depth=depth_g0) @@ -164,11 +163,11 @@ def test_multigrids_pointer(mode): v_data = np.zeros((zdim, lat_g0.size, lon_g0.size), dtype=np.float32) w_data = np.zeros((zdim, lat_g0.size, lon_g0.size), dtype=np.float32) - u_field = Field('U', u_data, grid=grid_0) - v_field = Field('V', v_data, grid=grid_0) - w_field = Field('W', w_data, grid=grid_1) + u_field = Field("U", u_data, grid=grid_0) + v_field = Field("V", v_data, grid=grid_0) + w_field = Field("W", w_data, grid=grid_1) - fieldset = FieldSet(u_field, v_field, fields={'W': w_field}) + fieldset = FieldSet(u_field, v_field, fields={"W": w_field}) fieldset.add_periodic_halo(zonal=3, meridional=2) # unit test of halo for SGrid assert u_field.grid == v_field.grid @@ -180,8 +179,8 @@ def test_multigrids_pointer(mode): pset.execute(AdvectionRK4_3D, runtime=1000, dt=500) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('z4d', ['True', 'False']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("z4d", ["True", "False"]) def test_rectilinear_s_grid_sampling(mode, z4d): lon_g0 = np.linspace(-3e4, 3e4, 61, dtype=np.float32) lat_g0 = np.linspace(0, 1000, 2, dtype=np.float32) @@ -192,19 +191,20 @@ def test_rectilinear_s_grid_sampling(mode, z4d): depth_g0 = np.zeros((5, lat_g0.size, lon_g0.size), dtype=np.float32) def bath_func(lon): - bath = (lon <= -2e4) * 20. - bath += (lon > -2e4) * (lon < 2e4) * (110. + 90 * np.sin(lon/2e4 * np.pi/2.)) - bath += (lon >= 2e4) * 200. + bath = (lon <= -2e4) * 20.0 + bath += (lon > -2e4) * (lon < 2e4) * (110.0 + 90 * np.sin(lon / 2e4 * np.pi / 2.0)) + bath += (lon >= 2e4) * 200.0 return bath + bath = bath_func(lon_g0) zdim = depth_g0.shape[-3] for i in range(depth_g0.shape[-1]): for k in range(zdim): if z4d: - depth_g0[:, k, :, i] = bath[i] * k / (zdim-1) + depth_g0[:, k, :, i] = bath[i] * k / (zdim - 1) else: - depth_g0[k, :, i] = bath[i] * k / (zdim-1) + depth_g0[k, :, i] = bath[i] * k / (zdim - 1) grid = RectilinearSGrid(lon_g0, lat_g0, depth=depth_g0, time=time_g0) @@ -212,31 +212,30 @@ def test_rectilinear_s_grid_sampling(mode, z4d): v_data = np.zeros((grid.tdim, grid.zdim, grid.ydim, grid.xdim), dtype=np.float32) temp_data = np.zeros((grid.tdim, grid.zdim, grid.ydim, grid.xdim), dtype=np.float32) for k in range(1, zdim): - temp_data[:, k, :, :] = k / (zdim-1.) - u_field = Field('U', u_data, grid=grid) - v_field = Field('V', v_data, grid=grid) - temp_field = Field('temp', temp_data, grid=grid) + temp_data[:, k, :, :] = k / (zdim - 1.0) + u_field = Field("U", u_data, grid=grid) + v_field = Field("V", v_data, grid=grid) + temp_field = Field("temp", temp_data, grid=grid) other_fields = {} - other_fields['temp'] = temp_field + other_fields["temp"] = temp_field fieldset = FieldSet(u_field, v_field, fields=other_fields) def sampleTemp(particle, fieldset, time): particle.temp = fieldset.temp[time, particle.depth, particle.lat, particle.lon] - MyParticle = ptype[mode].add_variable('temp', dtype=np.float32, initial=20.) 
+    MyParticle = ptype[mode].add_variable("temp", dtype=np.float32, initial=20.0)
 
     lon = 400
     lat = 0
-    ratio = .3
-    pset = ParticleSet.from_list(fieldset, MyParticle,
-                                 lon=[lon], lat=[lat], depth=[bath_func(lon)*ratio])
+    ratio = 0.3
+    pset = ParticleSet.from_list(fieldset, MyParticle, lon=[lon], lat=[lat], depth=[bath_func(lon) * ratio])
     pset.execute(pset.Kernel(sampleTemp), runtime=1)
     assert np.allclose(pset.temp[0], ratio, atol=1e-4)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_rectilinear_s_grids_advect1(mode):
     # Constant water transport towards the east. check that the particle stays at the same relative depth (z/bath)
     lon_g0 = np.linspace(0, 1e4, 21, dtype=np.float32)
@@ -244,12 +243,13 @@ def test_rectilinear_s_grids_advect1(mode):
     lat_g0 = np.linspace(0, 1000, 2, dtype=np.float32)
     depth_g0 = np.zeros((lon_g0.size, lat_g0.size, 5), dtype=np.float32)
 
     def bath_func(lon):
-        return lon / 1000. + 10
+        return lon / 1000.0 + 10
+
     bath = bath_func(lon_g0)
 
     for i in range(depth_g0.shape[0]):
         for k in range(depth_g0.shape[2]):
-            depth_g0[i, :, k] = bath[i] * k / (depth_g0.shape[2]-1)
+            depth_g0[i, :, k] = bath[i] * k / (depth_g0.shape[2] - 1)
     depth_g0 = depth_g0.transpose()  # we don't change it on purpose, to check if the transpose op is fixed in jit
 
     grid = RectilinearSGrid(lon_g0, lat_g0, depth=depth_g0)
@@ -263,23 +263,23 @@ def test_rectilinear_s_grids_advect1(mode):
         for k in range(zdim):
             w_data[k, :, i] = u_data[k, :, i] * depth_g0[k, :, i] / bath[i] * 1e-3
 
-    u_field = Field('U', u_data, grid=grid)
-    v_field = Field('V', v_data, grid=grid)
-    w_field = Field('W', w_data, grid=grid)
+    u_field = Field("U", u_data, grid=grid)
+    v_field = Field("V", v_data, grid=grid)
+    w_field = Field("W", w_data, grid=grid)
 
-    fieldset = FieldSet(u_field, v_field, fields={'W': w_field})
+    fieldset = FieldSet(u_field, v_field, fields={"W": w_field})
 
     lon = np.zeros(11)
     lat = np.zeros(11)
-    ratio = [min(i/10., .99) for i in range(11)]
-    depth = bath_func(lon)*ratio
+    ratio = [min(i / 10.0, 0.99) for i in range(11)]
+    depth = bath_func(lon) * ratio
     pset = ParticleSet.from_list(fieldset, ptype[mode], lon=lon, lat=lat, depth=depth)
     pset.execute(AdvectionRK4_3D, runtime=10000, dt=500)
-    assert np.allclose(pset.depth/bath_func(pset.lon), ratio)
+    assert np.allclose(pset.depth / bath_func(pset.lon), ratio)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_rectilinear_s_grids_advect2(mode):
     # Move particle towards the east, check relative depth evolution
     lon_g0 = np.linspace(0, 1e4, 21, dtype=np.float32)
@@ -287,13 +287,14 @@ def test_rectilinear_s_grids_advect2(mode):
     lat_g0 = np.linspace(0, 1000, 2, dtype=np.float32)
     depth_g0 = np.zeros((5, lat_g0.size, lon_g0.size), dtype=np.float32)
 
     def bath_func(lon):
-        return lon / 1000. + 10
+        return lon / 1000.0 + 10
+
     bath = bath_func(lon_g0)
 
     zdim = depth_g0.shape[0]
     for i in range(lon_g0.size):
         for k in range(zdim):
-            depth_g0[k, :, i] = bath[i] * k / (zdim-1)
+            depth_g0[k, :, i] = bath[i] * k / (zdim - 1)
 
     grid = RectilinearSGrid(lon_g0, lat_g0, depth=depth_g0)
@@ -301,38 +302,37 @@ def test_rectilinear_s_grids_advect2(mode):
     v_data = np.zeros((zdim, lat_g0.size, lon_g0.size), dtype=np.float32)
     rel_depth_data = np.zeros((zdim, lat_g0.size, lon_g0.size), dtype=np.float32)
     for k in range(1, zdim):
-        rel_depth_data[k, :, :] = k / (zdim-1.)
+ rel_depth_data[k, :, :] = k / (zdim - 1.0) - u_field = Field('U', u_data, grid=grid) - v_field = Field('V', v_data, grid=grid) - rel_depth_field = Field('relDepth', rel_depth_data, grid=grid) - fieldset = FieldSet(u_field, v_field, fields={'relDepth': rel_depth_field}) + u_field = Field("U", u_data, grid=grid) + v_field = Field("V", v_data, grid=grid) + rel_depth_field = Field("relDepth", rel_depth_data, grid=grid) + fieldset = FieldSet(u_field, v_field, fields={"relDepth": rel_depth_field}) - MyParticle = ptype[mode].add_variable('relDepth', dtype=np.float32, initial=20.) + MyParticle = ptype[mode].add_variable("relDepth", dtype=np.float32, initial=20.0) def moveEast(particle, fieldset, time): particle_dlon += 5 * particle.dt # noqa particle.relDepth = fieldset.relDepth[time, particle.depth, particle.lat, particle.lon] - depth = .9 + depth = 0.9 pset = ParticleSet.from_list(fieldset, MyParticle, lon=[0], lat=[0], depth=[depth]) kernel = pset.Kernel(moveEast) for _ in range(10): pset.execute(kernel, runtime=100, dt=50) - assert np.allclose(pset.relDepth[0], depth/bath_func(pset.lon[0])) + assert np.allclose(pset.relDepth[0], depth / bath_func(pset.lon[0])) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_curvilinear_grids(mode): - x = np.linspace(0, 1e3, 7, dtype=np.float32) y = np.linspace(0, 1e3, 5, dtype=np.float32) (xx, yy) = np.meshgrid(x, y) - r = np.sqrt(xx*xx+yy*yy) + r = np.sqrt(xx * xx + yy * yy) theta = np.arctan2(yy, xx) - theta = theta + np.pi/6. + theta = theta + np.pi / 6.0 lon = r * np.cos(theta) lat = r * np.sin(theta) @@ -342,33 +342,39 @@ def test_curvilinear_grids(mode): u_data = np.ones((2, y.size, x.size), dtype=np.float32) v_data = np.zeros((2, y.size, x.size), dtype=np.float32) u_data[0, :, :] = lon[:, :] + lat[:, :] - u_field = Field('U', u_data, grid=grid, transpose=False) - v_field = Field('V', v_data, grid=grid, transpose=False) + u_field = Field("U", u_data, grid=grid, transpose=False) + v_field = Field("V", v_data, grid=grid, transpose=False) fieldset = FieldSet(u_field, v_field) def sampleSpeed(particle, fieldset, time): u, v = fieldset.UV[time, particle.depth, particle.lat, particle.lon] - particle.speed = math.sqrt(u*u+v*v) + particle.speed = math.sqrt(u * u + v * v) - MyParticle = ptype[mode].add_variable('speed', dtype=np.float32, initial=0.) 
+ MyParticle = ptype[mode].add_variable("speed", dtype=np.float32, initial=0.0) pset = ParticleSet.from_list(fieldset, MyParticle, lon=[400, -200], lat=[600, 600]) pset.execute(pset.Kernel(sampleSpeed), runtime=1) assert np.allclose(pset.speed[0], 1000) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_nemo_grid(mode): - data_path = os.path.join(os.path.dirname(__file__), 'test_data/') - - filenames = {'U': {'lon': data_path + 'mask_nemo_cross_180lon.nc', - 'lat': data_path + 'mask_nemo_cross_180lon.nc', - 'data': data_path + 'Uu_eastward_nemo_cross_180lon.nc'}, - 'V': {'lon': data_path + 'mask_nemo_cross_180lon.nc', - 'lat': data_path + 'mask_nemo_cross_180lon.nc', - 'data': data_path + 'Vv_eastward_nemo_cross_180lon.nc'}} - variables = {'U': 'U', 'V': 'V'} - dimensions = {'lon': 'glamf', 'lat': 'gphif'} + data_path = os.path.join(os.path.dirname(__file__), "test_data/") + + filenames = { + "U": { + "lon": data_path + "mask_nemo_cross_180lon.nc", + "lat": data_path + "mask_nemo_cross_180lon.nc", + "data": data_path + "Uu_eastward_nemo_cross_180lon.nc", + }, + "V": { + "lon": data_path + "mask_nemo_cross_180lon.nc", + "lat": data_path + "mask_nemo_cross_180lon.nc", + "data": data_path + "Vv_eastward_nemo_cross_180lon.nc", + }, + } + variables = {"U": "U", "V": "V"} + dimensions = {"lon": "glamf", "lat": "gphif"} fieldset = FieldSet.from_nemo(filenames, variables, dimensions) # test ParticleSet.from_field on curvilinear grids @@ -377,9 +383,9 @@ def test_nemo_grid(mode): def sampleVel(particle, fieldset, time): (particle.zonal, particle.meridional) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] - MyParticle = ptype[mode].add_variables([ - Variable('zonal', dtype=np.float32, initial=0.), - Variable('meridional', dtype=np.float32, initial=0.)]) + MyParticle = ptype[mode].add_variables( + [Variable("zonal", dtype=np.float32, initial=0.0), Variable("meridional", dtype=np.float32, initial=0.0)] + ) lonp = 175.5 latp = 81.5 @@ -391,18 +397,24 @@ def test_nemo_grid(mode): assert abs(v) < 1e-4 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_advect_nemo(mode): - data_path = os.path.join(os.path.dirname(__file__), 'test_data/') - - filenames = {'U': {'lon': data_path + 'mask_nemo_cross_180lon.nc', - 'lat': data_path + 'mask_nemo_cross_180lon.nc', - 'data': data_path + 'Uu_eastward_nemo_cross_180lon.nc'}, - 'V': {'lon': data_path + 'mask_nemo_cross_180lon.nc', - 'lat': data_path + 'mask_nemo_cross_180lon.nc', - 'data': data_path + 'Vv_eastward_nemo_cross_180lon.nc'}} - variables = {'U': 'U', 'V': 'V'} - dimensions = {'lon': 'glamf', 'lat': 'gphif'} + data_path = os.path.join(os.path.dirname(__file__), "test_data/") + + filenames = { + "U": { + "lon": data_path + "mask_nemo_cross_180lon.nc", + "lat": data_path + "mask_nemo_cross_180lon.nc", + "data": data_path + "Uu_eastward_nemo_cross_180lon.nc", + }, + "V": { + "lon": data_path + "mask_nemo_cross_180lon.nc", + "lat": data_path + "mask_nemo_cross_180lon.nc", + "data": data_path + "Vv_eastward_nemo_cross_180lon.nc", + }, + } + variables = {"U": "U", "V": "V"} + dimensions = {"lon": "glamf", "lat": "gphif"} fieldset = FieldSet.from_nemo(filenames, variables, dimensions) lonp = 175.5 @@ -412,153 +424,153 @@ def test_advect_nemo(mode): assert abs(pset.lat[0] - latp) < 1e-3 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('time', [True, False]) +@pytest.mark.parametrize("mode", 
["scipy", "jit"]) +@pytest.mark.parametrize("time", [True, False]) def test_cgrid_uniform_2dvel(mode, time): - lon = np.array([[0, 2], [.4, 1.5]]) - lat = np.array([[0, -.5], [.8, .5]]) - U = np.array([[-99, -99], [4.4721359549995793e-01, 1.3416407864998738e+00]]) - V = np.array([[-99, 1.2126781251816650e+00], [-99, 1.2278812270298409e+00]]) + lon = np.array([[0, 2], [0.4, 1.5]]) + lat = np.array([[0, -0.5], [0.8, 0.5]]) + U = np.array([[-99, -99], [4.4721359549995793e-01, 1.3416407864998738e00]]) + V = np.array([[-99, 1.2126781251816650e00], [-99, 1.2278812270298409e00]]) if time: U = np.stack((U, U)) V = np.stack((V, V)) - dimensions = {'lat': lat, 'lon': lon, 'time': np.array([0, 10])} + dimensions = {"lat": lat, "lon": lon, "time": np.array([0, 10])} else: - dimensions = {'lat': lat, 'lon': lon} - data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') - fieldset.U.interp_method = 'cgrid_velocity' - fieldset.V.interp_method = 'cgrid_velocity' + dimensions = {"lat": lat, "lon": lon} + data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32)} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") + fieldset.U.interp_method = "cgrid_velocity" + fieldset.V.interp_method = "cgrid_velocity" def sampleVel(particle, fieldset, time): (particle.zonal, particle.meridional) = fieldset.UV[time, particle.depth, particle.lat, particle.lon] - MyParticle = ptype[mode].add_variables([ - Variable('zonal', dtype=np.float32, initial=0.), - Variable('meridional', dtype=np.float32, initial=0.)]) + MyParticle = ptype[mode].add_variables( + [Variable("zonal", dtype=np.float32, initial=0.0), Variable("meridional", dtype=np.float32, initial=0.0)] + ) - pset = ParticleSet.from_list(fieldset, MyParticle, lon=.7, lat=.3) + pset = ParticleSet.from_list(fieldset, MyParticle, lon=0.7, lat=0.3) pset.execute(pset.Kernel(sampleVel), runtime=1) assert (pset[0].zonal - 1) < 1e-6 assert (pset[0].meridional - 1) < 1e-6 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('vert_mode', ['zlev', 'slev1', 'slev2']) -@pytest.mark.parametrize('time', [True, False]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("vert_mode", ["zlev", "slev1", "slev2"]) +@pytest.mark.parametrize("time", [True, False]) def test_cgrid_uniform_3dvel(mode, vert_mode, time): - - lon = np.array([[0, 2], [.4, 1.5]]) - lat = np.array([[0, -.5], [.8, .5]]) + lon = np.array([[0, 2], [0.4, 1.5]]) + lat = np.array([[0, -0.5], [0.8, 0.5]]) u0 = 4.4721359549995793e-01 - u1 = 1.3416407864998738e+00 - v0 = 1.2126781251816650e+00 - v1 = 1.2278812270298409e+00 + u1 = 1.3416407864998738e00 + v0 = 1.2126781251816650e00 + v1 = 1.2278812270298409e00 w0 = 1 w1 = 1 - if vert_mode == 'zlev': + if vert_mode == "zlev": depth = np.array([0, 1]) - elif vert_mode == 'slev1': + elif vert_mode == "slev1": depth = np.array([[[0, 0], [0, 0]], [[1, 1], [1, 1]]]) - elif vert_mode == 'slev2': - depth = np.array([[[-1, -.6], [-1.1257142857142859, -.9]], - [[1, 1.5], [0.50857142857142845, .8]]]) - w0 = 1.0483007922296661e+00 - w1 = 1.3098951476312375e+00 - - U = np.array([[[-99, -99], [u0, u1]], - [[-99, -99], [-99, -99]]]) - V = np.array([[[-99, v0], [-99, v1]], - [[-99, -99], [-99, -99]]]) - W = np.array([[[-99, -99], [-99, w0]], - [[-99, -99], [-99, w1]]]) + elif vert_mode == "slev2": + depth = np.array([[[-1, -0.6], [-1.1257142857142859, -0.9]], [[1, 1.5], [0.50857142857142845, 0.8]]]) + w0 = 1.0483007922296661e00 
+ w1 = 1.3098951476312375e00 + + U = np.array([[[-99, -99], [u0, u1]], [[-99, -99], [-99, -99]]]) + V = np.array([[[-99, v0], [-99, v1]], [[-99, -99], [-99, -99]]]) + W = np.array([[[-99, -99], [-99, w0]], [[-99, -99], [-99, w1]]]) if time: U = np.stack((U, U)) V = np.stack((V, V)) W = np.stack((W, W)) - dimensions = {'lat': lat, 'lon': lon, 'depth': depth, 'time': np.array([0, 10])} + dimensions = {"lat": lat, "lon": lon, "depth": depth, "time": np.array([0, 10])} else: - dimensions = {'lat': lat, 'lon': lon, 'depth': depth} - data = {'U': np.array(U, dtype=np.float32), - 'V': np.array(V, dtype=np.float32), - 'W': np.array(W, dtype=np.float32)} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat') - fieldset.U.interp_method = 'cgrid_velocity' - fieldset.V.interp_method = 'cgrid_velocity' - fieldset.W.interp_method = 'cgrid_velocity' + dimensions = {"lat": lat, "lon": lon, "depth": depth} + data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32), "W": np.array(W, dtype=np.float32)} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat") + fieldset.U.interp_method = "cgrid_velocity" + fieldset.V.interp_method = "cgrid_velocity" + fieldset.W.interp_method = "cgrid_velocity" def sampleVel(particle, fieldset, time): - (particle.zonal, particle.meridional, particle.vertical) = fieldset.UVW[time, particle.depth, particle.lat, particle.lon] - - MyParticle = ptype[mode].add_variables([ - Variable('zonal', dtype=np.float32, initial=0.), - Variable('meridional', dtype=np.float32, initial=0.), - Variable('vertical', dtype=np.float32, initial=0.)]) - - pset = ParticleSet.from_list(fieldset, MyParticle, lon=.7, lat=.3, depth=.2) + (particle.zonal, particle.meridional, particle.vertical) = fieldset.UVW[ + time, particle.depth, particle.lat, particle.lon + ] + + MyParticle = ptype[mode].add_variables( + [ + Variable("zonal", dtype=np.float32, initial=0.0), + Variable("meridional", dtype=np.float32, initial=0.0), + Variable("vertical", dtype=np.float32, initial=0.0), + ] + ) + + pset = ParticleSet.from_list(fieldset, MyParticle, lon=0.7, lat=0.3, depth=0.2) pset.execute(pset.Kernel(sampleVel), runtime=1) assert abs(pset[0].zonal - 1) < 1e-6 assert abs(pset[0].meridional - 1) < 1e-6 assert abs(pset[0].vertical - 1) < 1e-6 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('vert_mode', ['zlev', 'slev1']) -@pytest.mark.parametrize('time', [True, False]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("vert_mode", ["zlev", "slev1"]) +@pytest.mark.parametrize("time", [True, False]) def test_cgrid_uniform_3dvel_spherical(mode, vert_mode, time): - data_path = os.path.join(os.path.dirname(__file__), 'test_data/') - dim_file = xr.open_dataset(data_path + 'mask_nemo_cross_180lon.nc') - u_file = xr.open_dataset(data_path + 'Uu_eastward_nemo_cross_180lon.nc') - v_file = xr.open_dataset(data_path + 'Vv_eastward_nemo_cross_180lon.nc') + data_path = os.path.join(os.path.dirname(__file__), "test_data/") + dim_file = xr.open_dataset(data_path + "mask_nemo_cross_180lon.nc") + u_file = xr.open_dataset(data_path + "Uu_eastward_nemo_cross_180lon.nc") + v_file = xr.open_dataset(data_path + "Vv_eastward_nemo_cross_180lon.nc") j = 4 i = 11 - lon = np.array(dim_file.glamf[0, j:j+2, i:i+2]) - lat = np.array(dim_file.gphif[0, j:j+2, i:i+2]) - U = np.array(u_file.U[0, j:j+2, i:i+2]) - V = np.array(v_file.V[0, j:j+2, i:i+2]) + lon = np.array(dim_file.glamf[0, j : j + 2, i : i + 2]) + lat = np.array(dim_file.gphif[0, j : j + 2, i : i + 2]) 
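    # -- editor's note (annotation, not part of the diff): glamf/gphif hold the
    # NEMO f-point (cell-corner) coordinates, so the 2x2 slices above define a
    # single C-grid cell. With interp_method="cgrid_velocity", Parcels places U on
    # the cell's east/west faces and V on its north/south faces; for a rectilinear
    # cell the in-cell velocity then reduces (up to curvilinear metric terms) to
    #
    #     u = (1 - xsi) * U_west + xsi * U_east    # linear in xsi only
    #     v = (1 - eta) * V_south + eta * V_north  # linear in eta only
    #
    # with (xsi, eta) the particle's relative position inside the cell.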
+ U = np.array(u_file.U[0, j : j + 2, i : i + 2]) + V = np.array(v_file.V[0, j : j + 2, i : i + 2]) trash = np.zeros((2, 2)) U = np.stack((U, trash)) V = np.stack((V, trash)) w0 = 1 w1 = 1 - W = np.array([[[-99, -99], [-99, w0]], - [[-99, -99], [-99, w1]]]) + W = np.array([[[-99, -99], [-99, w0]], [[-99, -99], [-99, w1]]]) - if vert_mode == 'zlev': + if vert_mode == "zlev": depth = np.array([0, 1]) - elif vert_mode == 'slev1': + elif vert_mode == "slev1": depth = np.array([[[0, 0], [0, 0]], [[1, 1], [1, 1]]]) if time: U = np.stack((U, U)) V = np.stack((V, V)) W = np.stack((W, W)) - dimensions = {'lat': lat, 'lon': lon, 'depth': depth, 'time': np.array([0, 10])} + dimensions = {"lat": lat, "lon": lon, "depth": depth, "time": np.array([0, 10])} else: - dimensions = {'lat': lat, 'lon': lon, 'depth': depth} - data = {'U': np.array(U, dtype=np.float32), - 'V': np.array(V, dtype=np.float32), - 'W': np.array(W, dtype=np.float32)} - fieldset = FieldSet.from_data(data, dimensions, mesh='spherical') - fieldset.U.interp_method = 'cgrid_velocity' - fieldset.V.interp_method = 'cgrid_velocity' - fieldset.W.interp_method = 'cgrid_velocity' + dimensions = {"lat": lat, "lon": lon, "depth": depth} + data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32), "W": np.array(W, dtype=np.float32)} + fieldset = FieldSet.from_data(data, dimensions, mesh="spherical") + fieldset.U.interp_method = "cgrid_velocity" + fieldset.V.interp_method = "cgrid_velocity" + fieldset.W.interp_method = "cgrid_velocity" def sampleVel(particle, fieldset, time): - (particle.zonal, particle.meridional, particle.vertical) = fieldset.UVW[time, particle.depth, particle.lat, particle.lon] - - MyParticle = ptype[mode].add_variables([ - Variable('zonal', dtype=np.float32, initial=0.), - Variable('meridional', dtype=np.float32, initial=0.), - Variable('vertical', dtype=np.float32, initial=0.)]) + (particle.zonal, particle.meridional, particle.vertical) = fieldset.UVW[ + time, particle.depth, particle.lat, particle.lon + ] + + MyParticle = ptype[mode].add_variables( + [ + Variable("zonal", dtype=np.float32, initial=0.0), + Variable("meridional", dtype=np.float32, initial=0.0), + Variable("vertical", dtype=np.float32, initial=0.0), + ] + ) lonp = 179.8 latp = 81.35 - pset = ParticleSet.from_list(fieldset, MyParticle, lon=lonp, lat=latp, depth=.2) + pset = ParticleSet.from_list(fieldset, MyParticle, lon=lonp, lat=latp, depth=0.2) pset.execute(pset.Kernel(sampleVel), runtime=1) pset.zonal[0] = fieldset.U.units.to_source(pset.zonal[0], lonp, latp, 0) pset.meridional[0] = fieldset.V.units.to_source(pset.meridional[0], lonp, latp, 0) @@ -567,26 +579,23 @@ def test_cgrid_uniform_3dvel_spherical(mode, vert_mode, time): assert abs(pset[0].vertical - 1) < 1e-3 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('vert_discretisation', ['zlevel', 'slevel', 'slevel2']) -@pytest.mark.parametrize('deferred_load', [True, False]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("vert_discretisation", ["zlevel", "slevel", "slevel2"]) +@pytest.mark.parametrize("deferred_load", [True, False]) def test_popgrid(mode, vert_discretisation, deferred_load): - mesh = os.path.join(os.path.join(os.path.dirname(__file__), 'test_data'), 'POPtestdata_time.nc') - if vert_discretisation == 'zlevel': - w_dep = 'w_dep' - elif vert_discretisation == 'slevel': - w_dep = 'w_deps' # same as zlevel, but defined as slevel - elif vert_discretisation == 'slevel2': - w_dep = 'w_deps2' # contains shaved cells + 
mesh = os.path.join(os.path.join(os.path.dirname(__file__), "test_data"), "POPtestdata_time.nc") + if vert_discretisation == "zlevel": + w_dep = "w_dep" + elif vert_discretisation == "slevel": + w_dep = "w_deps" # same as zlevel, but defined as slevel + elif vert_discretisation == "slevel2": + w_dep = "w_deps2" # contains shaved cells filenames = mesh - variables = {'U': 'U', - 'V': 'V', - 'W': 'W', - 'T': 'T'} - dimensions = {'lon': 'lon', 'lat': 'lat', 'depth': w_dep, 'time': 'time'} + variables = {"U": "U", "V": "V", "W": "W", "T": "T"} + dimensions = {"lon": "lon", "lat": "lat", "depth": w_dep, "time": "time"} - fieldset = FieldSet.from_pop(filenames, variables, dimensions, mesh='flat', deferred_load=deferred_load) + fieldset = FieldSet.from_pop(filenames, variables, dimensions, mesh="flat", deferred_load=deferred_load) def sampleVel(particle, fieldset, time): (particle.zonal, particle.meridional, particle.vert) = fieldset.UVW[particle] @@ -598,22 +607,25 @@ def test_popgrid(mode, vert_discretisation, deferred_load): particle_ddepth -= 3 # noqa particle.state = StatusCode.Success - MyParticle = ptype[mode].add_variables([ - Variable('zonal', dtype=np.float32, initial=0.), - Variable('meridional', dtype=np.float32, initial=0.), - Variable('vert', dtype=np.float32, initial=0.), - Variable('tracer', dtype=np.float32, initial=0.), - Variable('out_of_bounds', dtype=np.float32, initial=0.)]) + MyParticle = ptype[mode].add_variables( + [ + Variable("zonal", dtype=np.float32, initial=0.0), + Variable("meridional", dtype=np.float32, initial=0.0), + Variable("vert", dtype=np.float32, initial=0.0), + Variable("tracer", dtype=np.float32, initial=0.0), + Variable("out_of_bounds", dtype=np.float32, initial=0.0), + ] + ) pset = ParticleSet.from_list(fieldset, MyParticle, lon=[3, 5, 1], lat=[3, 5, 1], depth=[3, 7, 11]) pset.execute(pset.Kernel(sampleVel) + OutBoundsError, runtime=1) - if vert_discretisation == 'slevel2': - assert np.isclose(pset.vert[0], 0.) - assert np.isclose(pset.zonal[0], 0.) - assert np.isclose(pset.tracer[0], 99.) + if vert_discretisation == "slevel2": + assert np.isclose(pset.vert[0], 0.0) + assert np.isclose(pset.zonal[0], 0.0) + assert np.isclose(pset.tracer[0], 99.0) assert np.isclose(pset.vert[1], -0.0066666666) - assert np.isclose(pset.zonal[1], .015) - assert np.isclose(pset.tracer[1], 1.) 
+ assert np.isclose(pset.zonal[1], 0.015) + assert np.isclose(pset.tracer[1], 1.0) assert pset.out_of_bounds[0] == 0 assert pset.out_of_bounds[1] == 0 assert pset.out_of_bounds[2] == 1 @@ -624,9 +636,9 @@ def test_popgrid(mode, vert_discretisation, deferred_load): assert np.allclose(pset.tracer, 1) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('gridindexingtype', ['mitgcm', 'nemo']) -@pytest.mark.parametrize('coordtype', ['rectilinear', 'curvilinear']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("gridindexingtype", ["mitgcm", "nemo"]) +@pytest.mark.parametrize("coordtype", ["rectilinear", "curvilinear"]) def test_cgrid_indexing(mode, gridindexingtype, coordtype): xdim, ydim = 151, 201 a = b = 20000 # domain size @@ -635,26 +647,26 @@ def test_cgrid_indexing(mode, gridindexingtype, coordtype): dx, dy = lon[2] - lon[1], lat[2] - lat[1] omega = 2 * np.pi / timedelta(days=1).total_seconds() - index_signs = {'nemo': -1, 'mitgcm': 1} + index_signs = {"nemo": -1, "mitgcm": 1} isign = index_signs[gridindexingtype] def rotate_coords(lon, lat, alpha=0): - rotmat = np.array([[np.cos(alpha), np.sin(alpha)], - [-np.sin(alpha), np.cos(alpha)]]) + rotmat = np.array([[np.cos(alpha), np.sin(alpha)], [-np.sin(alpha), np.cos(alpha)]]) lons, lats = np.meshgrid(lon, lat) - rotated = np.einsum('ji, mni -> jmn', rotmat, np.dstack([lons, lats])) + rotated = np.einsum("ji, mni -> jmn", rotmat, np.dstack([lons, lats])) return rotated[0], rotated[1] - if coordtype == 'rectilinear': + if coordtype == "rectilinear": alpha = 0 - elif coordtype == 'curvilinear': - alpha = 15*np.pi/180 + elif coordtype == "curvilinear": + alpha = 15 * np.pi / 180 lon, lat = rotate_coords(lon, lat, alpha) def calc_r_phi(ln, lt): - return np.sqrt(ln ** 2 + lt ** 2), np.arctan2(ln, lt) + return np.sqrt(ln**2 + lt**2), np.arctan2(ln, lt) + + if coordtype == "rectilinear": - if coordtype == 'rectilinear': def calculate_UVR(lat, lon, dx, dy, omega, alpha): U = np.zeros((lat.size, lon.size), dtype=np.float32) V = np.zeros((lat.size, lon.size), dtype=np.float32) @@ -668,7 +680,8 @@ def test_cgrid_indexing(mode, gridindexingtype, coordtype): r, phi = calc_r_phi(lon[i], lat[j] + isign * dy / 2) U[j, i] = omega * r * np.cos(phi) return U, V, R - elif coordtype == 'curvilinear': + elif coordtype == "curvilinear": + def calculate_UVR(lat, lon, dx, dy, omega, alpha): U = np.zeros(lat.shape, dtype=np.float32) V = np.zeros(lat.shape, dtype=np.float32) @@ -677,39 +690,42 @@ def test_cgrid_indexing(mode, gridindexingtype, coordtype): for j in range(lat.shape[0]): r, phi = calc_r_phi(lon[j, i], lat[j, i]) R[j, i] = r - r, phi = calc_r_phi(lon[j, i] + isign * (dx / 2) * np.cos(alpha), lat[j, i] - isign * (dx / 2) * np.sin(alpha)) + r, phi = calc_r_phi( + lon[j, i] + isign * (dx / 2) * np.cos(alpha), lat[j, i] - isign * (dx / 2) * np.sin(alpha) + ) V[j, i] = np.sin(alpha) * (omega * r * np.cos(phi)) + np.cos(alpha) * (-omega * r * np.sin(phi)) - r, phi = calc_r_phi(lon[j, i] + isign * (dy / 2) * np.sin(alpha), lat[j, i] + isign * (dy / 2) * np.cos(alpha)) + r, phi = calc_r_phi( + lon[j, i] + isign * (dy / 2) * np.sin(alpha), lat[j, i] + isign * (dy / 2) * np.cos(alpha) + ) U[j, i] = np.cos(alpha) * (omega * r * np.cos(phi)) - np.sin(alpha) * (-omega * r * np.sin(phi)) return U, V, R U, V, R = calculate_UVR(lat, lon, dx, dy, omega, alpha) - data = {'U': U, 'V': V, 'R': R} - dimensions = {'lon': lon, 'lat': lat} - fieldset = FieldSet.from_data(data, dimensions, mesh='flat', 
gridindexingtype=gridindexingtype) - fieldset.U.interp_method = 'cgrid_velocity' - fieldset.V.interp_method = 'cgrid_velocity' + data = {"U": U, "V": V, "R": R} + dimensions = {"lon": lon, "lat": lat} + fieldset = FieldSet.from_data(data, dimensions, mesh="flat", gridindexingtype=gridindexingtype) + fieldset.U.interp_method = "cgrid_velocity" + fieldset.V.interp_method = "cgrid_velocity" def UpdateR(particle, fieldset, time): if time == 0: particle.radius_start = fieldset.R[time, particle.depth, particle.lat, particle.lon] particle.radius = fieldset.R[time, particle.depth, particle.lat, particle.lon] - MyParticle = ptype[mode].add_variables([ - Variable('radius', dtype=np.float32, initial=0.), - Variable('radius_start', dtype=np.float32, initial=0.)]) + MyParticle = ptype[mode].add_variables( + [Variable("radius", dtype=np.float32, initial=0.0), Variable("radius_start", dtype=np.float32, initial=0.0)] + ) pset = ParticleSet(fieldset, pclass=MyParticle, lon=0, lat=4e3, time=0) - pset.execute(pset.Kernel(UpdateR) + AdvectionRK4, - runtime=timedelta(hours=14), dt=timedelta(minutes=5)) + pset.execute(pset.Kernel(UpdateR) + AdvectionRK4, runtime=timedelta(hours=14), dt=timedelta(minutes=5)) assert np.allclose(pset.radius, pset.radius_start, atol=10) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('gridindexingtype', ['mitgcm', 'nemo']) -@pytest.mark.parametrize('withtime', [False, True]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("gridindexingtype", ["mitgcm", "nemo"]) +@pytest.mark.parametrize("withtime", [False, True]) def test_cgrid_indexing_3D(mode, gridindexingtype, withtime): xdim = zdim = 201 ydim = 2 @@ -721,20 +737,20 @@ def test_cgrid_indexing_3D(mode, gridindexingtype, withtime): dx, dz = lon[1] - lon[0], depth[1] - depth[0] omega = 2 * np.pi / timedelta(days=1).total_seconds() if withtime: - time = np.linspace(0, 24*60*60, 10) + time = np.linspace(0, 24 * 60 * 60, 10) dimensions = {"lon": lon, "lat": lat, "depth": depth, "time": time} dsize = (time.size, depth.size, lat.size, lon.size) else: dimensions = {"lon": lon, "lat": lat, "depth": depth} dsize = (depth.size, lat.size, lon.size) - hindex_signs = {'nemo': -1, 'mitgcm': 1} + hindex_signs = {"nemo": -1, "mitgcm": 1} hsign = hindex_signs[gridindexingtype] def calc_r_phi(ln, dp): # r = np.sqrt(ln ** 2 + dp ** 2) # phi = np.arcsin(dp/r) if r > 0 else 0 - return np.sqrt(ln ** 2 + dp ** 2), np.arctan2(ln, dp) + return np.sqrt(ln**2 + dp**2), np.arctan2(ln, dp) def populate_UVWR(lat, lon, depth, dx, dz, omega): U = np.zeros(dsize, dtype=np.float32) @@ -774,20 +790,19 @@ def test_cgrid_indexing_3D(mode, gridindexingtype, withtime): particle.radius_start = fieldset.R[time, particle.depth, particle.lat, particle.lon] particle.radius = fieldset.R[time, particle.depth, particle.lat, particle.lon] - MyParticle = ptype[mode].add_variables([ - Variable('radius', dtype=np.float32, initial=0.), - Variable('radius_start', dtype=np.float32, initial=0.)]) + MyParticle = ptype[mode].add_variables( + [Variable("radius", dtype=np.float32, initial=0.0), Variable("radius_start", dtype=np.float32, initial=0.0)] + ) pset = ParticleSet(fieldset, pclass=MyParticle, depth=4e3, lon=0, lat=0, time=0) - pset.execute(pset.Kernel(UpdateR) + AdvectionRK4_3D, - runtime=timedelta(hours=14), dt=timedelta(minutes=5)) + pset.execute(pset.Kernel(UpdateR) + AdvectionRK4_3D, runtime=timedelta(hours=14), dt=timedelta(minutes=5)) assert np.allclose(pset.radius, pset.radius_start, atol=10) 
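# -- editor's note (annotation, not part of the diff): the index-sign tables in
# these tests (index_signs = {"nemo": -1, "mitgcm": 1} above, and vindex_signs =
# {"pop": 1, "mom5": -1} below) encode which face of a cell the staggered
# velocity with a given index sits on: NEMO and MITgcm use opposite horizontal
# faces, and POP and MOM5 opposite vertical ones. That is why the synthetic
# solid-body-rotation field is evaluated half a grid step away from the cell
# point, e.g. in the 2D rectilinear case above:
#
#     r, phi = calc_r_phi(lon[i], lat[j] + isign * dy / 2)
#     U[j, i] = omega * r * np.cos(phi)  # isign = -1 ("nemo"), +1 ("mitgcm")
#
# Both conventions then sample the same rotating flow, and the particle's radius
# staying constant (the allclose asserts) confirms the indexing is consistent.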
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('gridindexingtype', ['pop', 'mom5'])
-@pytest.mark.parametrize('withtime', [False, True])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize("gridindexingtype", ["pop", "mom5"])
+@pytest.mark.parametrize("withtime", [False, True])
 def test_bgrid_indexing_3D(mode, gridindexingtype, withtime):
     xdim = zdim = 201
     ydim = 2
@@ -799,18 +814,18 @@ def test_bgrid_indexing_3D(mode, gridindexingtype, withtime):
     dx, dz = lon[1] - lon[0], depth[1] - depth[0]
     omega = 2 * np.pi / timedelta(days=1).total_seconds()
     if withtime:
-        time = np.linspace(0, 24*60*60, 10)
+        time = np.linspace(0, 24 * 60 * 60, 10)
         dimensions = {"lon": lon, "lat": lat, "depth": depth, "time": time}
         dsize = (time.size, depth.size, lat.size, lon.size)
     else:
         dimensions = {"lon": lon, "lat": lat, "depth": depth}
         dsize = (depth.size, lat.size, lon.size)

-    vindex_signs = {'pop': 1, 'mom5': -1}
+    vindex_signs = {"pop": 1, "mom5": -1}
     vsign = vindex_signs[gridindexingtype]

     def calc_r_phi(ln, dp):
-        return np.sqrt(ln ** 2 + dp ** 2), np.arctan2(ln, dp)
+        return np.sqrt(ln**2 + dp**2), np.arctan2(ln, dp)

     def populate_UVWR(lat, lon, depth, dx, dz, omega):
         U = np.zeros(dsize, dtype=np.float32)
@@ -853,40 +868,43 @@ def test_bgrid_indexing_3D(mode, gridindexingtype, withtime):
             particle.radius_start = fieldset.R[time, particle.depth, particle.lat, particle.lon]
         particle.radius = fieldset.R[time, particle.depth, particle.lat, particle.lon]

-    MyParticle = ptype[mode].add_variables([
-        Variable('radius', dtype=np.float32, initial=0.),
-        Variable('radius_start', dtype=np.float32, initial=0.)])
+    MyParticle = ptype[mode].add_variables(
+        [Variable("radius", dtype=np.float32, initial=0.0), Variable("radius_start", dtype=np.float32, initial=0.0)]
+    )

     pset = ParticleSet(fieldset, pclass=MyParticle, depth=-9.995e3, lon=0, lat=0, time=0)
-    pset.execute(pset.Kernel(UpdateR) + AdvectionRK4_3D,
-                 runtime=timedelta(hours=14), dt=timedelta(minutes=5))
+    pset.execute(pset.Kernel(UpdateR) + AdvectionRK4_3D, runtime=timedelta(hours=14), dt=timedelta(minutes=5))
     assert np.allclose(pset.radius, pset.radius_start, atol=10)


-@pytest.mark.parametrize('gridindexingtype', ['pop', 'mom5'])
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('extrapolation', [True, False])
+@pytest.mark.parametrize("gridindexingtype", ["pop", "mom5"])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize("extrapolation", [True, False])
 def test_bgrid_interpolation(gridindexingtype, mode, extrapolation):
     xi, yi = 3, 2
     if extrapolation:
-        zi = 0 if gridindexingtype == 'mom5' else -1
+        zi = 0 if gridindexingtype == "mom5" else -1
     else:
         zi = 2

-    if gridindexingtype == 'mom5':
-        ufile = os.path.join(os.path.join(os.path.dirname(__file__), 'test_data'), 'access-om2-01_u.nc')
-        vfile = os.path.join(os.path.join(os.path.dirname(__file__), 'test_data'), 'access-om2-01_v.nc')
-        wfile = os.path.join(os.path.join(os.path.dirname(__file__), 'test_data'), 'access-om2-01_wt.nc')
+    if gridindexingtype == "mom5":
+        ufile = os.path.join(os.path.join(os.path.dirname(__file__), "test_data"), "access-om2-01_u.nc")
+        vfile = os.path.join(os.path.join(os.path.dirname(__file__), "test_data"), "access-om2-01_v.nc")
+        wfile = os.path.join(os.path.join(os.path.dirname(__file__), "test_data"), "access-om2-01_wt.nc")

-        filenames = {"U": {"lon": ufile, "lat": ufile, "depth": wfile, "data": ufile},
-                     "V": {"lon": ufile, "lat": ufile, "depth": wfile, "data": vfile},
-                     "W": {"lon": ufile, "lat": ufile, "depth": wfile, "data": wfile}}
+        filenames = {
+            "U": {"lon": ufile, "lat": ufile, "depth": wfile, "data": ufile},
+            "V": {"lon": ufile, "lat": ufile, "depth": wfile, "data": vfile},
+            "W": {"lon": ufile, "lat": ufile, "depth": wfile, "data": wfile},
+        }

         variables = {"U": "u", "V": "v", "W": "wt"}
-        dimensions = {"U": {"lon": "xu_ocean", "lat": "yu_ocean", "depth": "sw_ocean", "time": "time"},
-                      "V": {"lon": "xu_ocean", "lat": "yu_ocean", "depth": "sw_ocean", "time": "time"},
-                      "W": {"lon": "xu_ocean", "lat": "yu_ocean", "depth": "sw_ocean", "time": "time"}}
+        dimensions = {
+            "U": {"lon": "xu_ocean", "lat": "yu_ocean", "depth": "sw_ocean", "time": "time"},
+            "V": {"lon": "xu_ocean", "lat": "yu_ocean", "depth": "sw_ocean", "time": "time"},
+            "W": {"lon": "xu_ocean", "lat": "yu_ocean", "depth": "sw_ocean", "time": "time"},
+        }
         fieldset = FieldSet.from_mom5(filenames, variables, dimensions)

         ds_u = xr.open_dataset(ufile)
@@ -896,15 +914,17 @@ def test_bgrid_interpolation(gridindexingtype, mode, extrapolation):
         v = ds_v.v.isel(time=0, st_ocean=zi, yu_ocean=yi, xu_ocean=xi)
         w = ds_w.wt.isel(time=0, sw_ocean=zi, yt_ocean=yi, xt_ocean=xi)

-    elif gridindexingtype == 'pop':
-        datafname = os.path.join(os.path.join(os.path.dirname(__file__), 'test_data'), 'popdata.nc')
-        coordfname = os.path.join(os.path.join(os.path.dirname(__file__), 'test_data'), 'popcoordinates.nc')
-        filenames = {"U": {"lon": coordfname, "lat": coordfname, "depth": coordfname, "data": datafname},
-                     "V": {"lon": coordfname, "lat": coordfname, "depth": coordfname, "data": datafname},
-                     "W": {"lon": coordfname, "lat": coordfname, "depth": coordfname, "data": datafname}}
+    elif gridindexingtype == "pop":
+        datafname = os.path.join(os.path.join(os.path.dirname(__file__), "test_data"), "popdata.nc")
+        coordfname = os.path.join(os.path.join(os.path.dirname(__file__), "test_data"), "popcoordinates.nc")
+        filenames = {
+            "U": {"lon": coordfname, "lat": coordfname, "depth": coordfname, "data": datafname},
+            "V": {"lon": coordfname, "lat": coordfname, "depth": coordfname, "data": datafname},
+            "W": {"lon": coordfname, "lat": coordfname, "depth": coordfname, "data": datafname},
+        }

-        variables = {'U': 'UVEL', 'V': 'VVEL', 'W': 'WVEL'}
-        dimensions = {'lon': 'U_LON_2D', 'lat': 'U_LAT_2D', 'depth': 'w_dep'}
+        variables = {"U": "UVEL", "V": "VVEL", "W": "WVEL"}
+        dimensions = {"lon": "U_LON_2D", "lat": "U_LAT_2D", "depth": "w_dep"}
         fieldset = FieldSet.from_pop(filenames, variables, dimensions)

         dsc = xr.open_dataset(coordfname)
@@ -921,10 +941,13 @@ def test_bgrid_interpolation(gridindexingtype, mode, extrapolation):
             particle.Vvel = fieldset.V[time, particle.depth, particle.lat, particle.lon]
             particle.Wvel = fieldset.W[time, particle.depth, particle.lat, particle.lon]

-    myParticle = ptype[mode].add_variables([
-        Variable("Uvel", dtype=np.float32, initial=0.0),
-        Variable("Vvel", dtype=np.float32, initial=0.0),
-        Variable("Wvel", dtype=np.float32, initial=0.0)])
+    myParticle = ptype[mode].add_variables(
+        [
+            Variable("Uvel", dtype=np.float32, initial=0.0),
+            Variable("Vvel", dtype=np.float32, initial=0.0),
+            Variable("Wvel", dtype=np.float32, initial=0.0),
+        ]
+    )

     for pointtype in ["U", "V", "W"]:
         if gridindexingtype == "pop":
@@ -937,7 +960,7 @@ def test_bgrid_interpolation(gridindexingtype, mode, extrapolation):
             lats = dsc.T_LAT_2D[yi, xi].values
             deps = dsc.w_dep[zi].values
             if extrapolation:
-                deps = 5499.
+                deps = 5499.0
         elif gridindexingtype == "mom5":
             if pointtype in ["U", "V"]:
                 lons = u.xu_ocean.data.reshape(1)
@@ -950,16 +973,15 @@ def test_bgrid_interpolation(gridindexingtype, mode, extrapolation):
             if extrapolation:
                 deps = 0

-        pset = ParticleSet.from_list(fieldset=fieldset, pclass=myParticle,
-                                     lon=lons, lat=lats, depth=deps)
+        pset = ParticleSet.from_list(fieldset=fieldset, pclass=myParticle, lon=lons, lat=lats, depth=deps)
         pset.execute(VelocityInterpolator, runtime=1)

-        convfactor = 0.01 if gridindexingtype == "pop" else 1.
+        convfactor = 0.01 if gridindexingtype == "pop" else 1.0
         if pointtype in ["U", "V"]:
-            assert np.allclose(pset.Uvel[0], u*convfactor)
-            assert np.allclose(pset.Vvel[0], v*convfactor)
+            assert np.allclose(pset.Uvel[0], u * convfactor)
+            assert np.allclose(pset.Vvel[0], v * convfactor)
         elif pointtype == "W":
             if extrapolation:
                 assert np.allclose(pset.Wvel[0], 0, atol=1e-9)
             else:
-                assert np.allclose(pset.Wvel[0], -w*convfactor)
+                assert np.allclose(pset.Wvel[0], -w * convfactor)
diff --git a/tests/test_interaction.py b/tests/test_interaction.py
index 17fc5e32..9f1c72c3 100644
--- a/tests/test_interaction.py
+++ b/tests/test_interaction.py
@@ -18,7 +18,7 @@ from parcels.interaction.neighborsearch import (
 from parcels.interaction.neighborsearch.basehash import BaseHashNeighborSearch
 from parcels.particle import ScipyInteractionParticle, ScipyParticle, Variable

-ptype = {'scipy': ScipyInteractionParticle, 'jit': JITParticle}
+ptype = {"scipy": ScipyInteractionParticle, "jit": JITParticle}


 def DummyMoveNeighbor(particle, fieldset, time, neighbors, mutator):
@@ -42,13 +42,13 @@ def DoNothing(particle, fieldset, time):
     pass


-def fieldset(xdim=20, ydim=20, mesh='spherical'):
+def fieldset(xdim=20, ydim=20, mesh="spherical"):
     """Standard unit mesh fieldset."""
-    lon = np.linspace(0., 1., xdim, dtype=np.float32)
-    lat = np.linspace(0., 1., ydim, dtype=np.float32)
+    lon = np.linspace(0.0, 1.0, xdim, dtype=np.float32)
+    lat = np.linspace(0.0, 1.0, ydim, dtype=np.float32)
     U, V = np.meshgrid(lat, lon)
-    data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
-    dimensions = {'lat': lat, 'lon': lon}
+    data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32)}
+    dimensions = {"lat": lat, "lon": lon}
     return FieldSet.from_data(data, dimensions, mesh=mesh)

@@ -57,28 +57,33 @@ def fieldset_fixture(xdim=20, ydim=20):
     return fieldset(xdim=xdim, ydim=ydim)


-@pytest.mark.parametrize('mode', ['scipy'])
+@pytest.mark.parametrize("mode", ["scipy"])
 def test_simple_interaction_kernel(fieldset, mode):
     lons = [0.0, 0.1, 0.25, 0.44]
     lats = [0.0, 0.0, 0.0, 0.0]
     # Distance in meters R_earth*0.2 degrees
-    interaction_distance = 6371000*0.2*np.pi/180
-    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lons, lat=lats,
-                       interaction_distance=interaction_distance)
-    pset.execute(DoNothing, pyfunc_inter=DummyMoveNeighbor, endtime=2., dt=1.)
+    interaction_distance = 6371000 * 0.2 * np.pi / 180
+    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lons, lat=lats, interaction_distance=interaction_distance)
+    pset.execute(DoNothing, pyfunc_inter=DummyMoveNeighbor, endtime=2.0, dt=1.0)
     assert np.allclose(pset.lat, [0.1, 0.2, 0.1, 0.0], rtol=1e-5)
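Side note (illustrative, not part of the patch): the literal `6371000 * 0.2 * np.pi / 180` recurring in these interaction tests is just an arc length — degrees converted to meters on a sphere with the 6371 km Earth radius the tests hard-code. A hypothetical helper spelling that out:

import numpy as np

R_EARTH = 6371000.0  # mean Earth radius in meters, matching the tests

def degrees_to_meters(deg):
    """Great-circle arc length in meters subtended by `deg` degrees."""
    return R_EARTH * np.deg2rad(deg)

assert np.isclose(degrees_to_meters(0.2), 6371000 * 0.2 * np.pi / 180)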
-@pytest.mark.parametrize('mode', ['scipy'])
-@pytest.mark.parametrize('mesh', ['spherical', 'flat'])
-@pytest.mark.parametrize('periodic_domain_zonal', [False, True])
+@pytest.mark.parametrize("mode", ["scipy"])
+@pytest.mark.parametrize("mesh", ["spherical", "flat"])
+@pytest.mark.parametrize("periodic_domain_zonal", [False, True])
 def test_zonal_periodic_distance(mode, mesh, periodic_domain_zonal):
     fset = fieldset(mesh=mesh)
-    interaction_distance = 0.2 if mesh == 'flat' else 6371000*0.2*np.pi/180
+    interaction_distance = 0.2 if mesh == "flat" else 6371000 * 0.2 * np.pi / 180
     lons = [0.05, 0.4, 0.95]
-    pset = ParticleSet(fset, pclass=ptype[mode], lon=lons, lat=[0.5]*len(lons),
-                       interaction_distance=interaction_distance, periodic_domain_zonal=periodic_domain_zonal)
-    pset.execute(DoNothing, pyfunc_inter=DummyMoveNeighbor, endtime=2., dt=1.)
+    pset = ParticleSet(
+        fset,
+        pclass=ptype[mode],
+        lon=lons,
+        lat=[0.5] * len(lons),
+        interaction_distance=interaction_distance,
+        periodic_domain_zonal=periodic_domain_zonal,
+    )
+    pset.execute(DoNothing, pyfunc_inter=DummyMoveNeighbor, endtime=2.0, dt=1.0)
     if periodic_domain_zonal:
         assert np.allclose([pset[0].lat, pset[2].lat], 0.6)
         assert np.allclose(pset[1].lat, 0.5)
@@ -86,37 +91,37 @@ def test_zonal_periodic_distance(mode, mesh, periodic_domain_zonal):
         assert np.allclose([p.lat for p in pset], 0.5)


-@pytest.mark.parametrize('mode', ['scipy'])
+@pytest.mark.parametrize("mode", ["scipy"])
 def test_concatenate_interaction_kernels(fieldset, mode):
     lons = [0.0, 0.1, 0.25, 0.44]
     lats = [0.0, 0.0, 0.0, 0.0]
     # Distance in meters R_earth*0.2 degrees
-    interaction_distance = 6371000*0.2*np.pi/180
-
-    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lons, lat=lats,
-                       interaction_distance=interaction_distance)
-    pset.execute(DoNothing,
-                 pyfunc_inter=pset.InteractionKernel(DummyMoveNeighbor)
-                 + pset.InteractionKernel(DummyMoveNeighbor), endtime=2.,
-                 dt=1.)
+    interaction_distance = 6371000 * 0.2 * np.pi / 180
+
+    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lons, lat=lats, interaction_distance=interaction_distance)
+    pset.execute(
+        DoNothing,
+        pyfunc_inter=pset.InteractionKernel(DummyMoveNeighbor) + pset.InteractionKernel(DummyMoveNeighbor),
+        endtime=2.0,
+        dt=1.0,
+    )
     # The kernel results are only applied after all interactionkernels
     # have been executed, so we expect the result to be double the
     # movement from executing the kernel once.
     assert np.allclose(pset.lat, [0.2, 0.4, 0.2, 0.0], rtol=1e-5)


-@pytest.mark.parametrize('mode', ['scipy'])
+@pytest.mark.parametrize("mode", ["scipy"])
 def test_concatenate_interaction_kernels_as_pyfunc(fieldset, mode):
     lons = [0.0, 0.1, 0.25, 0.44]
     lats = [0.0, 0.0, 0.0, 0.0]
     # Distance in meters R_earth*0.2 degrees
-    interaction_distance = 6371000*0.2*np.pi/180
+    interaction_distance = 6371000 * 0.2 * np.pi / 180

-    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lons, lat=lats,
-                       interaction_distance=interaction_distance)
-    pset.execute(DoNothing,
-                 pyfunc_inter=pset.InteractionKernel(DummyMoveNeighbor)
-                 + DummyMoveNeighbor, endtime=2., dt=1.)
+    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lons, lat=lats, interaction_distance=interaction_distance)
+    pset.execute(
+        DoNothing, pyfunc_inter=pset.InteractionKernel(DummyMoveNeighbor) + DummyMoveNeighbor, endtime=2.0, dt=1.0
+    )
     # The kernel results are only applied after all interactionkernels
     # have been executed, so we expect the result to be double the
     # movement from executing the kernel once.
@@ -127,34 +132,35 @@ def test_neighbor_merge(fieldset):
     lons = [0.0, 0.1, 0.25, 0.44]
     lats = [0.0, 0.0, 0.0, 0.0]
     # Distance in meters R_earth*0.2 degrees
-    interaction_distance = 6371000*5.5*np.pi/180
-    MergeParticle = ScipyInteractionParticle.add_variables([
-        Variable('nearest_neighbor', dtype=np.int64, to_write=False),
-        Variable('mass', initial=1, dtype=np.float32)])
-    pset = ParticleSet(fieldset, pclass=MergeParticle, lon=lons, lat=lats,
-                       interaction_distance=interaction_distance)
-    pyfunc_inter = (pset.InteractionKernel(NearestNeighborWithinRange)
-                    + MergeWithNearestNeighbor)
-    pset.execute(DoNothing,
-                 pyfunc_inter=pyfunc_inter, runtime=3., dt=1.)
+    interaction_distance = 6371000 * 5.5 * np.pi / 180
+    MergeParticle = ScipyInteractionParticle.add_variables(
+        [Variable("nearest_neighbor", dtype=np.int64, to_write=False), Variable("mass", initial=1, dtype=np.float32)]
+    )
+    pset = ParticleSet(fieldset, pclass=MergeParticle, lon=lons, lat=lats, interaction_distance=interaction_distance)
+    pyfunc_inter = pset.InteractionKernel(NearestNeighborWithinRange) + MergeWithNearestNeighbor
+    pset.execute(DoNothing, pyfunc_inter=pyfunc_inter, runtime=3.0, dt=1.0)
     # After two steps, the particles should be removed.
     assert len(pset) == 1


-@pytest.mark.parametrize('mode', ['scipy'])
+@pytest.mark.parametrize("mode", ["scipy"])
 def test_asymmetric_attraction(fieldset, mode):
     lons = [0.0, 0.1, 0.2]
     lats = [0.0, 0.0, 0.0]
     # Distance in meters R_earth*0.2 degrees
-    interaction_distance = 6371000*5.5*np.pi/180
-    AttractingParticle = ScipyInteractionParticle.add_variable('attractor', dtype=np.bool_, to_write='once')
-    pset = ParticleSet(fieldset, pclass=AttractingParticle, lon=lons, lat=lats,
-                       interaction_distance=interaction_distance,
-                       attractor=[True, False, False])
+    interaction_distance = 6371000 * 5.5 * np.pi / 180
+    AttractingParticle = ScipyInteractionParticle.add_variable("attractor", dtype=np.bool_, to_write="once")
+    pset = ParticleSet(
+        fieldset,
+        pclass=AttractingParticle,
+        lon=lons,
+        lat=lats,
+        interaction_distance=interaction_distance,
+        attractor=[True, False, False],
+    )
     pyfunc_inter = pset.InteractionKernel(AsymmetricAttraction)
-    pset.execute(DoNothing,
-                 pyfunc_inter=pyfunc_inter, runtime=3., dt=1.)
+    pset.execute(DoNothing, pyfunc_inter=pyfunc_inter, runtime=3.0, dt=1.0)

     assert lons[1] > pset.lon[1]
     assert lons[2] > pset.lon[2]
@@ -164,21 +170,22 @@ def ConstantMoveInteraction(particle, fieldset, time, neighbors, mutator):
     def f(p):
         p.lat_nextloop += p.dt
+
     mutator[particle.id].append((f, ()))


-@pytest.mark.parametrize('runtime, dt',
-                         [(1, 1e-2),
-                          (1, -2.123e-3),
-                          (1, -3.12452-3)])
+@pytest.mark.parametrize("runtime, dt", [(1, 1e-2), (1, -2.123e-3), (1, -3.12452e-3)])
 def test_pseudo_interaction(runtime, dt):
     # A linear field where advected particles are moving at
     # 1 m/s in the longitudinal direction.
     xdim, ydim = (10, 20)
-    Uflow = Field('U', np.ones((ydim, xdim), dtype=np.float64),
-                  lon=np.linspace(0., 1e3, xdim, dtype=np.float64),
-                  lat=np.linspace(0., 1e3, ydim, dtype=np.float64))
-    Vflow = Field('V', np.zeros((ydim, xdim), dtype=np.float64), grid=Uflow.grid)
+    Uflow = Field(
+        "U",
+        np.ones((ydim, xdim), dtype=np.float64),
+        lon=np.linspace(0.0, 1e3, xdim, dtype=np.float64),
+        lat=np.linspace(0.0, 1e3, ydim, dtype=np.float64),
+    )
+    Vflow = Field("V", np.zeros((ydim, xdim), dtype=np.float64), grid=Uflow.grid)
     fieldset = FieldSet(Uflow, Vflow)

     # Execute the advection kernel only
@@ -207,22 +214,19 @@ def compare_results_by_idx(instance, particle_idx, ref_result, active_idx=None):
         for neigh in cur_neigh:
             assert neigh in active_idx
         assert set(cur_neigh) <= set(active_idx)
-    neigh_by_coor, _ = instance.find_neighbors_by_coor(
-        instance._values[:, particle_idx])
+    neigh_by_coor, _ = instance.find_neighbors_by_coor(instance._values[:, particle_idx])
     assert np.allclose(cur_neigh, neigh_by_coor)

     assert isinstance(cur_neigh, np.ndarray)
     assert set(ref_result) == set(cur_neigh)


-@pytest.mark.parametrize(
-    "test_class", [KDTreeFlatNeighborSearch, HashFlatNeighborSearch,
-                   BruteFlatNeighborSearch])
+@pytest.mark.parametrize("test_class", [KDTreeFlatNeighborSearch, HashFlatNeighborSearch, BruteFlatNeighborSearch])
 def test_flat_neighbors(test_class):
     np.random.seed(129873)
     ref_class = BruteFlatNeighborSearch
     n_particle = 1000
-    positions = np.random.rand(n_particle*3).reshape(3, n_particle)
+    positions = np.random.rand(n_particle * 3).reshape(3, n_particle)
     ref_instance = ref_class(inter_dist_vert=0.3, inter_dist_horiz=0.3)
     test_instance = test_class(inter_dist_vert=0.3, inter_dist_horiz=0.3)
     ref_instance.rebuild(positions)
@@ -234,15 +238,15 @@ def test_flat_neighbors(test_class):


 def create_spherical_positions(n_particles, max_depth=100000):
-    yrange = 2*np.random.rand(n_particles)
-    lat = 180*(np.arccos(1-yrange)-0.5*np.pi)/np.pi
-    lon = 360*np.random.rand(n_particles)
-    depth = max_depth*np.random.rand(n_particles)
+    yrange = 2 * np.random.rand(n_particles)
+    lat = 180 * (np.arccos(1 - yrange) - 0.5 * np.pi) / np.pi
+    lon = 360 * np.random.rand(n_particles)
+    depth = max_depth * np.random.rand(n_particles)
     return np.array((depth, lat, lon))


 def create_flat_positions(n_particle):
-    return np.random.rand(n_particle*3).reshape(3, n_particle)
+    return np.random.rand(n_particle * 3).reshape(3, n_particle)
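Side note (illustrative, not part of the patch): `create_spherical_positions` above draws `cos(colatitude)` uniformly from [-1, 1] (via `arccos(1 - yrange)` with `yrange` uniform on [0, 2]), which is what makes the sampled points area-uniform on the sphere; drawing latitude uniformly instead would oversample the poles. A quick numerical self-check of that property:

import numpy as np

np.random.seed(0)
yrange = 2 * np.random.rand(100_000)
cos_colat = np.cos(np.arccos(1 - yrange))  # equals 1 - yrange, uniform on [-1, 1]
# Moments of a uniform distribution on [-1, 1]: mean 0, variance 1/3
assert abs(cos_colat.mean()) < 0.01
assert abs(cos_colat.var() - 1 / 3) < 0.01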


 @pytest.mark.parametrize("test_class", [BruteSphericalNeighborSearch, HashSphericalNeighborSearch])
@@ -251,10 +255,8 @@ def test_spherical_neighbors(test_class):
     np.random.seed(9182741)
     ref_class = BruteSphericalNeighborSearch
     positions = create_spherical_positions(10000, max_depth=100000)
-    ref_instance = ref_class(inter_dist_vert=100000,
-                             inter_dist_horiz=1000000)
-    test_instance = test_class(inter_dist_vert=100000,
-                               inter_dist_horiz=1000000)
+    ref_instance = ref_class(inter_dist_vert=100000, inter_dist_horiz=1000000)
+    test_instance = test_class(inter_dist_vert=100000, inter_dist_horiz=1000000)
     ref_instance.rebuild(positions)
     test_instance.rebuild(positions)
@@ -263,9 +265,7 @@ def test_spherical_neighbors(test_class):
         compare_results_by_idx(test_instance, particle_idx, ref_result)


-@pytest.mark.parametrize(
-    "test_class", [KDTreeFlatNeighborSearch, HashFlatNeighborSearch,
-                   BruteFlatNeighborSearch])
+@pytest.mark.parametrize("test_class", [KDTreeFlatNeighborSearch, HashFlatNeighborSearch, BruteFlatNeighborSearch])
 def test_flat_update(test_class):
     np.random.seed(9182741)
     n_particle = 1000
@@ -276,19 +276,17 @@ def test_flat_update(test_class):
     test_instance = test_class(inter_dist_vert=0.3, inter_dist_horiz=0.3)

     for _ in range(1, n_active_mask):
-        positions = create_flat_positions(n_particle) + 10*np.random.rand()
+        positions = create_flat_positions(n_particle) + 10 * np.random.rand()
         active_mask = np.random.rand(n_particle) > 0.5
         ref_instance.update_values(positions, active_mask)
         test_instance.update_values(positions, active_mask)
         active_idx = np.where(active_mask)[0]
         if len(active_idx) == 0:
             continue
-        test_particles = np.random.choice(
-            active_idx, size=min(n_test_particle, len(active_idx)), replace=False)
+        test_particles = np.random.choice(active_idx, size=min(n_test_particle, len(active_idx)), replace=False)
         for particle_idx in test_particles:
             ref_result, _ = ref_instance.find_neighbors_by_idx(particle_idx)
-            compare_results_by_idx(test_instance, particle_idx, ref_result,
-                                   active_idx=active_idx)
+            compare_results_by_idx(test_instance, particle_idx, ref_result, active_idx=active_idx)


 @pytest.mark.parametrize("test_class", [BruteSphericalNeighborSearch, HashSphericalNeighborSearch])
@@ -311,8 +309,7 @@ def test_spherical_update(test_class):
         active_idx = np.where(active_mask)[0]
         if len(active_idx) == 0:
             continue
-        test_particles = np.random.choice(
-            active_idx, size=min(n_test_particle, len(active_idx)), replace=False)
+        test_particles = np.random.choice(active_idx, size=min(n_test_particle, len(active_idx)), replace=False)
         for particle_idx in test_particles:
             ref_result, _ = ref_instance.find_neighbors_by_idx(particle_idx)
             compare_results_by_idx(test_instance, particle_idx, ref_result, active_idx=active_idx)
diff --git a/tests/test_kernel_execution.py b/tests/test_kernel_execution.py
index fa094018..a16702c5 100644
--- a/tests/test_kernel_execution.py
+++ b/tests/test_kernel_execution.py
@@ -15,7 +15,7 @@ from parcels import (
     StatusCode,
 )

-ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
+ptype = {"scipy": ScipyParticle, "jit": JITParticle}


 def DoNothing(particle, fieldset, time):
@@ -24,12 +24,12 @@ def DoNothing(particle, fieldset, time):

 def fieldset(xdim=20, ydim=20):
     """Standard unit mesh fieldset."""
-    lon = np.linspace(0., 1., xdim, dtype=np.float32)
-    lat = np.linspace(0., 1., ydim, dtype=np.float32)
+    lon = np.linspace(0.0, 1.0, xdim, dtype=np.float32)
+    lat = np.linspace(0.0, 1.0, ydim, dtype=np.float32)
     U, V = np.meshgrid(lat, lon)
-    data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
-    dimensions = {'lat': lat, 'lon': lon}
-    return FieldSet.from_data(data, dimensions, mesh='flat')
+    data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32)}
+    dimensions = {"lat": lat, "lon": lon}
+    return FieldSet.from_data(data, dimensions, mesh="flat")


 @pytest.fixture(name="fieldset")
@@ -37,23 +37,25 @@ def fieldset_fixture(xdim=20, ydim=20):
     return fieldset(xdim=xdim, ydim=ydim)


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('kernel_type', ['update_lon', 'update_dlon'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize("kernel_type", ["update_lon", "update_dlon"])
 def test_execution_order(mode, kernel_type):
-    fieldset = FieldSet.from_data({'U': [[0, 1], [2, 3]], 'V': np.ones((2, 2))}, {'lon': [0, 2], 'lat': [0, 2]}, mesh='flat')
+    fieldset = FieldSet.from_data(
+        {"U": [[0, 1], [2, 3]], "V": np.ones((2, 2))}, {"lon": [0, 2], "lat": [0, 2]}, mesh="flat"
+    )

     def MoveLon_Update_Lon(particle, fieldset, time):
-        particle.lon += 0.2
+        particle.lon += 0.2

     def MoveLon_Update_dlon(particle, fieldset, time):
-        particle_dlon += 0.2  # noqa
+        particle_dlon += 0.2  # noqa

     def SampleP(particle, fieldset, time):
         particle.p = fieldset.U[time, particle.depth, particle.lat, particle.lon]

-    SampleParticle = ptype[mode].add_variable('p', dtype=np.float32, initial=0.)
+    SampleParticle = ptype[mode].add_variable("p", dtype=np.float32, initial=0.0)

-    MoveLon = MoveLon_Update_dlon if kernel_type == 'update_dlon' else MoveLon_Update_Lon
+    MoveLon = MoveLon_Update_dlon if kernel_type == "update_dlon" else MoveLon_Update_Lon

     kernels = [MoveLon, SampleP]
     lons = []
@@ -64,7 +66,7 @@ def test_execution_order(mode, kernel_type):
         lons.append(pset.lon)
         ps.append(pset.p)

-    if kernel_type == 'update_dlon':
+    if kernel_type == "update_dlon":
         assert np.isclose(lons[0], lons[1])
         assert np.isclose(ps[0], ps[1])
         assert np.allclose(lons[0], 0)
@@ -73,76 +75,78 @@ def test_execution_order(mode, kernel_type):
         assert np.allclose(lons[0], 0.2)
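Side note (illustrative, not part of the patch): the `update_dlon` branch above passes because displacements written to `particle_dlon` are accumulated and applied only after the whole kernel chain has run, so `SampleP` still sees the pre-step position. A plain-Python restatement of that apply-at-end model (hypothetical helper, not the parcels API):

def run_step(kernels, lon):
    dlon = 0.0
    for kernel in kernels:
        dlon += kernel(lon)  # every kernel sees the unmoved position
    return lon + dlon  # displacement applied once, after all kernels ran

assert run_step([lambda lon: 0.2, lambda lon: 0.0], 0.0) == 0.2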

-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('start, end, substeps, dt', [
-    (0., 10., 1, 1.),
-    (0., 10., 4, 1.),
-    (0., 10., 1, 3.),
-    (2., 16., 5, 3.),
-    (20., 10., 4, -1.),
-    (20., -10., 7, -2.),
-])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize(
+    "start, end, substeps, dt",
+    [
+        (0.0, 10.0, 1, 1.0),
+        (0.0, 10.0, 4, 1.0),
+        (0.0, 10.0, 1, 3.0),
+        (2.0, 16.0, 5, 3.0),
+        (20.0, 10.0, 4, -1.0),
+        (20.0, -10.0, 7, -2.0),
+    ],
+)
 def test_execution_endtime(fieldset, mode, start, end, substeps, dt, npart=10):
-    pset = ParticleSet(fieldset, pclass=ptype[mode], time=start,
-                       lon=np.linspace(0, 1, npart),
-                       lat=np.linspace(1, 0, npart))
+    pset = ParticleSet(
+        fieldset, pclass=ptype[mode], time=start, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart)
+    )
     pset.execute(DoNothing, endtime=end, dt=dt)
     assert np.allclose(pset.time_nextloop, end)


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('start, end, substeps, dt', [
-    (0., 10., 1, 1.),
-    (0., 10., 4, 1.),
-    (0., 10., 1, 3.),
-    (2., 16., 5, 3.),
-    (20., 10., 4, -1.),
-    (20., -10., 7, -2.),
-])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize(
+    "start, end, substeps, dt",
+    [
+        (0.0, 10.0, 1, 1.0),
+        (0.0, 10.0, 4, 1.0),
+        (0.0, 10.0, 1, 3.0),
+        (2.0, 16.0, 5, 3.0),
+        (20.0, 10.0, 4, -1.0),
+        (20.0, -10.0, 7, -2.0),
+    ],
+)
 def test_execution_runtime(fieldset, mode, start, end, substeps, dt, npart=10):
-    pset = ParticleSet(fieldset, pclass=ptype[mode], time=start,
-                       lon=np.linspace(0, 1, npart),
-                       lat=np.linspace(1, 0, npart))
+    pset = ParticleSet(
+        fieldset, pclass=ptype[mode], time=start, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart)
+    )
     t_step = abs(end - start) / substeps
     for _ in range(substeps):
         pset.execute(DoNothing, runtime=t_step, dt=dt)
     assert np.allclose(pset.time_nextloop, end)


-@pytest.mark.parametrize('mode', ['scipy'])
+@pytest.mark.parametrize("mode", ["scipy"])
 def test_execution_fail_python_exception(fieldset, mode, npart=10):
     def PythonFail(particle, fieldset, time):
-        if particle.time >= 10.:
+        if particle.time >= 10.0:
             raise RuntimeError("Enough is enough!")
         else:
             pass

-    pset = ParticleSet(fieldset, pclass=ptype[mode],
-                       lon=np.linspace(0, 1, npart),
-                       lat=np.linspace(1, 0, npart))
+    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart))
     with pytest.raises(RuntimeError):
-        pset.execute(PythonFail, endtime=20., dt=2.)
+        pset.execute(PythonFail, endtime=20.0, dt=2.0)
     assert len(pset) == npart
     assert np.isclose(pset.time[0], 10)
-    assert np.allclose(pset.time[1:], 0.)
+    assert np.allclose(pset.time[1:], 0.0)


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_execution_fail_out_of_bounds(fieldset, mode, npart=10):
     def MoveRight(particle, fieldset, time):
         tmp1, tmp2 = fieldset.UV[time, particle.depth, particle.lat, particle.lon + 0.1, particle]
         particle_dlon += 0.1  # noqa

-    pset = ParticleSet(fieldset, pclass=ptype[mode],
-                       lon=np.linspace(0, 1, npart),
-                       lat=np.linspace(1, 0, npart))
+    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart))
     with pytest.raises(FieldOutOfBoundError):
-        pset.execute(MoveRight, endtime=10., dt=1.)
+        pset.execute(MoveRight, endtime=10.0, dt=1.0)
     assert len(pset) == npart
-    assert (pset.lon - 1. > -1.e12).all()
+    assert (pset.lon - 1.0 > -1.0e12).all()


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_execution_recover_out_of_bounds(fieldset, mode, npart=2):
     def MoveRight(particle, fieldset, time):
         tmp1, tmp2 = fieldset.UV[time, particle.depth, particle.lat, particle.lon + 0.1, particle]
@@ -150,19 +154,19 @@ def test_execution_recover_out_of_bounds(fieldset, mode, npart=2):

     def MoveLeft(particle, fieldset, time):
         if particle.state == StatusCode.ErrorOutOfBounds:
-            particle_dlon -= 1.  # noqa
+            particle_dlon -= 1.0  # noqa
             particle.state = StatusCode.Success

     lon = np.linspace(0.05, 0.95, npart)
     lat = np.linspace(1, 0, npart)
     pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lon, lat=lat)
-    pset.execute([MoveRight, MoveLeft], endtime=11., dt=1.)
+    pset.execute([MoveRight, MoveLeft], endtime=11.0, dt=1.0)
     assert len(pset) == npart
     assert np.allclose(pset.lon, lon, rtol=1e-5)
     assert np.allclose(pset.lat, lat, rtol=1e-5)


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_execution_check_all_errors(fieldset, mode):
     def MoveRight(particle, fieldset, time):
         tmp1, tmp2 = fieldset.UV[time, particle.depth, particle.lat, particle.lon, particle]
@@ -172,13 +176,12 @@ def test_execution_check_all_errors(fieldset, mode):
         particle.state = StatusCode.Delete

     pset = ParticleSet(fieldset, pclass=ptype[mode], lon=10, lat=0)
-    pset.execute([MoveRight, RecoverAllErrors], endtime=11., dt=1.)
+    pset.execute([MoveRight, RecoverAllErrors], endtime=11.0, dt=1.0)
     assert len(pset) == 0


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_execution_check_stopallexecution(fieldset, mode):
-
     def addoneLon(particle, fieldset, time):
         particle_dlon += 1  # noqa

@@ -186,14 +189,14 @@ def test_execution_check_stopallexecution(fieldset, mode):
         particle.state = StatusCode.StopAllExecution

     pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0, 1], lat=[0, 0])
-    pset.execute(addoneLon, endtime=20., dt=1.)
+    pset.execute(addoneLon, endtime=20.0, dt=1.0)
     assert pset[0].lon == 9
     assert pset[0].time == 9
     assert pset[1].lon == 1
     assert pset[1].time == 0


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_execution_delete_out_of_bounds(fieldset, mode, npart=10):
     def MoveRight(particle, fieldset, time):
         tmp1, tmp2 = fieldset.UV[time, particle.depth, particle.lat, particle.lon + 0.1, particle]
@@ -206,11 +209,11 @@ def test_execution_delete_out_of_bounds(fieldset, mode, npart=10):
     lon = np.linspace(0.05, 0.95, npart)
     lat = np.linspace(1, 0, npart)
     pset = ParticleSet(fieldset, pclass=ptype[mode], lon=lon, lat=lat)
-    pset.execute([MoveRight, DeleteMe], endtime=10., dt=1.)
+    pset.execute([MoveRight, DeleteMe], endtime=10.0, dt=1.0)
     assert len(pset) == 0


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_kernel_add_no_new_variables(fieldset, mode):
     def MoveEast(particle, fieldset, time):
         particle_dlon += 0.1  # noqa
@@ -219,13 +222,12 @@ def test_kernel_add_no_new_variables(fieldset, mode):
         particle_dlat += 0.1  # noqa

     pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.5], lat=[0.5])
-    pset.execute(pset.Kernel(MoveEast) + pset.Kernel(MoveNorth),
-                 endtime=2., dt=1.)
+    pset.execute(pset.Kernel(MoveEast) + pset.Kernel(MoveNorth), endtime=2.0, dt=1.0)
     assert np.allclose(pset.lon, 0.6, rtol=1e-5)
     assert np.allclose(pset.lat, 0.6, rtol=1e-5)


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_multi_kernel_duplicate_varnames(fieldset, mode):
     # Testing for merging of two Kernels with the same variable declared
     # Should throw a warning, but go ahead regardless
@@ -238,11 +240,11 @@ def test_multi_kernel_duplicate_varnames(fieldset, mode):
         particle_dlon += add_lon  # noqa

     pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.5], lat=[0.5])
-    pset.execute([MoveEast, MoveWest], endtime=2., dt=1.)
+    pset.execute([MoveEast, MoveWest], endtime=2.0, dt=1.0)
     assert np.allclose(pset.lon, 0.3, rtol=1e-5)


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_multi_kernel_reuse_varnames(fieldset, mode):
     # Testing for merging of two Kernels with the same variable declared
     # Should throw a warning, but go ahead regardless
@@ -254,8 +256,7 @@ def test_multi_kernel_reuse_varnames(fieldset, mode):
         particle_dlon += add_lon  # noqa

     pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.5], lat=[0.5])
-    pset.execute(pset.Kernel(MoveEast1) + pset.Kernel(MoveEast2),
-                 endtime=2., dt=1.)
+    pset.execute(pset.Kernel(MoveEast1) + pset.Kernel(MoveEast2), endtime=2.0, dt=1.0)
     assert np.allclose(pset.lon, [0.9], rtol=1e-5)  # should be 0.5 + 0.2 + 0.2 = 0.9

@@ -266,6 +267,7 @@ def test_combined_kernel_from_list(fieldset):
     Tests that a Kernel can be created from a list of functions, or a list of
     mixed functions and kernel objects.
     """
+
     def MoveEast(particle, fieldset, time):
         particle_dlon += 0.1  # noqa

@@ -287,6 +289,7 @@ def test_combined_kernel_from_list_error_checking(fieldset):
     Tests that various error cases raise appropriate messages.
""" + def MoveEast(particle, fieldset, time): particle_dlon += 0.1 # noqa @@ -309,7 +312,7 @@ def test_combined_kernel_from_list_error_checking(fieldset): assert kernels_mixed.funcname == "AdvectionRK4MoveEastMoveNorth" -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_update_kernel_in_script(fieldset, mode): # Testing what happens when kernels are updated during runtime of a script # Should throw a warning, but go ahead regardless @@ -322,16 +325,18 @@ def test_update_kernel_in_script(fieldset, mode): particle_dlon += add_lon # noqa pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.5], lat=[0.5]) - pset.execute(pset.Kernel(MoveEast), endtime=1., dt=1.) - pset.execute(pset.Kernel(MoveWest), endtime=3., dt=1.) + pset.execute(pset.Kernel(MoveEast), endtime=1.0, dt=1.0) + pset.execute(pset.Kernel(MoveWest), endtime=3.0, dt=1.0) assert np.allclose(pset.lon, 0.3, rtol=1e-5) # should be 0.5 + 0.1 - 0.3 = 0.3 -@pytest.mark.parametrize('delete_cfiles', [True, False]) -@pytest.mark.skipif(sys.platform.startswith("win"), reason="skipping windows test as windows compiler generates warning") +@pytest.mark.parametrize("delete_cfiles", [True, False]) +@pytest.mark.skipif( + sys.platform.startswith("win"), reason="skipping windows test as windows compiler generates warning" +) def test_execution_keep_cfiles_and_nocompilation_warnings(fieldset, delete_cfiles): - pset = ParticleSet(fieldset, pclass=JITParticle, lon=[0.], lat=[0.]) - pset.execute(AdvectionRK4, delete_cfiles=delete_cfiles, endtime=1., dt=1.) + pset = ParticleSet(fieldset, pclass=JITParticle, lon=[0.0], lat=[0.0]) + pset.execute(AdvectionRK4, delete_cfiles=delete_cfiles, endtime=1.0, dt=1.0) cfile = pset.kernel.src_file logfile = pset.kernel.log_file del pset.kernel @@ -340,7 +345,7 @@ def test_execution_keep_cfiles_and_nocompilation_warnings(fieldset, delete_cfile else: assert os.path.exists(cfile) with open(logfile) as f: - assert 'warning' not in f.read(), 'Compilation WARNING in log file' + assert "warning" not in f.read(), "Compilation WARNING in log file" def test_compilers(): @@ -357,7 +362,7 @@ def test_compilers(): print(CCompiler_SS()) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_explicit_ParcelsRandom(fieldset, mode): """Testing `from parcels import ParcelsRandom` in kernel code""" from parcels import ParcelsRandom @@ -371,19 +376,19 @@ def test_explicit_ParcelsRandom(fieldset, mode): assert 2.5 <= pset[0].lat <= 3.5 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_parcels_dot_ParcelsRandom(fieldset, mode): """Testing `parcels.ParcelsRandom` in kernel code""" def nudge_kernel(particle, fieldset, time): - particle_dlat += parcels.ParcelsRandom.uniform(2, 3) # noqa + particle_dlat += parcels.ParcelsRandom.uniform(2, 3) # noqa pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.5], lat=[0.5]) pset.execute(nudge_kernel, runtime=2, dt=1) assert 2.5 <= pset[0].lat <= 3.5 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_parcels_dot_rng(fieldset, mode): """Testing `parcels.rng` in kernel code.""" @@ -396,7 +401,7 @@ def test_parcels_dot_rng(fieldset, mode): assert 2.5 <= pset[0].lat <= 3.5 -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_custom_ParcelsRandom_alias(fieldset, mode): """Testing aliasing 
ParcelsRandom to another name.""" from parcels import ParcelsRandom as my_custom_name diff --git a/tests/test_kernel_language.py b/tests/test_kernel_language.py index a7998de8..a71771ad 100644 --- a/tests/test_kernel_language.py +++ b/tests/test_kernel_language.py @@ -23,24 +23,24 @@ from parcels.application_kernels.EOSseawaterproperties import ( ) from parcels.application_kernels.TEOSseawaterdensity import PolyTEOS10_bsq -ptype = {'scipy': ScipyParticle, 'jit': JITParticle} +ptype = {"scipy": ScipyParticle, "jit": JITParticle} def expr_kernel(name, pset, expr): - pycode = (f"def {name}(particle, fieldset, time):\n" - f" particle.p = {expr}") - return Kernel(pset.fieldset, pset.particledata.ptype, pyfunc=None, - funccode=pycode, funcname=name, funcvars=['particle']) + pycode = f"def {name}(particle, fieldset, time):\n" f" particle.p = {expr}" + return Kernel( + pset.fieldset, pset.particledata.ptype, pyfunc=None, funccode=pycode, funcname=name, funcvars=["particle"] + ) def fieldset(xdim=20, ydim=20): """Standard unit mesh fieldset.""" - lon = np.linspace(0., 1., xdim, dtype=np.float32) - lat = np.linspace(0., 1., ydim, dtype=np.float32) + lon = np.linspace(0.0, 1.0, xdim, dtype=np.float32) + lat = np.linspace(0.0, 1.0, ydim, dtype=np.float32) U, V = np.meshgrid(lat, lon) - data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)} - dimensions = {'lat': lat, 'lon': lon} - return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True) + data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32)} + dimensions = {"lat": lat, "lon": lon} + return FieldSet.from_data(data, dimensions, mesh="flat", transpose=True) @pytest.fixture(name="fieldset") @@ -48,72 +48,75 @@ def fieldset_fixture(xdim=20, ydim=20): return fieldset(xdim=xdim, ydim=ydim) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('name, expr, result', [ - ('Add', '2 + 5', 7), - ('Sub', '6 - 2', 4), - ('Mul', '3 * 5', 15), - ('Div', '24 / 4', 6), -]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize( + "name, expr, result", + [ + ("Add", "2 + 5", 7), + ("Sub", "6 - 2", 4), + ("Mul", "3 * 5", 15), + ("Div", "24 / 4", 6), + ], +) def test_expression_int(mode, name, expr, result, npart=10): """Test basic arithmetic expressions.""" - TestParticle = ptype[mode].add_variable('p', dtype=np.float32, initial=0) - pset = ParticleSet(fieldset(), pclass=TestParticle, - lon=np.linspace(0., 1., npart), - lat=np.zeros(npart) + 0.5) - pset.execute(expr_kernel(f'Test{name}', pset, expr), endtime=1., dt=1.) + TestParticle = ptype[mode].add_variable("p", dtype=np.float32, initial=0) + pset = ParticleSet(fieldset(), pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=np.zeros(npart) + 0.5) + pset.execute(expr_kernel(f"Test{name}", pset, expr), endtime=1.0, dt=1.0) assert np.all([p.p == result for p in pset]) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('name, expr, result', [ - ('Add', '2. + 5.', 7), - ('Sub', '6. - 2.', 4), - ('Mul', '3. * 5.', 15), - ('Div', '24. / 4.', 6), - ('Pow', '2 ** 3', 8), -]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize( + "name, expr, result", + [ + ("Add", "2. + 5.", 7), + ("Sub", "6. - 2.", 4), + ("Mul", "3. * 5.", 15), + ("Div", "24. 
/ 4.", 6), + ("Pow", "2 ** 3", 8), + ], +) def test_expression_float(mode, name, expr, result, npart=10): """Test basic arithmetic expressions.""" - TestParticle = ptype[mode].add_variable('p', dtype=np.float32, initial=0) - pset = ParticleSet(fieldset(), pclass=TestParticle, - lon=np.linspace(0., 1., npart), - lat=np.zeros(npart) + 0.5) - pset.execute(expr_kernel(f'Test{name}', pset, expr), endtime=1., dt=1.) + TestParticle = ptype[mode].add_variable("p", dtype=np.float32, initial=0) + pset = ParticleSet(fieldset(), pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=np.zeros(npart) + 0.5) + pset.execute(expr_kernel(f"Test{name}", pset, expr), endtime=1.0, dt=1.0) assert np.all([p.p == result for p in pset]) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('name, expr, result', [ - ('True', 'True', True), - ('False', 'False', False), - ('And', 'True and False', False), - ('Or', 'True or False', True), - ('Equal', '5 == 5', True), - ('NotEqual', '3 != 4', True), - ('Lesser', '5 < 3', False), - ('LesserEq', '3 <= 5', True), - ('Greater', '4 > 2', True), - ('GreaterEq', '2 >= 4', False), - ('CheckNaN', 'math.nan != math.nan', True), -]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize( + "name, expr, result", + [ + ("True", "True", True), + ("False", "False", False), + ("And", "True and False", False), + ("Or", "True or False", True), + ("Equal", "5 == 5", True), + ("NotEqual", "3 != 4", True), + ("Lesser", "5 < 3", False), + ("LesserEq", "3 <= 5", True), + ("Greater", "4 > 2", True), + ("GreaterEq", "2 >= 4", False), + ("CheckNaN", "math.nan != math.nan", True), + ], +) def test_expression_bool(mode, name, expr, result, npart=10): """Test basic arithmetic expressions.""" - TestParticle = ptype[mode].add_variable('p', dtype=np.float32, initial=0) - pset = ParticleSet(fieldset(), pclass=TestParticle, - lon=np.linspace(0., 1., npart), - lat=np.zeros(npart) + 0.5) - pset.execute(expr_kernel(f'Test{name}', pset, expr), endtime=1., dt=1.) - if mode == 'jit': + TestParticle = ptype[mode].add_variable("p", dtype=np.float32, initial=0) + pset = ParticleSet(fieldset(), pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=np.zeros(npart) + 0.5) + pset.execute(expr_kernel(f"Test{name}", pset, expr), endtime=1.0, dt=1.0) + if mode == "jit": assert np.all(result == (pset.p == 1)) else: assert np.all(result == pset.p) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_while_if_break(mode): """Test while, if and break commands.""" - TestParticle = ptype[mode].add_variable('p', dtype=np.float32, initial=0) + TestParticle = ptype[mode].add_variable("p", dtype=np.float32, initial=0) pset = ParticleSet(fieldset(), pclass=TestParticle, lon=[0], lat=[0]) def kernel(particle, fieldset, time): @@ -122,17 +125,18 @@ def test_while_if_break(mode): break particle.p += 1 if particle.p > 5: - particle.p *= 2. - pset.execute(kernel, endtime=1., dt=1.) 

-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_nested_if(mode):
     """Test nested if commands."""
-    TestParticle = ptype[mode].add_variables([
-        Variable('p0', dtype=np.int32, initial=0),
-        Variable('p1', dtype=np.int32, initial=1)])
+    TestParticle = ptype[mode].add_variables(
+        [Variable("p0", dtype=np.int32, initial=0), Variable("p1", dtype=np.int32, initial=1)]
+    )
     pset = ParticleSet(fieldset(), pclass=TestParticle, lon=0, lat=0)

     def kernel(particle, fieldset, time):
@@ -141,32 +145,32 @@ def test_nested_if(mode):
         if var + 1 < particle.p1:
             particle.p1 = -1

-    pset.execute(kernel, endtime=10, dt=1.)
+    pset.execute(kernel, endtime=10, dt=1.0)
     assert np.allclose([pset.p0[0], pset.p1[0]], [0, 1])


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pass(mode):
     """Test pass commands."""
-    TestParticle = ptype[mode].add_variable('p', dtype=np.float32, initial=0)
+    TestParticle = ptype[mode].add_variable("p", dtype=np.float32, initial=0)
     pset = ParticleSet(fieldset(), pclass=TestParticle, lon=0, lat=0)

     def kernel(particle, fieldset, time):
         particle.p = -1
         pass

-    pset.execute(kernel, endtime=10, dt=1.)
+    pset.execute(kernel, endtime=10, dt=1.0)
     assert np.allclose(pset[0].p, -1)


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_dt_as_variable_in_kernel(mode):
     pset = ParticleSet(fieldset(), pclass=ptype[mode], lon=0, lat=0)

     def kernel(particle, fieldset, time):
-        dt = 1.  # noqa
+        dt = 1.0  # noqa

-    pset.execute(kernel, endtime=10, dt=1.)
+    pset.execute(kernel, endtime=10, dt=1.0)


 def test_parcels_tmpvar_in_kernel():
@@ -181,28 +185,28 @@ def test_parcels_tmpvar_in_kernel():

     for kernel in [kernel_tmpvar, kernel_pnum]:
         with pytest.raises(NotImplementedError):
-            pset.execute(kernel, endtime=1, dt=1.)
+            pset.execute(kernel, endtime=1, dt=1.0)


 def test_varname_as_fieldname():
     """Tests for error thrown if variable has same name as Field."""
     fset = fieldset()
-    fset.add_field(Field('speed', 10, lon=0, lat=0))
-    fset.add_constant('vertical_speed', 0.1)
-    Particle = JITParticle.add_variable('speed')
+    fset.add_field(Field("speed", 10, lon=0, lat=0))
+    fset.add_constant("vertical_speed", 0.1)
+    Particle = JITParticle.add_variable("speed")
     pset = ParticleSet(fset, pclass=Particle, lon=0, lat=0)

     def kernel_particlename(particle, fieldset, time):
         particle.speed = fieldset.speed[particle]

-    pset.execute(kernel_particlename, endtime=1, dt=1.)
+    pset.execute(kernel_particlename, endtime=1, dt=1.0)
     assert pset[0].speed == 10

     def kernel_varname(particle, fieldset, time):
         vertical_speed = fieldset.vertical_speed  # noqa

     with pytest.raises(NotImplementedError):
-        pset.execute(kernel_varname, endtime=1, dt=1.)
+        pset.execute(kernel_varname, endtime=1, dt=1.0)


 def test_abs():
@@ -213,68 +217,76 @@ def test_abs():
         particle.lon = abs(3.1)

     with pytest.raises(NotImplementedError):
-        pset.execute(kernel_abs, endtime=1, dt=1.)
+        pset.execute(kernel_abs, endtime=1, dt=1.0)


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_if_withfield(fieldset, mode):
     """Test combination of if and Field sampling commands."""
-    TestParticle = ptype[mode].add_variable('p', dtype=np.float32, initial=0)
+    TestParticle = ptype[mode].add_variable("p", dtype=np.float32, initial=0)
     pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])

     def kernel(particle, fieldset, time):
-        u, v = fieldset.UV[time, 0, 0, 1.]
+        u, v = fieldset.UV[time, 0, 0, 1.0]
         particle.p = 0
-        if fieldset.U[time, 0, 0, 1.] == u:
+        if fieldset.U[time, 0, 0, 1.0] == u:
             particle.p += 1
-        if fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.]:
+        if fieldset.U[time, 0, 0, 1.0] == fieldset.U[time, 0, 0, 1.0]:
             particle.p += 1
         if True:
             particle.p += 1
-        if fieldset.U[time, 0, 0, 1.] == u and 1 == 1:
+        if fieldset.U[time, 0, 0, 1.0] == u and 1 == 1:
             particle.p += 1
-        if fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.] and fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.]:
+        if (
+            fieldset.U[time, 0, 0, 1.0] == fieldset.U[time, 0, 0, 1.0]
+            and fieldset.U[time, 0, 0, 1.0] == fieldset.U[time, 0, 0, 1.0]
+        ):
             particle.p += 1
-        if fieldset.U[time, 0, 0, 1.] == u:
+        if fieldset.U[time, 0, 0, 1.0] == u:
             particle.p += 1
         else:
             particle.p += 1000
-        if fieldset.U[time, 0, 0, 1.] == 3:
+        if fieldset.U[time, 0, 0, 1.0] == 3:
             particle.p += 1000
         else:
             particle.p += 1

-    pset.execute(kernel, endtime=1., dt=1.)
-    assert np.allclose(pset.p, 7., rtol=1e-12)
+    pset.execute(kernel, endtime=1.0, dt=1.0)
+    assert np.allclose(pset.p, 7.0, rtol=1e-12)


-@pytest.mark.parametrize('mode', ['scipy'])
+@pytest.mark.parametrize("mode", ["scipy"])
 def test_print(fieldset, mode, capfd):
     """Test print statements."""
-    TestParticle = ptype[mode].add_variable('p', dtype=np.float32, initial=0)
+    TestParticle = ptype[mode].add_variable("p", dtype=np.float32, initial=0)
     pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0.5], lat=[0.5])

     def kernel(particle, fieldset, time):
         particle.p = 1e-3
         tmp = 5
         print("%d %f %f" % (particle.id, particle.p, tmp))
-    pset.execute(kernel, endtime=1., dt=1., verbose_progress=False)
+
+    pset.execute(kernel, endtime=1.0, dt=1.0, verbose_progress=False)
     out, err = capfd.readouterr()
-    lst = out.split(' ')
+    lst = out.split(" ")
     tol = 1e-8
-    assert abs(float(lst[0]) - pset.id[0]) < tol and abs(float(lst[1]) - pset.p[0]) < tol and abs(float(lst[2]) - 5) < tol
+    assert (
+        abs(float(lst[0]) - pset.id[0]) < tol and abs(float(lst[1]) - pset.p[0]) < tol and abs(float(lst[2]) - 5) < tol
+    )

     def kernel2(particle, fieldset, time):
         tmp = 3
         print("%f" % (tmp))
-    pset.execute(kernel2, endtime=2., dt=1., verbose_progress=False)
+
+    pset.execute(kernel2, endtime=2.0, dt=1.0, verbose_progress=False)
     out, err = capfd.readouterr()
-    lst = out.split(' ')
+    lst = out.split(" ")
     assert abs(float(lst[0]) - 3) < tol


-@pytest.mark.parametrize(('mode', 'expectation'), [('scipy', does_not_raise()),
-                                                   ('jit', pytest.raises(NotImplementedError))])
+@pytest.mark.parametrize(
+    ("mode", "expectation"), [("scipy", does_not_raise()), ("jit", pytest.raises(NotImplementedError))]
+)
 def test_fieldset_access(fieldset, expectation, mode):
     pset = ParticleSet(fieldset, pclass=ptype[mode], lon=0, lat=0)

     def kernel(particle, fieldset, time):
         particle.lon = fieldset.U.grid.lon[2]

     with expectation:
-        pset.execute(kernel, endtime=1, dt=1.)
+        pset.execute(kernel, endtime=1, dt=1.0)
         assert pset.lon[0] == fieldset.U.grid.lon[2]


 def random_series(npart, rngfunc, rngargs, mode):
-    random = ParcelsRandom if mode == 'jit' else py_random
+    random = ParcelsRandom if mode == "jit" else py_random
     random.seed(1234)
     func = getattr(random, rngfunc)
     series = [func(*rngargs) for _ in range(npart)]
@@ -296,55 +308,56 @@ def random_series(npart, rngfunc, rngargs, mode):
     return series


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('rngfunc, rngargs', [
-    ('random', []),
-    ('uniform', [0., 20.]),
-    ('randint', [0, 20]),
-])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize(
+    "rngfunc, rngargs",
+    [
+        ("random", []),
+        ("uniform", [0.0, 20.0]),
+        ("randint", [0, 20]),
+    ],
+)
 def test_random_float(mode, rngfunc, rngargs, npart=10):
     """Test basic random number generation."""
-    TestParticle = ptype[mode].add_variable('p', dtype=np.float32, initial=0)
-    pset = ParticleSet(fieldset(), pclass=TestParticle,
-                       lon=np.linspace(0., 1., npart),
-                       lat=np.zeros(npart) + 0.5)
+    TestParticle = ptype[mode].add_variable("p", dtype=np.float32, initial=0)
+    pset = ParticleSet(fieldset(), pclass=TestParticle, lon=np.linspace(0.0, 1.0, npart), lat=np.zeros(npart) + 0.5)
     series = random_series(npart, rngfunc, rngargs, mode)
-    rnglib = 'ParcelsRandom' if mode == 'jit' else 'random'
-    kernel = expr_kernel(f"TestRandom_{rngfunc}", pset,
-                         f"{rnglib}.{rngfunc}({', '.join([str(a) for a in rngargs])})")
-    pset.execute(kernel, endtime=1., dt=1.)
+    rnglib = "ParcelsRandom" if mode == "jit" else "random"
+    kernel = expr_kernel(f"TestRandom_{rngfunc}", pset, f"{rnglib}.{rngfunc}({', '.join([str(a) for a in rngargs])})")
+    pset.execute(kernel, endtime=1.0, dt=1.0)
     assert np.allclose(pset.p, series, atol=1e-9)

-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('concat', [False, True])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize("concat", [False, True])
 def test_random_kernel_concat(fieldset, mode, concat):
-    TestParticle = ptype[mode].add_variable('p', dtype=np.float32, initial=0)
+    TestParticle = ptype[mode].add_variable("p", dtype=np.float32, initial=0)
     pset = ParticleSet(fieldset, pclass=TestParticle, lon=0, lat=0)

     def RandomKernel(particle, fieldset, time):
         particle.p += ParcelsRandom.uniform(0, 1)

     def AddOne(particle, fieldset, time):
-        particle.p += 1.
+        particle.p += 1.0

     kernels = [RandomKernel, AddOne] if concat else RandomKernel
     pset.execute(kernels, runtime=1)
     assert pset.p > 1 if concat else pset.p < 1


-@pytest.mark.parametrize('mode', ['jit', pytest.param('scipy', marks=pytest.mark.xfail(reason="c_kernels don't work in scipy mode"))])
-@pytest.mark.parametrize('c_inc', ['str', 'file'])
+@pytest.mark.parametrize(
+    "mode", ["jit", pytest.param("scipy", marks=pytest.mark.xfail(reason="c_kernels don't work in scipy mode"))]
+)
+@pytest.mark.parametrize("c_inc", ["str", "file"])
 def test_c_kernel(fieldset, mode, c_inc):
-    coord_type = np.float32 if c_inc == 'str' else np.float64
-    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.5], lat=[0],
-                       lonlatdepth_dtype=coord_type)
+    coord_type = np.float32 if c_inc == "str" else np.float64
+    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.5], lat=[0], lonlatdepth_dtype=coord_type)

     def func(U, lon, dt):
         u = U.data[0, 2, 1]
         return lon + u * dt

-    if c_inc == 'str':
+    if c_inc == "str":
         c_include = """
                  static inline StatusCode func(CField *f, double *particle_dlon, double *dt)
                  {
@@ -356,19 +369,19 @@ def test_c_kernel(fieldset, mode, c_inc):
                  }
                  """
     else:
-        c_include = os.path.join(os.path.dirname(__file__), 'customed_header.h')
+        c_include = os.path.join(os.path.dirname(__file__), "customed_header.h")

     def ckernel(particle, fieldset, time):
-        func('parcels_customed_Cfunc_pointer_args', fieldset.U, particle_dlon, particle.dt)  # noqa
+        func("parcels_customed_Cfunc_pointer_args", fieldset.U, particle_dlon, particle.dt)  # noqa

     kernel = pset.Kernel(ckernel, c_include=c_include)
-    pset.execute(kernel, endtime=4., dt=1.)
+    pset.execute(kernel, endtime=4.0, dt=1.0)
     assert np.allclose(pset.lon[0], 0.81578948)


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_dt_modif_by_kernel(mode):
-    TestParticle = ptype[mode].add_variable('age', dtype=np.float32, initial=0)
+    TestParticle = ptype[mode].add_variable("age", dtype=np.float32, initial=0)
     pset = ParticleSet(fieldset(), pclass=TestParticle, lon=[0.5], lat=[0])

     def modif_dt(particle, fieldset, time):
@@ -376,48 +389,51 @@ def test_dt_modif_by_kernel(mode):
         particle.dt = 2

     endtime = 4
-    pset.execute(modif_dt, endtime=endtime+1, dt=1.)
+    pset.execute(modif_dt, endtime=endtime + 1, dt=1.0)
     assert np.isclose(pset.time[0], endtime)


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize(('dt', 'expectation'), [(1e-2, does_not_raise()),
-                                                 (1e-5, does_not_raise()),
-                                                 (1e-6, pytest.raises(ValueError))])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize(
+    ("dt", "expectation"), [(1e-2, does_not_raise()), (1e-5, does_not_raise()), (1e-6, pytest.raises(ValueError))]
+)
 def test_small_dt(mode, dt, expectation, npart=10):
-    pset = ParticleSet(fieldset(), pclass=ptype[mode], lon=np.zeros(npart),
-                       lat=np.zeros(npart), time=np.arange(0, npart)*dt*10)
+    pset = ParticleSet(
+        fieldset(), pclass=ptype[mode], lon=np.zeros(npart), lat=np.zeros(npart), time=np.arange(0, npart) * dt * 10
+    )

     def DoNothing(particle, fieldset, time):
         pass

     with expectation:
-        pset.execute(DoNothing, dt=dt, runtime=dt*101)
-        assert np.allclose([p.time for p in pset], dt*100)
+        pset.execute(DoNothing, dt=dt, runtime=dt * 101)
+        assert np.allclose([p.time for p in pset], dt * 100)


-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_TEOSdensity_kernels(mode):
-
     def generate_fieldset(xdim=2, ydim=2, zdim=2, tdim=1):
-        lon = np.linspace(0., 10., xdim, dtype=np.float32)
-        lat = np.linspace(0., 10., ydim, dtype=np.float32)
+        lon = np.linspace(0.0, 10.0, xdim, dtype=np.float32)
+        lat = np.linspace(0.0, 10.0, ydim, dtype=np.float32)
         depth = np.linspace(0, 2000, zdim, dtype=np.float32)
         time = np.zeros(tdim, dtype=np.float64)
         U = np.ones((tdim, zdim, ydim, xdim))
         V = np.ones((tdim, zdim, ydim, xdim))
         abs_salinity = 30 * np.ones((tdim, zdim, ydim, xdim))
         cons_temperature = 10 * np.ones((tdim, zdim, ydim, xdim))
-        dimensions = {'lat': lat, 'lon': lon, 'depth': depth, 'time': time}
-        data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32),
-                'abs_salinity': np.array(abs_salinity, dtype=np.float32),
-                'cons_temperature': np.array(cons_temperature, dtype=np.float32)}
+        dimensions = {"lat": lat, "lon": lon, "depth": depth, "time": time}
+        data = {
+            "U": np.array(U, dtype=np.float32),
+            "V": np.array(V, dtype=np.float32),
+            "abs_salinity": np.array(abs_salinity, dtype=np.float32),
+            "cons_temperature": np.array(cons_temperature, dtype=np.float32),
+        }
         return (data, dimensions)

     data, dimensions = generate_fieldset()
     fieldset = FieldSet.from_data(data, dimensions)

-    DensParticle = ptype[mode].add_variable('density', dtype=np.float32)
+    DensParticle = ptype[mode].add_variable("density", dtype=np.float32)

     pset = ParticleSet(fieldset, pclass=DensParticle, lon=5, lat=5, depth=1000)
@@ -425,25 +441,24 @@ def test_TEOSdensity_kernels(mode):
     assert np.allclose(pset[0].density, 1022.85377)
ptype[mode].add_variables( + [Variable("potemp", dtype=np.float32), Variable("pressure", dtype=np.float32, initial=10000)] + ) pset = ParticleSet(fieldset, pclass=PoTempParticle, lon=5, lat=5, depth=1000) pset.execute(PtempFromTemp, runtime=1) assert np.allclose(pset[0].potemp, 36.89073) - TempParticle = ptype[mode].add_variables([ - Variable('temp', dtype=np.float32), - Variable('pressure', dtype=np.float32, initial=10000)]) + TempParticle = ptype[mode].add_variables( + [Variable("temp", dtype=np.float32), Variable("pressure", dtype=np.float32, initial=10000)] + ) pset = ParticleSet(fieldset, pclass=TempParticle, lon=5, lat=5, depth=1000) pset.execute(TempFromPtemp, runtime=1) assert np.allclose(pset[0].temp, 40) @@ -453,13 +468,12 @@ def test_EOSseawaterproperties_kernels(mode): assert np.allclose(pset[0].pressure, 7500, atol=1e-2) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('pressure', [0, 10]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("pressure", [0, 10]) def test_UNESCOdensity_kernel(mode, pressure): - def generate_fieldset(p, xdim=2, ydim=2, zdim=2, tdim=1): - lon = np.linspace(0., 10., xdim, dtype=np.float32) - lat = np.linspace(0., 10., ydim, dtype=np.float32) + lon = np.linspace(0.0, 10.0, xdim, dtype=np.float32) + lat = np.linspace(0.0, 10.0, ydim, dtype=np.float32) depth = np.linspace(0, 2000, zdim, dtype=np.float32) time = np.zeros(tdim, dtype=np.float64) U = np.ones((tdim, zdim, ydim, xdim)) @@ -467,17 +481,20 @@ def test_UNESCOdensity_kernel(mode, pressure): psu_salinity = 8 * np.ones((tdim, zdim, ydim, xdim)) cons_temperature = 10 * np.ones((tdim, zdim, ydim, xdim)) cons_pressure = p * np.ones((tdim, zdim, ydim, xdim)) - dimensions = {'lat': lat, 'lon': lon, 'depth': depth, 'time': time} - data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32), - 'psu_salinity': np.array(psu_salinity, dtype=np.float32), - 'cons_pressure': np.array(cons_pressure, dtype=np.float32), - 'cons_temperature': np.array(cons_temperature, dtype=np.float32)} + dimensions = {"lat": lat, "lon": lon, "depth": depth, "time": time} + data = { + "U": np.array(U, dtype=np.float32), + "V": np.array(V, dtype=np.float32), + "psu_salinity": np.array(psu_salinity, dtype=np.float32), + "cons_pressure": np.array(cons_pressure, dtype=np.float32), + "cons_temperature": np.array(cons_temperature, dtype=np.float32), + } return (data, dimensions) data, dimensions = generate_fieldset(pressure) fieldset = FieldSet.from_data(data, dimensions) - DensParticle = ptype[mode].add_variable('density', dtype=np.float32) + DensParticle = ptype[mode].add_variable("density", dtype=np.float32) pset = ParticleSet(fieldset, pclass=DensParticle, lon=5, lat=5, depth=1000) diff --git a/tests/test_mpirun.py b/tests/test_mpirun.py index eb3862c6..bb132b16 100644 --- a/tests/test_mpirun.py +++ b/tests/test_mpirun.py @@ -8,32 +8,38 @@ import xarray as xr @pytest.mark.skipif(sys.platform.startswith("win"), reason="skipping windows as mpi4py not available for windows") -@pytest.mark.parametrize('repeatdt, maxage', [(200*86400, 600*86400), (100*86400, 100*86400)]) -@pytest.mark.parametrize('nump', [8]) +@pytest.mark.parametrize("repeatdt, maxage", [(200 * 86400, 600 * 86400), (100 * 86400, 100 * 86400)]) +@pytest.mark.parametrize("nump", [8]) def test_mpi_run(tmpdir, repeatdt, maxage, nump): - stommel_file = os.path.join(os.path.dirname(__file__), '..', 'docs', 'examples', 'example_stommel.py') - outputMPI = tmpdir.join('StommelMPI') - 
outputMPI_partition_function = tmpdir.join('StommelMPI_partition_function') - outputNoMPI = tmpdir.join('StommelNoMPI.zarr') - - os.system('mpirun -np 2 python %s -p %d -o %s -r %d -a %d -wf False -cpf True' % (stommel_file, nump, outputMPI_partition_function, repeatdt, maxage)) - os.system('mpirun -np 2 python %s -p %d -o %s -r %d -a %d -wf False' % (stommel_file, nump, outputMPI, repeatdt, maxage)) - os.system('python %s -p %d -o %s -r %d -a %d -wf False' % (stommel_file, nump, outputNoMPI, repeatdt, maxage)) + stommel_file = os.path.join(os.path.dirname(__file__), "..", "docs", "examples", "example_stommel.py") + outputMPI = tmpdir.join("StommelMPI") + outputMPI_partition_function = tmpdir.join("StommelMPI_partition_function") + outputNoMPI = tmpdir.join("StommelNoMPI.zarr") + + os.system( + "mpirun -np 2 python %s -p %d -o %s -r %d -a %d -wf False -cpf True" + % (stommel_file, nump, outputMPI_partition_function, repeatdt, maxage) + ) + os.system( + "mpirun -np 2 python %s -p %d -o %s -r %d -a %d -wf False" % (stommel_file, nump, outputMPI, repeatdt, maxage) + ) + os.system("python %s -p %d -o %s -r %d -a %d -wf False" % (stommel_file, nump, outputNoMPI, repeatdt, maxage)) ds2 = xr.open_zarr(outputNoMPI) for mpi_run in [outputMPI, outputMPI_partition_function]: files = glob(os.path.join(mpi_run, "proc*")) - ds1 = xr.concat([xr.open_zarr(f) for f in files], dim='trajectory', - compat='no_conflicts', coords='minimal').sortby(['trajectory']) + ds1 = xr.concat( + [xr.open_zarr(f) for f in files], dim="trajectory", compat="no_conflicts", coords="minimal" + ).sortby(["trajectory"]) for v in ds2.variables.keys(): - if v == 'time': + if v == "time": continue # skip because np.allclose does not work well on np.datetime64 assert np.allclose(ds1.variables[v][:], ds2.variables[v][:], equal_nan=True) for a in ds2.attrs: - if a != 'parcels_version': + if a != "parcels_version": assert ds1.attrs[a] == ds2.attrs[a] ds1.close() diff --git a/tests/test_particlefile.py b/tests/test_particlefile.py index a724b952..8dac7f10 100755 --- a/tests/test_particlefile.py +++ b/tests/test_particlefile.py @@ -18,7 +18,7 @@ from parcels import ( from parcels.particlefile import _set_calendar from parcels.tools.converters import _get_cftime_calendars, _get_cftime_datetimes -ptype = {'scipy': ScipyParticle, 'jit': JITParticle} +ptype = {"scipy": ScipyParticle, "jit": JITParticle} def fieldset(xdim=40, ydim=100): @@ -27,8 +27,8 @@ def fieldset(xdim=40, ydim=100): lon = np.linspace(0, 1, xdim, dtype=np.float32) lat = np.linspace(-60, 60, ydim, dtype=np.float32) depth = np.zeros(1, dtype=np.float32) - data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)} - dimensions = {'lat': lat, 'lon': lon, 'depth': depth} + data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32)} + dimensions = {"lat": lat, "lon": lon, "depth": depth} return FieldSet.from_data(data, dimensions) @@ -37,7 +37,7 @@ def fieldset_ficture(xdim=40, ydim=100): return fieldset(xdim=xdim, ydim=ydim) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_metadata(fieldset, mode, tmpdir): filepath = tmpdir.join("pfile_metadata.zarr") pset = ParticleSet(fieldset, pclass=ptype[mode], lon=0, lat=0) @@ -48,16 +48,14 @@ def test_metadata(fieldset, mode, tmpdir): pset.execute(DoNothing, runtime=1, output_file=pset.ParticleFile(filepath)) ds = xr.open_zarr(filepath) - assert ds.attrs['parcels_kernels'].lower() == f'{mode}ParticleDoNothing'.lower() + assert 
ds.attrs["parcels_kernels"].lower() == f"{mode}ParticleDoNothing".lower() -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_pfile_array_write_zarr_memorystore(fieldset, mode, npart=10): """Check that writing to a Zarr MemoryStore works.""" zarr_store = MemoryStore() - pset = ParticleSet(fieldset, pclass=ptype[mode], - lon=np.linspace(0, 1, npart), - lat=0.5*np.ones(npart), time=0) + pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=0.5 * np.ones(npart), time=0) pfile = pset.ParticleFile(zarr_store) pfile.write(pset, 0) @@ -66,12 +64,10 @@ def test_pfile_array_write_zarr_memorystore(fieldset, mode, npart=10): ds.close() -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_pfile_array_remove_particles(fieldset, mode, tmpdir, npart=10): filepath = tmpdir.join("pfile_array_remove_particles.zarr") - pset = ParticleSet(fieldset, pclass=ptype[mode], - lon=np.linspace(0, 1, npart), - lat=0.5*np.ones(npart), time=0) + pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=0.5 * np.ones(npart), time=0) pfile = pset.ParticleFile(filepath) pfile.write(pset, 0) pset.remove_indices(3) @@ -80,19 +76,17 @@ def test_pfile_array_remove_particles(fieldset, mode, tmpdir, npart=10): pfile.write(pset, 1) ds = xr.open_zarr(filepath) - timearr = ds['time'][:] + timearr = ds["time"][:] assert (np.isnat(timearr[3, 1])) and (np.isfinite(timearr[3, 0])) ds.close() -@pytest.mark.parametrize('mode', ['scipy', 'jit']) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) def test_pfile_set_towrite_False(fieldset, mode, tmpdir, npart=10): filepath = tmpdir.join("pfile_set_towrite_False.zarr") - pset = ParticleSet(fieldset, pclass=ptype[mode], - lon=np.linspace(0, 1, npart), - lat=0.5*np.ones(npart)) - pset.set_variable_write_status('depth', False) - pset.set_variable_write_status('lat', False) + pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=0.5 * np.ones(npart)) + pset.set_variable_write_status("depth", False) + pset.set_variable_write_status("lat", False) pfile = pset.ParticleFile(filepath, outputdt=1) def Update_lon(particle, fieldset, time): @@ -101,24 +95,21 @@ def test_pfile_set_towrite_False(fieldset, mode, tmpdir, npart=10): pset.execute(Update_lon, runtime=10, output_file=pfile) ds = xr.open_zarr(filepath) - assert 'time' in ds - assert 'z' not in ds - assert 'lat' not in ds + assert "time" in ds + assert "z" not in ds + assert "lat" not in ds ds.close() # For pytest purposes, we need to reset to original status - pset.set_variable_write_status('depth', True) - pset.set_variable_write_status('lat', True) + pset.set_variable_write_status("depth", True) + pset.set_variable_write_status("lat", True) -@pytest.mark.parametrize('mode', ['scipy', 'jit']) -@pytest.mark.parametrize('chunks_obs', [1, None]) +@pytest.mark.parametrize("mode", ["scipy", "jit"]) +@pytest.mark.parametrize("chunks_obs", [1, None]) def test_pfile_array_remove_all_particles(fieldset, mode, chunks_obs, tmpdir, npart=10): - filepath = tmpdir.join("pfile_array_remove_particles.zarr") - pset = ParticleSet(fieldset, pclass=ptype[mode], - lon=np.linspace(0, 1, npart), - lat=0.5*np.ones(npart), time=0) + pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=0.5 * np.ones(npart), time=0) chunks = (npart, chunks_obs) if chunks_obs else None pfile = pset.ParticleFile(filepath, chunks=chunks) pfile.write(pset, 0) 
@@ -128,16 +119,16 @@ def test_pfile_array_remove_all_particles(fieldset, mode, chunks_obs, tmpdir, np
     pfile.write(pset, 2)
 
     ds = xr.open_zarr(filepath)
-    assert np.allclose(ds['time'][:, 0], np.timedelta64(0, 's'), atol=np.timedelta64(1, 'ms'))
+    assert np.allclose(ds["time"][:, 0], np.timedelta64(0, "s"), atol=np.timedelta64(1, "ms"))
     if chunks_obs is not None:
-        assert ds['time'][:].shape == chunks
+        assert ds["time"][:].shape == chunks
     else:
-        assert ds['time'][:].shape[0] == npart
-        assert np.all(np.isnan(ds['time'][:, 1:]))
+        assert ds["time"][:].shape[0] == npart
+        assert np.all(np.isnan(ds["time"][:, 1:]))
     ds.close()
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_variable_write_double(fieldset, mode, tmpdir):
     filepath = tmpdir.join("pfile_variable_write_double.zarr")
@@ -149,106 +140,115 @@ def test_variable_write_double(fieldset, mode, tmpdir):
     pset.execute(pset.Kernel(Update_lon), endtime=0.001, dt=0.00001, output_file=ofile)
 
     ds = xr.open_zarr(filepath)
-    lons = ds['lon'][:]
-    assert (isinstance(lons.values[0, 0], np.float64))
+    lons = ds["lon"][:]
+    assert isinstance(lons.values[0, 0], np.float64)
     ds.close()
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_write_dtypes_pfile(fieldset, mode, tmpdir):
     filepath = tmpdir.join("pfile_dtypes.zarr")
 
     dtypes = [np.float32, np.float64, np.int32, np.uint32, np.int64, np.uint64]
-    if mode == 'scipy':
+    if mode == "scipy":
         dtypes.extend([np.bool_, np.int8, np.uint8, np.int16, np.uint16])
 
-    extra_vars = [Variable(f'v_{d.__name__}', dtype=d, initial=0.) for d in dtypes]
+    extra_vars = [Variable(f"v_{d.__name__}", dtype=d, initial=0.0) for d in dtypes]
     MyParticle = ptype[mode].add_variables(extra_vars)
 
     pset = ParticleSet(fieldset, pclass=MyParticle, lon=0, lat=0, time=0)
     pfile = pset.ParticleFile(name=filepath, outputdt=1)
     pfile.write(pset, 0)
 
-    ds = xr.open_zarr(filepath, mask_and_scale=False)  # Note masking issue at https://stackoverflow.com/questions/68460507/xarray-loading-int-data-as-float
+    ds = xr.open_zarr(
+        filepath, mask_and_scale=False
+    )  # Note masking issue at https://stackoverflow.com/questions/68460507/xarray-loading-int-data-as-float
     for d in dtypes:
-        assert ds[f'v_{d.__name__}'].dtype == d
+        assert ds[f"v_{d.__name__}"].dtype == d
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('npart', [1, 2, 5])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize("npart", [1, 2, 5])
 def test_variable_written_once(fieldset, mode, tmpdir, npart):
     filepath = tmpdir.join("pfile_once_written_variables.zarr")
 
     def Update_v(particle, fieldset, time):
-        particle.v_once += 1.
+        particle.v_once += 1.0
         particle.age += particle.dt
 
-    MyParticle = ptype[mode].add_variables([
-        Variable('v_once', dtype=np.float64, initial=0., to_write='once'),
-        Variable('age', dtype=np.float32, initial=0.)])
+    MyParticle = ptype[mode].add_variables(
+        [
+            Variable("v_once", dtype=np.float64, initial=0.0, to_write="once"),
+            Variable("age", dtype=np.float32, initial=0.0),
+        ]
+    )
     lon = np.linspace(0, 1, npart)
     lat = np.linspace(1, 0, npart)
-    time = np.arange(0, npart/10., 0.1, dtype=np.float64)
+    time = np.arange(0, npart / 10.0, 0.1, dtype=np.float64)
     pset = ParticleSet(fieldset, pclass=MyParticle, lon=lon, lat=lat, time=time, v_once=time)
     ofile = pset.ParticleFile(name=filepath, outputdt=0.1)
     pset.execute(pset.Kernel(Update_v), endtime=1, dt=0.1, output_file=ofile)
 
-    assert np.allclose(pset.v_once - time - pset.age*10, 1, atol=1e-5)
+    assert np.allclose(pset.v_once - time - pset.age * 10, 1, atol=1e-5)
 
     ds = xr.open_zarr(filepath)
-    vfile = np.ma.filled(ds['v_once'][:], np.nan)
-    assert (vfile.shape == (npart, ))
+    vfile = np.ma.filled(ds["v_once"][:], np.nan)
+    assert vfile.shape == (npart,)
     ds.close()
 
 
-@pytest.mark.parametrize('type', ['repeatdt', 'timearr'])
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('repeatdt', range(1, 3))
-@pytest.mark.parametrize('dt', [-1, 1])
-@pytest.mark.parametrize('maxvar', [2, 4, 10])
+@pytest.mark.parametrize("type", ["repeatdt", "timearr"])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize("repeatdt", range(1, 3))
+@pytest.mark.parametrize("dt", [-1, 1])
+@pytest.mark.parametrize("maxvar", [2, 4, 10])
 def test_pset_repeated_release_delayed_adding_deleting(type, fieldset, mode, repeatdt, tmpdir, dt, maxvar, runtime=10):
     fieldset.maxvar = maxvar
     pset = None
 
-    MyParticle = ptype[mode].add_variables([
-        Variable('sample_var', initial=0.),
-        Variable('v_once', dtype=np.float64, initial=0., to_write='once')])
+    MyParticle = ptype[mode].add_variables(
+        [Variable("sample_var", initial=0.0), Variable("v_once", dtype=np.float64, initial=0.0, to_write="once")]
+    )
 
-    if type == 'repeatdt':
+    if type == "repeatdt":
         pset = ParticleSet(fieldset, lon=[0], lat=[0], pclass=MyParticle, repeatdt=repeatdt)
-    elif type == 'timearr':
-        pset = ParticleSet(fieldset, lon=np.zeros(runtime), lat=np.zeros(runtime), pclass=MyParticle, time=list(range(runtime)))
+    elif type == "timearr":
+        pset = ParticleSet(
+            fieldset, lon=np.zeros(runtime), lat=np.zeros(runtime), pclass=MyParticle, time=list(range(runtime))
+        )
     outfilepath = tmpdir.join("pfile_repeated_release.zarr")
     pfile = pset.ParticleFile(outfilepath, outputdt=abs(dt), chunks=(1, 1))
 
     def IncrLon(particle, fieldset, time):
-        particle.sample_var += 1.
+        particle.sample_var += 1.0
         if particle.sample_var > fieldset.maxvar:
             particle.delete()
+
     for _ in range(runtime):
-        pset.execute(IncrLon, dt=dt, runtime=1., output_file=pfile)
+        pset.execute(IncrLon, dt=dt, runtime=1.0, output_file=pfile)
 
     ds = xr.open_zarr(outfilepath)
-    samplevar = ds['sample_var'][:]
-    if type == 'repeatdt':
-        assert samplevar.shape == (runtime // repeatdt, min(maxvar+1, runtime))
+    samplevar = ds["sample_var"][:]
+    if type == "repeatdt":
+        assert samplevar.shape == (runtime // repeatdt, min(maxvar + 1, runtime))
         assert np.allclose(pset.sample_var, np.arange(maxvar, -1, -repeatdt))
-    elif type == 'timearr':
+    elif type == "timearr":
         assert samplevar.shape == (runtime, min(maxvar + 1, runtime))
     # test whether samplevar[:, k] = k
     for k in range(samplevar.shape[1]):
-        assert np.allclose([p for p in samplevar[:, k] if np.isfinite(p)], k+1)
+        assert np.allclose([p for p in samplevar[:, k] if np.isfinite(p)], k + 1)
     filesize = os.path.getsize(str(outfilepath))
     assert filesize < 1024 * 65  # test that chunking leads to filesize less than 65KB
     ds.close()
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('repeatdt', [1, 2])
-@pytest.mark.parametrize('nump', [1, 10])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize("repeatdt", [1, 2])
+@pytest.mark.parametrize("nump", [1, 10])
 def test_pfile_chunks_repeatedrelease(fieldset, mode, repeatdt, nump, tmpdir):
     runtime = 8
-    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.zeros((nump, 1)),
-                       lat=np.zeros((nump, 1)), repeatdt=repeatdt)
+    pset = ParticleSet(
+        fieldset, pclass=ptype[mode], lon=np.zeros((nump, 1)), lat=np.zeros((nump, 1)), repeatdt=repeatdt
+    )
     outfilepath = tmpdir.join("pfile_chunks_repeatedrelease.zarr")
     chunks = (20, 10)
     pfile = pset.ParticleFile(outfilepath, outputdt=1, chunks=chunks)
@@ -258,39 +258,40 @@ def test_pfile_chunks_repeatedrelease(fieldset, mode, repeatdt, nump, tmpdir):
     pset.execute(DoNothing, dt=1, runtime=runtime, output_file=pfile)
 
     ds = xr.open_zarr(outfilepath)
-    assert ds['time'].shape == (int(nump*runtime/repeatdt), chunks[1])
+    assert ds["time"].shape == (int(nump * runtime / repeatdt), chunks[1])
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_write_timebackward(fieldset, mode, tmpdir):
     outfilepath = tmpdir.join("pfile_write_timebackward.zarr")
 
     def Update_lon(particle, fieldset, time):
         particle_dlon -= 0.1 * particle.dt  # noqa
 
-    pset = ParticleSet(fieldset, pclass=ptype[mode],
-                       lat=np.linspace(0, 1, 3), lon=[0, 0, 0], time=[1, 2, 3])
-    pfile = pset.ParticleFile(name=outfilepath, outputdt=1.)
-    pset.execute(pset.Kernel(Update_lon), runtime=4, dt=-1.,
-                 output_file=pfile)
+    pset = ParticleSet(fieldset, pclass=ptype[mode], lat=np.linspace(0, 1, 3), lon=[0, 0, 0], time=[1, 2, 3])
+    pfile = pset.ParticleFile(name=outfilepath, outputdt=1.0)
+    pset.execute(pset.Kernel(Update_lon), runtime=4, dt=-1.0, output_file=pfile)
     ds = xr.open_zarr(outfilepath)
-    trajs = ds['trajectory'][:]
-    assert trajs.values.dtype == 'int64'
+    trajs = ds["trajectory"][:]
+    assert trajs.values.dtype == "int64"
     assert np.all(np.diff(trajs.values) < 0)  # all particles written in order of release
     ds.close()
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_write_xiyi(fieldset, mode, tmpdir):
     outfilepath = tmpdir.join("pfile_xiyi.zarr")
     fieldset.U.data[:] = 1  # set a non-zero zonal velocity
-    fieldset.add_field(Field(name='P', data=np.zeros((3, 20)), lon=np.linspace(0, 1, 20), lat=[-2, 0, 2]))
+    fieldset.add_field(Field(name="P", data=np.zeros((3, 20)), lon=np.linspace(0, 1, 20), lat=[-2, 0, 2]))
     dt = 3600
 
-    XiYiParticle = ptype[mode].add_variables([
-        Variable('pxi0', dtype=np.int32, initial=0.),
-        Variable('pxi1', dtype=np.int32, initial=0.),
-        Variable('pyi', dtype=np.int32, initial=0.)])
+    XiYiParticle = ptype[mode].add_variables(
+        [
+            Variable("pxi0", dtype=np.int32, initial=0.0),
+            Variable("pxi1", dtype=np.int32, initial=0.0),
+            Variable("pyi", dtype=np.int32, initial=0.0),
+        ]
+    )
 
     def Get_XiYi(particle, fieldset, time):
         """Kernel to sample the grid indices of the particle.
@@ -303,30 +304,30 @@ def test_write_xiyi(fieldset, mode, tmpdir):
         particle.pyi = particle.yi[0]
 
     def SampleP(particle, fieldset, time):
-        if time > 5*3600:
+        if time > 5 * 3600:
             _ = fieldset.P[particle]  # To trigger sampling of the P field
 
     pset = ParticleSet(fieldset, pclass=XiYiParticle, lon=[0, 0.2], lat=[0.2, 1], lonlatdepth_dtype=np.float64)
     pfile = pset.ParticleFile(name=outfilepath, outputdt=dt)
-    pset.execute([SampleP, Get_XiYi, AdvectionRK4], endtime=10*dt, dt=dt, output_file=pfile)
+    pset.execute([SampleP, Get_XiYi, AdvectionRK4], endtime=10 * dt, dt=dt, output_file=pfile)
 
     ds = xr.open_zarr(outfilepath)
-    pxi0 = ds['pxi0'][:].values.astype(np.int32)
-    pxi1 = ds['pxi1'][:].values.astype(np.int32)
-    lons = ds['lon'][:].values
-    pyi = ds['pyi'][:].values.astype(np.int32)
-    lats = ds['lat'][:].values
+    pxi0 = ds["pxi0"][:].values.astype(np.int32)
+    pxi1 = ds["pxi1"][:].values.astype(np.int32)
+    lons = ds["lon"][:].values
+    pyi = ds["pyi"][:].values.astype(np.int32)
+    lats = ds["lat"][:].values
 
     for p in range(pyi.shape[0]):
         assert (pxi0[p, 0] == 0) and (pxi0[p, -1] == pset[p].pxi0)  # check that particle has moved
         assert np.all(pxi1[p, :6] == 0)  # check that particle has not been sampled on grid 1 until time 6
         assert np.all(pxi1[p, 6:] > 0)  # check that particle has not been sampled on grid 1 after time 6
         for xi, lon in zip(pxi0[p, 1:], lons[p, 1:]):
-            assert fieldset.U.grid.lon[xi] <= lon < fieldset.U.grid.lon[xi+1]
+            assert fieldset.U.grid.lon[xi] <= lon < fieldset.U.grid.lon[xi + 1]
         for xi, lon in zip(pxi1[p, 6:], lons[p, 6:]):
-            assert fieldset.P.grid.lon[xi] <= lon < fieldset.P.grid.lon[xi+1]
+            assert fieldset.P.grid.lon[xi] <= lon < fieldset.P.grid.lon[xi + 1]
         for yi, lat in zip(pyi[p, 1:], lats[p, 1:]):
-            assert fieldset.U.grid.lat[yi] <= lat < fieldset.U.grid.lat[yi+1]
+            assert fieldset.U.grid.lat[yi] <= lat < fieldset.U.grid.lat[yi + 1]
     ds.close()
 
 
@@ -334,10 +335,10 @@ def test_set_calendar():
     for _calendar_name, cf_datetime in zip(_get_cftime_calendars(), _get_cftime_datetimes()):
         date = getattr(cftime, cf_datetime)(1990, 1, 1)
         assert _set_calendar(date.calendar) == date.calendar
-    assert _set_calendar('np_datetime64') == 'standard'
+    assert _set_calendar("np_datetime64") == "standard"
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_reset_dt(fieldset, mode, tmpdir):
     # Assert that p.dt gets reset when a write_time is not a multiple of dt
     # for p.dt=0.02 to reach outputdt=0.05 and endtime=0.1, the steps should be [0.2, 0.2, 0.1, 0.2, 0.2, 0.1], resulting in 6 kernel executions
@@ -350,4 +351,4 @@ def test_reset_dt(fieldset, mode, tmpdir):
     ofile = pset.ParticleFile(name=filepath, outputdt=0.05)
     pset.execute(pset.Kernel(Update_lon), endtime=0.12, dt=0.02, output_file=ofile)
 
-    assert np.allclose(pset.lon, .6)
+    assert np.allclose(pset.lon, 0.6)
diff --git a/tests/test_particles.py b/tests/test_particles.py
index d1e04600..12ca2bd0 100644
--- a/tests/test_particles.py
+++ b/tests/test_particles.py
@@ -12,15 +12,13 @@ from parcels import (
     Variable,
 )
 
-ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
+ptype = {"scipy": ScipyParticle, "jit": JITParticle}
 
 
 def fieldset(xdim=100, ydim=100):
-    data = {'U': np.zeros((ydim, xdim), dtype=np.float32),
-            'V': np.zeros((ydim, xdim), dtype=np.float32)}
-    dimensions = {'lon': np.linspace(0, 1, xdim, dtype=np.float32),
-                  'lat': np.linspace(0, 1, ydim, dtype=np.float32)}
-    return FieldSet.from_data(data, dimensions, mesh='flat')
+    data = {"U": np.zeros((ydim, xdim), dtype=np.float32), "V": np.zeros((ydim, xdim), dtype=np.float32)}
+    dimensions = {"lon": np.linspace(0, 1, xdim, dtype=np.float32), "lat": np.linspace(0, 1, ydim, dtype=np.float32)}
+    return FieldSet.from_data(data, dimensions, mesh="flat")
 
 
 @pytest.fixture(name="fieldset")
@@ -28,71 +26,75 @@ def fieldset_fixture(xdim=100, ydim=100):
     return fieldset(xdim=xdim, ydim=ydim)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_print(fieldset, mode):
-    TestParticle = ptype[mode].add_variable('p', to_write=True)
+    TestParticle = ptype[mode].add_variable("p", to_write=True)
     pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0, 1], lat=[0, 1])
     print(pset)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_variable_init(fieldset, mode, npart=10):
     """Test that checks correct initialisation of custom variables."""
-    extra_vars = [Variable('p_float', dtype=np.float32, initial=10.),
-                  Variable('p_double', dtype=np.float64, initial=11.)]
+    extra_vars = [
+        Variable("p_float", dtype=np.float32, initial=10.0),
+        Variable("p_double", dtype=np.float64, initial=11.0),
+    ]
     TestParticle = ptype[mode].add_variables(extra_vars)
-    TestParticle = TestParticle.add_variable('p_int', np.int32, initial=12.)
-    pset = ParticleSet(fieldset, pclass=TestParticle,
-                       lon=np.linspace(0, 1, npart),
-                       lat=np.linspace(1, 0, npart))
+    TestParticle = TestParticle.add_variable("p_int", np.int32, initial=12.0)
+    pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart))
 
     def addOne(particle, fieldset, time):
-        particle.p_float += 1.
-        particle.p_double += 1.
+        particle.p_float += 1.0
+        particle.p_double += 1.0
         particle.p_int += 1
-    pset.execute(pset.Kernel(AdvectionRK4)+addOne, runtime=1., dt=1.)
-    assert np.allclose([p.p_float for p in pset], 11., rtol=1e-12)
-    assert np.allclose([p.p_double for p in pset], 12., rtol=1e-12)
+
+    pset.execute(pset.Kernel(AdvectionRK4) + addOne, runtime=1.0, dt=1.0)
+    assert np.allclose([p.p_float for p in pset], 11.0, rtol=1e-12)
+    assert np.allclose([p.p_double for p in pset], 12.0, rtol=1e-12)
     assert np.allclose([p.p_int for p in pset], 13, rtol=1e-12)
 
 
-@pytest.mark.parametrize('mode', ['jit'])
-@pytest.mark.parametrize('type', ['np.int8', 'mp.float', 'np.int16'])
+@pytest.mark.parametrize("mode", ["jit"])
+@pytest.mark.parametrize("type", ["np.int8", "mp.float", "np.int16"])
 def test_variable_unsupported_dtypes(fieldset, mode, type):
     """Test that checks errors thrown for unsupported dtypes in JIT mode."""
-    TestParticle = ptype[mode].add_variable('p', dtype=type, initial=10.)
+    TestParticle = ptype[mode].add_variable("p", dtype=type, initial=10.0)
     with pytest.raises((RuntimeError, TypeError)):
         ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_variable_special_names(fieldset, mode):
     """Test that checks errors thrown for special names."""
-    for vars in ['z', 'lon']:
-        TestParticle = ptype[mode].add_variable(vars, dtype=np.float32, initial=10.)
+    for vars in ["z", "lon"]:
+        TestParticle = ptype[mode].add_variable(vars, dtype=np.float32, initial=10.0)
         with pytest.raises(AttributeError):
             ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('coord_type', [np.float32, np.float64])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize("coord_type", [np.float32, np.float64])
 def test_variable_init_relative(fieldset, mode, coord_type, npart=10):
     """Test that checks relative initialisation of custom variables."""
-    lonlat_type = np.float64 if coord_type == 'double' else np.float32
+    lonlat_type = np.float64 if coord_type == "double" else np.float32
 
-    TestParticle = ptype[mode].add_variables([
-        Variable('p_base', dtype=lonlat_type, initial=10.),
-        Variable('p_relative', dtype=lonlat_type, initial=attrgetter('p_base')),
-        Variable('p_lon', dtype=lonlat_type, initial=attrgetter('lon')),
-        Variable('p_lat', dtype=lonlat_type, initial=attrgetter('lat'))])
+    TestParticle = ptype[mode].add_variables(
+        [
+            Variable("p_base", dtype=lonlat_type, initial=10.0),
+            Variable("p_relative", dtype=lonlat_type, initial=attrgetter("p_base")),
+            Variable("p_lon", dtype=lonlat_type, initial=attrgetter("lon")),
+            Variable("p_lat", dtype=lonlat_type, initial=attrgetter("lat")),
+        ]
+    )
 
     lon = np.linspace(0, 1, npart, dtype=lonlat_type)
     lat = np.linspace(1, 0, npart, dtype=lonlat_type)
     pset = ParticleSet(fieldset, pclass=TestParticle, lon=lon, lat=lat, lonlatdepth_dtype=coord_type)
     # Adjust base variable to test for aliasing effects
     for p in pset:
-        p.p_base += 3.
-    assert np.allclose([p.p_base for p in pset], 13., rtol=1e-12)
-    assert np.allclose([p.p_relative for p in pset], 10., rtol=1e-12)
+        p.p_base += 3.0
+    assert np.allclose([p.p_base for p in pset], 13.0, rtol=1e-12)
+    assert np.allclose([p.p_relative for p in pset], 10.0, rtol=1e-12)
     assert np.allclose([p.p_lon for p in pset], lon, rtol=1e-12)
     assert np.allclose([p.p_lat for p in pset], lat, rtol=1e-12)
diff --git a/tests/test_particlesets.py b/tests/test_particlesets.py
index b5fa74de..dfd1f86c 100644
--- a/tests/test_particlesets.py
+++ b/tests/test_particlesets.py
@@ -12,7 +12,7 @@ from parcels import (
     Variable,
 )
 
-ptype = {'scipy': ScipyParticle, 'jit': JITParticle}
+ptype = {"scipy": ScipyParticle, "jit": JITParticle}
 
 
 def fieldset(xdim=40, ydim=100):
@@ -21,8 +21,8 @@ def fieldset(xdim=40, ydim=100):
     lon = np.linspace(0, 1, xdim, dtype=np.float32)
     lat = np.linspace(-60, 60, ydim, dtype=np.float32)
    depth = np.zeros(1, dtype=np.float32)
-    data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
-    dimensions = {'lat': lat, 'lon': lon, 'depth': depth}
+    data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32)}
+    dimensions = {"lat": lat, "lon": lon, "depth": depth}
     return FieldSet.from_data(data, dimensions)
 
 
@@ -31,7 +31,7 @@ def fieldset_fixture(xdim=40, ydim=100):
     return fieldset(xdim=xdim, ydim=ydim)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_create_lon_lat(fieldset, mode, npart=100):
     lon = np.linspace(0, 1, npart, dtype=np.float32)
     lat = np.linspace(1, 0, npart, dtype=np.float32)
@@ -40,19 +40,20 @@ def test_pset_create_lon_lat(fieldset, mode, npart=100):
     assert np.allclose([p.lat for p in pset], lat, rtol=1e-12)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('lonlatdepth_dtype', [np.float64, np.float32])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize("lonlatdepth_dtype", [np.float64, np.float32])
 def test_pset_create_line(fieldset, mode, lonlatdepth_dtype, npart=100):
     lon = np.linspace(0, 1, npart, dtype=lonlatdepth_dtype)
     lat = np.linspace(1, 0, npart, dtype=lonlatdepth_dtype)
-    pset = ParticleSet.from_line(fieldset, size=npart, start=(0, 1), finish=(1, 0),
-                                 pclass=ptype[mode], lonlatdepth_dtype=lonlatdepth_dtype)
+    pset = ParticleSet.from_line(
+        fieldset, size=npart, start=(0, 1), finish=(1, 0), pclass=ptype[mode], lonlatdepth_dtype=lonlatdepth_dtype
+    )
     assert np.allclose([p.lon for p in pset], lon, rtol=1e-12)
     assert np.allclose([p.lat for p in pset], lat, rtol=1e-12)
     assert isinstance(pset[0].lat, lonlatdepth_dtype)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_create_empty_pset(fieldset, mode):
     pset = ParticleSet(fieldset, pclass=ptype[mode])
     assert pset.size == 0
@@ -60,11 +61,11 @@ def test_create_empty_pset(fieldset, mode):
     def DoNothing(particle, fieldset, time):
         pass
 
-    pset.execute(DoNothing, endtime=1., dt=1.)
+    pset.execute(DoNothing, endtime=1.0, dt=1.0)
     assert pset.size == 0
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_create_list_with_customvariable(fieldset, mode, npart=100):
     lon = np.linspace(0, 1, npart, dtype=np.float32)
     lat = np.linspace(1, 0, npart, dtype=np.float32)
@@ -78,49 +79,50 @@ def test_pset_create_list_with_customvariable(fieldset, mode, npart=100):
     assert np.allclose([p.v for p in pset], v_vals, rtol=1e-12)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
-@pytest.mark.parametrize('restart', [True, False])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
+@pytest.mark.parametrize("restart", [True, False])
 def test_pset_create_fromparticlefile(fieldset, mode, restart, tmpdir):
     filename = tmpdir.join("pset_fromparticlefile.zarr")
     lon = np.linspace(0, 1, 10, dtype=np.float32)
     lat = np.linspace(1, 0, 10, dtype=np.float32)
 
-    TestParticle = ptype[mode].add_variable('p', np.float32, initial=0.33)
-    TestParticle = TestParticle.add_variable('p2', np.float32, initial=1, to_write=False)
-    TestParticle = TestParticle.add_variable('p3', np.float64, to_write='once')
+    TestParticle = ptype[mode].add_variable("p", np.float32, initial=0.33)
+    TestParticle = TestParticle.add_variable("p2", np.float32, initial=1, to_write=False)
+    TestParticle = TestParticle.add_variable("p3", np.float64, to_write="once")
 
-    pset = ParticleSet(fieldset, lon=lon, lat=lat, depth=[4]*len(lon), pclass=TestParticle, p3=np.arange(len(lon)))
+    pset = ParticleSet(fieldset, lon=lon, lat=lat, depth=[4] * len(lon), pclass=TestParticle, p3=np.arange(len(lon)))
     pfile = pset.ParticleFile(filename, outputdt=1)
 
     def Kernel(particle, fieldset, time):
-        particle.p = 2.
-        if particle.lon == 1.:
+        particle.p = 2.0
+        if particle.lon == 1.0:
             particle.delete()
 
     pset.execute(Kernel, runtime=2, dt=1, output_file=pfile)
 
-    pset_new = ParticleSet.from_particlefile(fieldset, pclass=TestParticle, filename=filename,
-                                             restart=restart, repeatdt=1)
+    pset_new = ParticleSet.from_particlefile(
+        fieldset, pclass=TestParticle, filename=filename, restart=restart, repeatdt=1
+    )
 
-    for var in ['lon', 'lat', 'depth', 'time', 'p', 'p2', 'p3']:
+    for var in ["lon", "lat", "depth", "time", "p", "p2", "p3"]:
         assert np.allclose([getattr(p, var) for p in pset], [getattr(p, var) for p in pset_new])
 
     if restart:
         assert np.allclose([p.id for p in pset], [p.id for p in pset_new])
     pset_new.execute(Kernel, runtime=2, dt=1)
-    assert len(pset_new) == 3*len(pset)
+    assert len(pset_new) == 3 * len(pset)
 
     assert pset[0].p3.dtype == np.float64
 
 
-@pytest.mark.parametrize('mode', ['scipy'])
-@pytest.mark.parametrize('lonlatdepth_dtype', [np.float64, np.float32])
+@pytest.mark.parametrize("mode", ["scipy"])
+@pytest.mark.parametrize("lonlatdepth_dtype", [np.float64, np.float32])
 def test_pset_create_field(fieldset, mode, lonlatdepth_dtype, npart=100):
     np.random.seed(123456)
     shape = (fieldset.U.lon.size, fieldset.U.lat.size)
-    K = Field('K', lon=fieldset.U.lon, lat=fieldset.U.lat,
-              data=np.ones(shape, dtype=np.float32), transpose=True)
-    pset = ParticleSet.from_field(fieldset, size=npart, pclass=ptype[mode],
-                                  start_field=K, lonlatdepth_dtype=lonlatdepth_dtype)
+    K = Field("K", lon=fieldset.U.lon, lat=fieldset.U.lat, data=np.ones(shape, dtype=np.float32), transpose=True)
+    pset = ParticleSet.from_field(
+        fieldset, size=npart, pclass=ptype[mode], start_field=K, lonlatdepth_dtype=lonlatdepth_dtype
+    )
     assert (np.array([p.lon for p in pset]) <= K.lon[-1]).all()
     assert (np.array([p.lon for p in pset]) >= K.lon[0]).all()
     assert (np.array([p.lat for p in pset]) <= K.lat[-1]).all()
@@ -130,10 +132,10 @@ def test_pset_create_field(fieldset, mode, lonlatdepth_dtype, npart=100):
 
 def test_pset_create_field_curvi(npart=100):
     np.random.seed(123456)
-    r_v = np.linspace(.25, 2, 20)
-    theta_v = np.linspace(0, np.pi/2, 200)
-    dtheta = theta_v[1]-theta_v[0]
-    dr = r_v[1]-r_v[0]
+    r_v = np.linspace(0.25, 2, 20)
+    theta_v = np.linspace(0, np.pi / 2, 200)
+    dtheta = theta_v[1] - theta_v[0]
+    dr = r_v[1] - r_v[0]
     (r, theta) = np.meshgrid(r_v, theta_v)
 
     x = -1 + r * np.cos(theta)
@@ -141,44 +143,42 @@ def test_pset_create_field_curvi(npart=100):
     y = -1 + r * np.sin(theta)
     grid = CurvilinearZGrid(x, y)
 
     u = np.ones(x.shape)
-    v = np.where(np.logical_and(theta > np.pi/4, theta < np.pi/3), 1, 0)
+    v = np.where(np.logical_and(theta > np.pi / 4, theta < np.pi / 3), 1, 0)
 
-    ufield = Field('U', u, grid=grid)
-    vfield = Field('V', v, grid=grid)
+    ufield = Field("U", u, grid=grid)
+    vfield = Field("V", v, grid=grid)
     fieldset = FieldSet(ufield, vfield)
 
-    pset = ParticleSet.from_field(fieldset, size=npart, pclass=ptype['scipy'], start_field=fieldset.V)
+    pset = ParticleSet.from_field(fieldset, size=npart, pclass=ptype["scipy"], start_field=fieldset.V)
 
-    lons = np.array([p.lon+1 for p in pset])
-    lats = np.array([p.lat+1 for p in pset])
+    lons = np.array([p.lon + 1 for p in pset])
+    lats = np.array([p.lat + 1 for p in pset])
     thetas = np.arctan2(lats, lons)
-    rs = np.sqrt(lons*lons + lats*lats)
+    rs = np.sqrt(lons * lons + lats * lats)
 
-    test = np.pi/4-dtheta < thetas
-    test *= thetas < np.pi/3+dtheta
-    test *= rs > .25-dr
-    test *= rs < 2+dr
+    test = np.pi / 4 - dtheta < thetas
+    test *= thetas < np.pi / 3 + dtheta
+    test *= rs > 0.25 - dr
+    test *= rs < 2 + dr
     assert np.all(test)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_create_with_time(fieldset, mode, npart=100):
     lon = np.linspace(0, 1, npart)
     lat = np.linspace(1, 0, npart)
-    time = 5.
+    time = 5.0
     pset = ParticleSet(fieldset, lon=lon, lat=lat, pclass=ptype[mode], time=time)
     assert np.allclose([p.time for p in pset], time, rtol=1e-12)
-    pset = ParticleSet.from_list(fieldset, lon=lon, lat=lat, pclass=ptype[mode],
-                                 time=[time]*npart)
+    pset = ParticleSet.from_list(fieldset, lon=lon, lat=lat, pclass=ptype[mode], time=[time] * npart)
     assert np.allclose([p.time for p in pset], time, rtol=1e-12)
-    pset = ParticleSet.from_line(fieldset, size=npart, start=(0, 1), finish=(1, 0),
-                                 pclass=ptype[mode], time=time)
+    pset = ParticleSet.from_line(fieldset, size=npart, start=(0, 1), finish=(1, 0), pclass=ptype[mode], time=time)
     assert np.allclose([p.time for p in pset], time, rtol=1e-12)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_not_multipldt_time(fieldset, mode):
     times = [0, 1.1]
-    pset = ParticleSet(fieldset, lon=[0]*2, lat=[0]*2, pclass=ptype[mode], time=times)
+    pset = ParticleSet(fieldset, lon=[0] * 2, lat=[0] * 2, pclass=ptype[mode], time=times)
 
     def Addlon(particle, fieldset, time):
         particle_dlon += particle.dt  # noqa
@@ -187,16 +187,16 @@ def test_pset_not_multipldt_time(fieldset, mode):
     assert np.allclose([p.lon_nextloop for p in pset], [2 - t for t in times])
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_repeated_release(fieldset, mode, npart=10):
     time = np.arange(0, npart, 1)  # release 1 particle every second
-    pset = ParticleSet(fieldset, lon=np.zeros(npart), lat=np.zeros(npart),
-                       pclass=ptype[mode], time=time)
+    pset = ParticleSet(fieldset, lon=np.zeros(npart), lat=np.zeros(npart), pclass=ptype[mode], time=time)
     assert np.allclose([p.time for p in pset], time)
 
     def IncrLon(particle, fieldset, time):
-        particle_dlon += 1.  # noqa
-    pset.execute(IncrLon, dt=1., runtime=npart+1)
+        particle_dlon += 1.0  # noqa
+
+    pset.execute(IncrLon, dt=1.0, runtime=npart + 1)
     assert np.allclose([p.lon for p in pset], np.arange(npart, 0, -1))
 
 
@@ -204,14 +204,15 @@ def test_pset_repeatdt_check_dt(fieldset):
     pset = ParticleSet(fieldset, lon=[0], lat=[0], pclass=ScipyParticle, repeatdt=5)
 
     def IncrLon(particle, fieldset, time):
-        particle.lon = 1.
+        particle.lon = 1.0
+
     pset.execute(IncrLon, dt=2, runtime=21)
     assert np.allclose([p.lon for p in pset], 1)  # if p.dt is nan, it won't be executed so p.lon will be 0
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_repeatdt_custominit(fieldset, mode):
-    MyParticle = ptype[mode].add_variable('sample_var')
+    MyParticle = ptype[mode].add_variable("sample_var")
 
     pset = ParticleSet(fieldset, lon=0, lat=0, pclass=MyParticle, repeatdt=1, sample_var=5)
 
@@ -219,10 +220,10 @@ def test_pset_repeatdt_custominit(fieldset, mode):
         pass
 
     pset.execute(DoNothing, dt=1, runtime=21)
-    assert np.allclose([p.sample_var for p in pset], 5.)
+    assert np.allclose([p.sample_var for p in pset], 5.0)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_stop_simulation(fieldset, mode):
     pset = ParticleSet(fieldset, lon=0, lat=0, pclass=ptype[mode])
 
@@ -234,7 +235,7 @@ def test_pset_stop_simulation(fieldset, mode):
     assert pset[0].time == 4
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_access(fieldset, mode, npart=100):
     lon = np.linspace(0, 1, npart, dtype=np.float32)
     lat = np.linspace(1, 0, npart, dtype=np.float32)
@@ -244,35 +245,34 @@ def test_pset_access(fieldset, mode, npart=100):
     assert np.allclose([pset[i].lat for i in range(pset.size)], lat, rtol=1e-12)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_custom_ptype(fieldset, mode, npart=100):
-    TestParticle = ptype[mode].add_variable([Variable('p', np.float32, initial=0.33),
-                                             Variable('n', np.int32, initial=2)])
-
-    pset = ParticleSet(fieldset, pclass=TestParticle,
-                       lon=np.linspace(0, 1, npart),
-                       lat=np.linspace(1, 0, npart))
+    TestParticle = ptype[mode].add_variable(
+        [Variable("p", np.float32, initial=0.33), Variable("n", np.int32, initial=2)]
+    )
+    pset = ParticleSet(fieldset, pclass=TestParticle, lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart))
 
     assert pset.size == npart
     assert np.allclose([p.p - 0.33 for p in pset], np.zeros(npart), atol=1e-5)
     assert np.allclose([p.n - 2 for p in pset], np.zeros(npart), rtol=1e-12)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_add_explicit(fieldset, mode, npart=100):
     lon = np.linspace(0, 1, npart)
     lat = np.linspace(1, 0, npart)
     pset = ParticleSet(fieldset, lon=[], lat=[], pclass=ptype[mode], lonlatdepth_dtype=np.float64)
     for i in range(npart):
-        particle = ParticleSet(pclass=ptype[mode], lon=lon[i], lat=lat[i],
-                               fieldset=fieldset, lonlatdepth_dtype=np.float64)
+        particle = ParticleSet(
+            pclass=ptype[mode], lon=lon[i], lat=lat[i], fieldset=fieldset, lonlatdepth_dtype=np.float64
+        )
         pset.add(particle)
     assert pset.size == npart
     assert np.allclose([p.lon for p in pset], lon, rtol=1e-12)
     assert np.allclose([p.lat for p in pset], lat, rtol=1e-12)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_add_shorthand(fieldset, mode, npart=100):
     lon = np.linspace(0, 1, npart, dtype=np.float32)
     lat = np.linspace(1, 0, npart, dtype=np.float32)
@@ -284,7 +284,7 @@ def test_pset_add_shorthand(fieldset, mode, npart=100):
     assert np.allclose([p.lat for p in pset], lat, rtol=1e-12)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_add_execute(fieldset, mode, npart=10):
     def AddLat(particle, fieldset, time):
         particle_dlat += 0.1  # noqa
@@ -293,40 +293,32 @@ def test_pset_add_execute(fieldset, mode, npart=10):
     for _ in range(npart):
         pset += ParticleSet(pclass=ptype[mode], lon=0.1, lat=0.1, fieldset=fieldset)
     for _ in range(4):
-        pset.execute(pset.Kernel(AddLat), runtime=1., dt=1.0)
+        pset.execute(pset.Kernel(AddLat), runtime=1.0, dt=1.0)
     assert np.allclose(np.array([p.lat for p in pset]), 0.4, rtol=1e-12)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_merge_inplace(fieldset, mode, npart=100):
-    pset1 = ParticleSet(fieldset, pclass=ptype[mode],
-                        lon=np.linspace(0, 1, npart),
-                        lat=np.linspace(1, 0, npart))
-    pset2 = ParticleSet(fieldset, pclass=ptype[mode],
-                        lon=np.linspace(0, 1, npart),
-                        lat=np.linspace(0, 1, npart))
+    pset1 = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart))
+    pset2 = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=np.linspace(0, 1, npart))
     assert pset1.size == npart
     assert pset2.size == npart
     pset1.add(pset2)
-    assert pset1.size == 2*npart
+    assert pset1.size == 2 * npart
 
 
 @pytest.mark.xfail(reason="ParticleSet duplication has not been implemented yet")
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_merge_duplicate(fieldset, mode, npart=100):
-    pset1 = ParticleSet(fieldset, pclass=ptype[mode],
-                        lon=np.linspace(0, 1, npart),
-                        lat=np.linspace(1, 0, npart))
-    pset2 = ParticleSet(fieldset, pclass=ptype[mode],
-                        lon=np.linspace(0, 1, npart),
-                        lat=np.linspace(0, 1, npart))
+    pset1 = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart))
+    pset2 = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=np.linspace(0, 1, npart))
     pset3 = pset1 + pset2
     assert pset1.size == npart
     assert pset2.size == npart
-    assert pset3.size == 2*npart
+    assert pset3.size == 2 * npart
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_remove_index(fieldset, mode, npart=100):
     lon = np.linspace(0, 1, npart)
     lat = np.linspace(1, 0, npart)
@@ -339,7 +331,7 @@ def test_pset_remove_index(fieldset, mode, npart=100):
 
 
 @pytest.mark.xfail(reason="Particle removal has not been implemented yet")
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_remove_particle(fieldset, mode, npart=100):
     lon = np.linspace(0, 1, npart)
     lat = np.linspace(1, 0, npart)
@@ -351,49 +343,43 @@ def test_pset_remove_particle(fieldset, mode, npart=100):
     assert pset.size == 0
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_remove_kernel(fieldset, mode, npart=100):
     def DeleteKernel(particle, fieldset, time):
-        if particle.lon >= .4:
+        if particle.lon >= 0.4:
             particle.delete()
 
-    pset = ParticleSet(fieldset, pclass=ptype[mode],
-                       lon=np.linspace(0, 1, npart),
-                       lat=np.linspace(1, 0, npart))
-    pset.execute(pset.Kernel(DeleteKernel), endtime=1., dt=1.0)
+    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=np.linspace(1, 0, npart))
+    pset.execute(pset.Kernel(DeleteKernel), endtime=1.0, dt=1.0)
     assert pset.size == 40
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_multi_execute(fieldset, mode, npart=10, n=5):
     def AddLat(particle, fieldset, time):
         particle_dlat += 0.1  # noqa
 
-    pset = ParticleSet(fieldset, pclass=ptype[mode],
-                       lon=np.linspace(0, 1, npart),
-                       lat=np.zeros(npart))
+    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=np.zeros(npart))
     k_add = pset.Kernel(AddLat)
-    for _ in range(n+1):
-        pset.execute(k_add, runtime=1., dt=1.0)
-    assert np.allclose([p.lat - n*0.1 for p in pset], np.zeros(npart), rtol=1e-12)
+    for _ in range(n + 1):
+        pset.execute(k_add, runtime=1.0, dt=1.0)
+    assert np.allclose([p.lat - n * 0.1 for p in pset], np.zeros(npart), rtol=1e-12)
 
 
-@pytest.mark.parametrize('mode', ['scipy', 'jit'])
+@pytest.mark.parametrize("mode", ["scipy", "jit"])
 def test_pset_multi_execute_delete(fieldset, mode, npart=10, n=5):
     def AddLat(particle, fieldset, time):
         particle_dlat += 0.1  # noqa
 
-    pset = ParticleSet(fieldset, pclass=ptype[mode],
-                       lon=np.linspace(0, 1, npart),
-                       lat=np.zeros(npart))
+    pset = ParticleSet(fieldset, pclass=ptype[mode], lon=np.linspace(0, 1, npart), lat=np.zeros(npart))
     k_add = pset.Kernel(AddLat)
-    for _ in range(n+1):
-        pset.execute(k_add, runtime=1., dt=1.0)
+    for _ in range(n + 1):
+        pset.execute(k_add, runtime=1.0, dt=1.0)
         pset.remove_indices(-1)
-    assert np.allclose(pset.lat, n*0.1, atol=1e-12)
+    assert np.allclose(pset.lat, n * 0.1, atol=1e-12)
 
 
-@pytest.mark.parametrize('staggered_grid', ['Agrid', 'Cgrid'])
+@pytest.mark.parametrize("staggered_grid", ["Agrid", "Cgrid"])
 def test_from_field_exact_val(staggered_grid):
     xdim = 4
     ydim = 3
@@ -401,37 +387,29 @@ def test_from_field_exact_val(staggered_grid):
     lon = np.linspace(-1, 2, xdim, dtype=np.float32)
     lat = np.linspace(50, 52, ydim, dtype=np.float32)
-    dimensions = {'lat': lat, 'lon': lon}
-    if staggered_grid == 'Agrid':
+    dimensions = {"lat": lat, "lon": lon}
+    if staggered_grid == "Agrid":
         U = np.zeros((ydim, xdim), dtype=np.float32)
         V = np.zeros((ydim, xdim), dtype=np.float32)
-        data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
-        mask = np.array([[1, 1, 0, 0],
-                         [1, 1, 1, 0],
-                         [1, 1, 1, 1]])
-        fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
+        data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32)}
+        mask = np.array([[1, 1, 0, 0], [1, 1, 1, 0], [1, 1, 1, 1]])
+        fieldset = FieldSet.from_data(data, dimensions, mesh="flat")
 
-        FMask = Field('mask', mask, lon, lat)
+        FMask = Field("mask", mask, lon, lat)
         fieldset.add_field(FMask)
-    elif staggered_grid == 'Cgrid':
-        U = np.array([[0, 0, 0, 0],
-                      [1, 0, 0, 0],
-                      [1, 1, 0, 0]])
-        V = np.array([[0, 1, 0, 0],
-                      [0, 1, 0, 0],
-                      [0, 1, 1, 0]])
-        data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}
-        mask = np.array([[-1, -1, -1, -1],
-                         [-1, 1, 0, 0],
-                         [-1, 1, 1, 0]])
-        fieldset = FieldSet.from_data(data, dimensions, mesh='flat')
-        fieldset.U.interp_method = 'cgrid_velocity'
-        fieldset.V.interp_method = 'cgrid_velocity'
-
-        FMask = Field('mask', mask, lon, lat, interp_method='cgrid_tracer')
+    elif staggered_grid == "Cgrid":
+        U = np.array([[0, 0, 0, 0], [1, 0, 0, 0], [1, 1, 0, 0]])
+        V = np.array([[0, 1, 0, 0], [0, 1, 0, 0], [0, 1, 1, 0]])
+        data = {"U": np.array(U, dtype=np.float32), "V": np.array(V, dtype=np.float32)}
+        mask = np.array([[-1, -1, -1, -1], [-1, 1, 0, 0], [-1, 1, 1, 0]])
+        fieldset = FieldSet.from_data(data, dimensions, mesh="flat")
+        fieldset.U.interp_method = "cgrid_velocity"
+        fieldset.V.interp_method = "cgrid_velocity"
+
+        FMask = Field("mask", mask, lon, lat, interp_method="cgrid_tracer")
         fieldset.add_field(FMask)
 
-    SampleParticle = ptype['scipy'].add_variable('mask', initial=0)
+    SampleParticle = ptype["scipy"].add_variable("mask", initial=0)
 
     def SampleMask(particle, fieldset, time):
         particle.mask = fieldset.mask[particle]
diff --git a/tests/test_tools.py b/tests/test_tools.py
index f31f2eff..2d7df023 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -24,7 +24,7 @@ def test_download_example_dataset(tmp_path):
 def test_download_example_dataset_lite(tmp_path):
     # test valid datasets
     # avoids downloading the dataset (only verifying that the URL is responsive, and folders are created)
-    with unittest.mock.patch('urllib.request.urlretrieve', new=mock_urlretrieve) as mock_function:  # noqa: F841
+    with unittest.mock.patch("urllib.request.urlretrieve", new=mock_urlretrieve) as mock_function:  # noqa: F841
         for dataset in list_example_datasets()[0:1]:
             dataset_folder_path = download_example_dataset(dataset, data_home=tmp_path)
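
A note on one pattern in the hunks above: `test_small_dt` parametrizes the expected outcome itself, pairing each `dt` value with either `does_not_raise()` or `pytest.raises(ValueError)`, so passing and raising cases share a single test body. A minimal, self-contained sketch of that pattern (the `nullcontext` alias and the `1e-5` threshold are illustrative assumptions, not taken from this diff):

    # Sketch of the parametrized-expectation pattern used in test_small_dt.
    # does_not_raise() yields a no-op context; pytest.raises(...) asserts the error.
    from contextlib import nullcontext as does_not_raise

    import pytest


    @pytest.mark.parametrize(
        ("dt", "expectation"), [(1e-2, does_not_raise()), (1e-6, pytest.raises(ValueError))]
    )
    def test_dt_is_validated(dt, expectation):
        with expectation:
            if dt < 1e-5:  # hypothetical resolution limit, for illustration only
                raise ValueError("dt below supported resolution")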