diff --git a/docs/community/v4-migration.md b/docs/community/v4-migration.md index 21da7fdd78..076c9b02d4 100644 --- a/docs/community/v4-migration.md +++ b/docs/community/v4-migration.md @@ -14,6 +14,7 @@ Version 4 of Parcels is unreleased at the moment. The information in this migrat - The `time` argument in the Kernel signature has been removed in the Kernel API, so can't be used. Use `particle.time` instead. - The `particle` argument in the Kernel signature has been renamed to `particles`. - `math` functions should be replaced with array compatible equivalents (e.g., `math.sin` -> `np.sin`). Instead of `ParcelsRandom` you should use numpy's random functions. +- `particle.depth` has been changed to `particles.z` to be consistent with the [CF conventions for trajectory data](https://cfconventions.org/cf-conventions/cf-conventions.html#trajectory-data), and to make Parcels also generalizable to atmospheric contexts. ## FieldSet diff --git a/parcels/_core/field.py b/parcels/_core/field.py index d582e5b3c6..332df25ec9 100644 --- a/parcels/_core/field.py +++ b/parcels/_core/field.py @@ -233,7 +233,7 @@ def __getitem__(self, key): self._check_velocitysampling() try: if isinstance(key, KernelParticle): - return self.eval(key.time, key.depth, key.lat, key.lon, key) + return self.eval(key.time, key.z, key.lat, key.lon, key) else: return self.eval(*key) except tuple(AllParcelsErrorCodes.keys()) as error: @@ -334,7 +334,7 @@ def eval(self, time: datetime, z, y, x, particles=None, applyConversion=True): def __getitem__(self, key): try: if isinstance(key, KernelParticle): - return self.eval(key.time, key.depth, key.lat, key.lon, key) + return self.eval(key.time, key.z, key.lat, key.lon, key) else: return self.eval(*key) except tuple(AllParcelsErrorCodes.keys()) as error: diff --git a/parcels/_core/fieldset.py b/parcels/_core/fieldset.py index bbc064a19e..7cbe024995 100644 --- a/parcels/_core/fieldset.py +++ b/parcels/_core/fieldset.py @@ -240,7 +240,7 @@ def from_copernicusmarine(ds: xr.Dataset): if "W" in ds.data_vars: ds["W"] -= ds[ "W" - ] # Negate W to convert from up positive to down positive (as that's the direction of positive depth) + ] # Negate W to convert from up positive to down positive (as that's the direction of positive z) fields["W"] = Field("W", ds["W"], grid) fields["UVW"] = VectorField("UVW", fields["U"], fields["V"], fields["W"]) else: diff --git a/parcels/_core/kernel.py b/parcels/_core/kernel.py index cbcc5d2cd0..a78074f8e0 100644 --- a/parcels/_core/kernel.py +++ b/parcels/_core/kernel.py @@ -120,11 +120,11 @@ def Setcoords(particles, fieldset): # pragma: no cover particles.lon += particles.dlon particles.lat += particles.dlat - particles.depth += particles.ddepth + particles.z += particles.dz particles.dlon = 0 particles.dlat = 0 - particles.ddepth = 0 + particles.dz = 0 particles.time = particles.time_nextloop @@ -286,6 +286,6 @@ def execute(self, pset, endtime, dt): if error_code == StatusCode.ErrorTimeExtrapolation: error_func(pset[inds].time) else: - error_func(pset[inds].depth, pset[inds].lat, pset[inds].lon) + error_func(pset[inds].z, pset[inds].lat, pset[inds].lon) return pset diff --git a/parcels/_core/particle.py b/parcels/_core/particle.py index 4d3ff09459..64dd2a447c 100644 --- a/parcels/_core/particle.py +++ b/parcels/_core/particle.py @@ -176,13 +176,13 @@ def get_default_particle(spatial_dtype: np.float32 | np.float64) -> ParticleClas attrs={"standard_name": "latitude", "units": "degrees_north", "axis": "Y"}, ), Variable( - "depth", + "z", dtype=spatial_dtype, - 
attrs={"standard_name": "depth", "units": "m", "positive": "down"}, + attrs={"standard_name": "vertical coordinate", "units": "m", "positive": "down"}, ), Variable("dlon", dtype=spatial_dtype, to_write=False), Variable("dlat", dtype=spatial_dtype, to_write=False), - Variable("ddepth", dtype=spatial_dtype, to_write=False), + Variable("dz", dtype=spatial_dtype, to_write=False), Variable( "time", dtype=_SAME_AS_FIELDSET_TIME_INTERVAL.VALUE, diff --git a/parcels/_core/particlefile.py b/parcels/_core/particlefile.py index f3ae6c041e..3e290ac2f9 100644 --- a/parcels/_core/particlefile.py +++ b/parcels/_core/particlefile.py @@ -126,12 +126,6 @@ def store(self): def create_new_zarrfile(self): return self._create_new_zarrfile - def _convert_varout_name(self, var): - if var == "depth": - return "z" - else: - return var - def _extend_zarr_dims(self, Z, store, dtype, axis): if axis == 1: a = np.full((Z.shape[0], self.chunks[1]), _DATATYPES_TO_FILL_VALUES[dtype], dtype=dtype) @@ -214,8 +208,7 @@ def _write_particle_data(self, *, particle_data, pclass, time_interval, time, in obs = np.zeros((self._maxids), dtype=np.int32) for var in vars_to_write: dtype = _maybe_convert_time_dtype(var.dtype) - varout = self._convert_varout_name(var.name) - if varout not in ["trajectory"]: # because 'trajectory' is written as coordinate + if var.name not in ["trajectory"]: # because 'trajectory' is written as coordinate if var.to_write == "once": data = np.full( (arrsize[0],), @@ -228,8 +221,8 @@ def _write_particle_data(self, *, particle_data, pclass, time_interval, time, in data = np.full(arrsize, _DATATYPES_TO_FILL_VALUES[dtype], dtype=dtype) data[ids, 0] = particle_data[var.name][indices_to_write] dims = ["trajectory", "obs"] - ds[varout] = xr.DataArray(data=data, dims=dims, attrs=attrs[var.name]) - ds[varout].encoding["chunks"] = self.chunks[0] if var.to_write == "once" else self.chunks # type: ignore[index] + ds[var.name] = xr.DataArray(data=data, dims=dims, attrs=attrs[var.name]) + ds[var.name].encoding["chunks"] = self.chunks[0] if var.to_write == "once" else self.chunks # type: ignore[index] ds.to_zarr(store, mode="w") self._create_new_zarrfile = False else: @@ -237,16 +230,15 @@ def _write_particle_data(self, *, particle_data, pclass, time_interval, time, in obs = particle_data["obs_written"][indices_to_write] for var in vars_to_write: dtype = _maybe_convert_time_dtype(var.dtype) - varout = self._convert_varout_name(var.name) - if self._maxids > Z[varout].shape[0]: - self._extend_zarr_dims(Z[varout], store, dtype=dtype, axis=0) + if self._maxids > Z[var.name].shape[0]: + self._extend_zarr_dims(Z[var.name], store, dtype=dtype, axis=0) if var.to_write == "once": if len(once_ids) > 0: - Z[varout].vindex[ids_once] = particle_data[var.name][indices_to_write_once] + Z[var.name].vindex[ids_once] = particle_data[var.name][indices_to_write_once] else: - if max(obs) >= Z[varout].shape[1]: # type: ignore[type-var] - self._extend_zarr_dims(Z[varout], store, dtype=dtype, axis=1) - Z[varout].vindex[ids, obs] = particle_data[var.name][indices_to_write] + if max(obs) >= Z[var.name].shape[1]: # type: ignore[type-var] + self._extend_zarr_dims(Z[var.name], store, dtype=dtype, axis=1) + Z[var.name].vindex[ids, obs] = particle_data[var.name][indices_to_write] particle_data["obs_written"][indices_to_write] = obs + 1 @@ -262,7 +254,7 @@ def write_latest_locations(self, pset, time): time : Time at which to write ParticleSet. 
Note that typically this would be pset.time_nextloop """ - for var in ["lon", "lat", "depth"]: + for var in ["lon", "lat", "z"]: pset._data[f"{var}"] += pset._data[f"d{var}"] pset._data["time"] = pset._data["time_nextloop"] self.write(pset, time) diff --git a/parcels/_core/particleset.py b/parcels/_core/particleset.py index fe0691c7e4..ace99e9738 100644 --- a/parcels/_core/particleset.py +++ b/parcels/_core/particleset.py @@ -39,16 +39,12 @@ class ParticleSet: List of initial longitude values for particles lat : List of initial latitude values for particles - depth : - Optional list of initial depth values for particles. Default is 0m + z : + Optional list of initial z values for particles. Default is 0m time : Optional list of initial time values for particles. Default is fieldset.U.grid.time[0] repeatdt : datetime.timedelta or float, optional Optional interval on which to repeat the release of the ParticleSet. Either timedelta object, or float in seconds. - lonlatdepth_dtype : - Floating precision for lon, lat, depth particle coordinates. - It is either np.float32 or np.float64. Default is np.float32 if fieldset.U.interp_method is 'linear' - and np.float64 if the interpolation method is 'cgrid_velocity' trajectory_ids : Optional list of "trajectory" values (integers) for the particle IDs partition_function : @@ -66,7 +62,7 @@ def __init__( pclass=Particle, lon=None, lat=None, - depth=None, + z=None, time=None, trajectory_ids=None, **kwargs, @@ -84,15 +80,15 @@ def __init__( if trajectory_ids is None: trajectory_ids = np.arange(lon.size) - if depth is None: - mindepth = 0 + if z is None: + minz = 0 for field in self.fieldset.fields.values(): if field.grid.depth is not None: - mindepth = min(mindepth, field.grid.depth[0]) - depth = np.ones(lon.size) * mindepth + minz = min(minz, field.grid.depth[0]) + z = np.ones(lon.size) * minz else: - depth = _convert_to_flat_array(depth) - assert lon.size == lat.size and lon.size == depth.size, "lon, lat, depth don't all have the same lenghts" + z = _convert_to_flat_array(z) + assert lon.size == lat.size and lon.size == z.size, "lon, lat, z don't all have the same lenghts" if time is None or len(time) == 0: # do not set a time yet (because sign_dt not known) @@ -106,7 +102,7 @@ def __init__( raise TypeError("particle time must be a datetime, timedelta, or date object") time = np.repeat(time, lon.size) if time.size == 1 else time - assert lon.size == time.size, "time and positions (lon, lat, depth) do not have the same lengths." + assert lon.size == time.size, "time and positions (lon, lat, z) do not have the same lengths." if fieldset.time_interval: _warn_particle_times_outside_fieldset_time_bounds(time, fieldset.time_interval) @@ -115,7 +111,7 @@ def __init__( if kwvar not in ["partition_function"]: kwargs[kwvar] = _convert_to_flat_array(kwargs[kwvar]) assert lon.size == kwargs[kwvar].size, ( - f"{kwvar} and positions (lon, lat, depth) don't have the same lengths." + f"{kwvar} and positions (lon, lat, z) don't have the same lengths." 
) self._data = create_particle_data( @@ -126,7 +122,7 @@ def __init__( initial=dict( lon=lon, lat=lat, - depth=depth, + z=z, time=time, time_nextloop=time, trajectory=trajectory_ids, @@ -182,13 +178,6 @@ def __setattr__(self, name, value): else: object.__setattr__(self, name, value) - @staticmethod - def lonlatdepth_dtype_from_field_interp_method(field): - # TODO update this when now interp methods are implemented - if field.interp_method == "cgrid_velocity": - return np.float64 - return np.float32 - @property def size(self): return len(self) @@ -277,7 +266,7 @@ def _compute_neighbor_tree(self, time, dt): self._values = np.vstack( ( - self._data["depth"], + self._data["z"], self._data["lat"], self._data["lon"], ) @@ -306,7 +295,7 @@ def _neighbors_by_coor(self, coor): def populate_indices(self): """Pre-populate guesses of particle ei (element id) indices""" for i, grid in enumerate(self.fieldset.gridset): - position = grid.search(self.depth, self.lat, self.lon) + position = grid.search(self.z, self.lat, self.lon) self._data["ei"][:, i] = grid.ravel_index( { "X": position["X"][0], diff --git a/parcels/_core/statuscodes.py b/parcels/_core/statuscodes.py index 9b41ba0338..e754a08d08 100644 --- a/parcels/_core/statuscodes.py +++ b/parcels/_core/statuscodes.py @@ -40,7 +40,7 @@ class FieldInterpolationError(RuntimeError): def _raise_field_interpolation_error(z, y, x): - raise FieldInterpolationError(f"Field interpolation returned NaN at (depth={z}, lat={y}, lon={x})") + raise FieldInterpolationError(f"Field interpolation returned NaN at (z={z}, lat={y}, lon={x})") class FieldOutOfBoundError(RuntimeError): @@ -50,7 +50,7 @@ class FieldOutOfBoundError(RuntimeError): def _raise_field_out_of_bound_error(z, y, x): - raise FieldOutOfBoundError(f"Field sampled out-of-bound, at (depth={z}, lat={y}, lon={x})") + raise FieldOutOfBoundError(f"Field sampled out-of-bound, at (z={z}, lat={y}, lon={x})") class FieldOutOfBoundSurfaceError(RuntimeError): @@ -64,7 +64,7 @@ def format_out(val): return "unknown" if val is None else val raise FieldOutOfBoundSurfaceError( - f"Field sampled out-of-bound at the surface, at (depth={format_out(z)}, lat={format_out(y)}, lon={format_out(x)})" + f"Field sampled out-of-bound at the surface, at (z={format_out(z)}, lat={format_out(y)}, lon={format_out(x)})" ) @@ -81,7 +81,7 @@ class GridSearchingError(RuntimeError): def _raise_grid_searching_error(z, y, x): - raise GridSearchingError(f"Grid searching failed at (depth={z}, lat={y}, lon={x})") + raise GridSearchingError(f"Grid searching failed at (z={z}, lat={y}, lon={x})") class GeneralError(RuntimeError): @@ -91,7 +91,7 @@ class GeneralError(RuntimeError): def _raise_general_error(z, y, x): - raise GeneralError(f"General error occurred at (depth={z}, lat={y}, lon={x})") + raise GeneralError(f"General error occurred at (z={z}, lat={y}, lon={x})") class TimeExtrapolationError(RuntimeError): diff --git a/parcels/interpolators.py b/parcels/interpolators.py index 79842bfcfa..ec57f847e4 100644 --- a/parcels/interpolators.py +++ b/parcels/interpolators.py @@ -77,18 +77,18 @@ def _get_corner_data_Agrid( ti_1 = np.clip(ti + 1, 0, data.shape[0] - 1) ti = np.concatenate([np.repeat(ti, lenZ * 4), np.repeat(ti_1, lenZ * 4)]) - # Depth coordinates: 4 points at zi, 4 at zi+1, repeated for both time levels + # Z coordinates: 4 points at zi, 4 at zi+1, repeated for both time levels if lenZ == 1: zi = np.repeat(zi, lenT * 4) else: zi_1 = np.clip(zi + 1, 0, data.shape[1] - 1) zi = np.tile(np.array([zi, zi, zi, zi, zi_1, zi_1, zi_1, 
zi_1]).flatten(), lenT) - # Y coordinates: [yi, yi, yi+1, yi+1] for each spatial point, repeated for time/depth + # Y coordinates: [yi, yi, yi+1, yi+1] for each spatial point, repeated for time/z yi_1 = np.clip(yi + 1, 0, data.shape[2] - 1) yi = np.tile(np.repeat(np.column_stack([yi, yi_1]), 2), (lenT) * (lenZ)) - # X coordinates: [xi, xi+1, xi, xi+1] for each spatial point, repeated for time/depth + # X coordinates: [xi, xi+1, xi, xi+1] for each spatial point, repeated for time/z xi_1 = np.clip(xi + 1, 0, data.shape[3] - 1) xi = np.tile(np.column_stack([xi, xi_1, xi, xi_1]).flatten(), (lenT) * (lenZ)) @@ -211,17 +211,17 @@ def CGrid_Velocity( ti_1 = np.clip(ti + 1, 0, tdim - 1) ti_full = np.concatenate([np.repeat(ti, 4), np.repeat(ti_1, 4)]) - # Depth coordinates: 4 points at zi, repeated for both time levels + # Z coordinates: 4 points at zi, repeated for both time levels zi_full = np.repeat(zi, lenT * 4) - # Y coordinates: [yi, yi, yi+1, yi+1] for each spatial point, repeated for time/depth + # Y coordinates: [yi, yi, yi+1, yi+1] for each spatial point, repeated for time/z yi_1 = np.clip(yi + 1, 0, ydim - 1) yi_full = np.tile(np.repeat(np.column_stack([yi, yi_1]), 2), (lenT)) # # TODO check why in some cases minus needed here!!! # yi_minus_1 = np.clip(yi - 1, 0, ydim - 1) # yi = np.tile(np.repeat(np.column_stack([yi_minus_1, yi]), 2), (lenT)) - # X coordinates: [xi, xi+1, xi, xi+1] for each spatial point, repeated for time/depth + # X coordinates: [xi, xi+1, xi, xi+1] for each spatial point, repeated for time/z xi_1 = np.clip(xi + 1, 0, xdim - 1) xi_full = np.tile(np.column_stack([xi, xi_1, xi, xi_1]).flatten(), (lenT)) @@ -313,15 +313,15 @@ def CGrid_Velocity( ti_1 = np.clip(ti + 1, 0, tdim - 1) ti_full = np.concatenate([np.repeat(ti, 2), np.repeat(ti_1, 2)]) - # Depth coordinates: 1 points at zi, repeated for both time levels + # Z coordinates: 1 points at zi, repeated for both time levels zi_1 = np.clip(zi + 1, 0, zdim - 1) zi_full = np.tile(np.array([zi, zi_1]).flatten(), lenT) - # Y coordinates: yi+1 for each spatial point, repeated for time/depth + # Y coordinates: yi+1 for each spatial point, repeated for time/z yi_1 = np.clip(yi + 1, 0, ydim - 1) yi_full = np.tile(yi_1, (lenT) * 2) - # X coordinates: xi+1 for each spatial point, repeated for time/depth + # X coordinates: xi+1 for each spatial point, repeated for time/z xi_1 = np.clip(xi + 1, 0, xdim - 1) xi_full = np.tile(xi_1, (lenT) * 2) diff --git a/parcels/kernels/advection.py b/parcels/kernels/advection.py index 8d21392903..aa08d48912 100644 --- a/parcels/kernels/advection.py +++ b/parcels/kernels/advection.py @@ -21,11 +21,11 @@ def AdvectionRK4(particles, fieldset): # pragma: no cover dt = particles.dt / np.timedelta64(1, "s") # TODO: improve API for converting dt to seconds (u1, v1) = fieldset.UV[particles] lon1, lat1 = (particles.lon + u1 * 0.5 * dt, particles.lat + v1 * 0.5 * dt) - (u2, v2) = fieldset.UV[particles.time + 0.5 * particles.dt, particles.depth, lat1, lon1, particles] + (u2, v2) = fieldset.UV[particles.time + 0.5 * particles.dt, particles.z, lat1, lon1, particles] lon2, lat2 = (particles.lon + u2 * 0.5 * dt, particles.lat + v2 * 0.5 * dt) - (u3, v3) = fieldset.UV[particles.time + 0.5 * particles.dt, particles.depth, lat2, lon2, particles] + (u3, v3) = fieldset.UV[particles.time + 0.5 * particles.dt, particles.z, lat2, lon2, particles] lon3, lat3 = (particles.lon + u3 * dt, particles.lat + v3 * dt) - (u4, v4) = fieldset.UV[particles.time + particles.dt, particles.depth, lat3, lon3, particles] + (u4, v4) = 
fieldset.UV[particles.time + particles.dt, particles.z, lat3, lon3, particles] particles.dlon += (u1 + 2 * u2 + 2 * u3 + u4) / 6.0 * dt particles.dlat += (v1 + 2 * v2 + 2 * v3 + v4) / 6.0 * dt @@ -36,19 +36,19 @@ def AdvectionRK4_3D(particles, fieldset): # pragma: no cover (u1, v1, w1) = fieldset.UVW[particles] lon1 = particles.lon + u1 * 0.5 * dt lat1 = particles.lat + v1 * 0.5 * dt - dep1 = particles.depth + w1 * 0.5 * dt - (u2, v2, w2) = fieldset.UVW[particles.time + 0.5 * particles.dt, dep1, lat1, lon1, particles] + z1 = particles.z + w1 * 0.5 * dt + (u2, v2, w2) = fieldset.UVW[particles.time + 0.5 * particles.dt, z1, lat1, lon1, particles] lon2 = particles.lon + u2 * 0.5 * dt lat2 = particles.lat + v2 * 0.5 * dt - dep2 = particles.depth + w2 * 0.5 * dt - (u3, v3, w3) = fieldset.UVW[particles.time + 0.5 * particles.dt, dep2, lat2, lon2, particles] + z2 = particles.z + w2 * 0.5 * dt + (u3, v3, w3) = fieldset.UVW[particles.time + 0.5 * particles.dt, z2, lat2, lon2, particles] lon3 = particles.lon + u3 * dt lat3 = particles.lat + v3 * dt - dep3 = particles.depth + w3 * dt - (u4, v4, w4) = fieldset.UVW[particles.time + particles.dt, dep3, lat3, lon3, particles] + z3 = particles.z + w3 * dt + (u4, v4, w4) = fieldset.UVW[particles.time + particles.dt, z3, lat3, lon3, particles] particles.dlon += (u1 + 2 * u2 + 2 * u3 + u4) / 6 * dt particles.dlat += (v1 + 2 * v2 + 2 * v3 + v4) / 6 * dt - particles.ddepth += (w1 + 2 * w2 + 2 * w3 + w4) / 6 * dt + particles.dz += (w1 + 2 * w2 + 2 * w3 + w4) / 6 * dt def AdvectionRK4_3D_CROCO(particles, fieldset): # pragma: no cover @@ -56,9 +56,9 @@ def AdvectionRK4_3D_CROCO(particles, fieldset): # pragma: no cover This kernel assumes the vertical velocity is the 'w' field from CROCO output and works on sigma-layers. """ dt = particles.dt / np.timedelta64(1, "s") # TODO: improve API for converting dt to seconds - sig_dep = particles.depth / fieldset.H[particles.time, 0, particles.lat, particles.lon] + sig_dep = particles.z / fieldset.H[particles.time, 0, particles.lat, particles.lon] - (u1, v1, w1) = fieldset.UVW[particles.time, particles.depth, particles.lat, particles.lon, particles] + (u1, v1, w1) = fieldset.UVW[particles.time, particles.z, particles.lat, particles.lon, particles] w1 *= sig_dep / fieldset.H[particles.time, 0, particles.lat, particles.lon] lon1 = particles.lon + u1 * 0.5 * dt lat1 = particles.lat + v1 * 0.5 * dt @@ -88,12 +88,8 @@ def AdvectionRK4_3D_CROCO(particles, fieldset): # pragma: no cover particles.dlon += (u1 + 2 * u2 + 2 * u3 + u4) / 6 * dt particles.dlat += (v1 + 2 * v2 + 2 * v3 + v4) / 6 * dt - particles.ddepth += ( - (dep1 - particles.depth) * 2 - + 2 * (dep2 - particles.depth) * 2 - + 2 * (dep3 - particles.depth) - + dep4 - - particles.depth + particles.dz += ( + (dep1 - particles.z) * 2 + 2 * (dep2 - particles.z) * 2 + 2 * (dep3 - particles.z) + dep4 - particles.z ) / 6 @@ -130,27 +126,27 @@ def AdvectionRK45(particles, fieldset): # pragma: no cover (u1, v1) = fieldset.UV[particles] lon1, lat1 = (particles.lon + u1 * A[0][0] * dt, particles.lat + v1 * A[0][0] * dt) - (u2, v2) = fieldset.UV[particles.time + c[0] * particles.dt, particles.depth, lat1, lon1, particles] + (u2, v2) = fieldset.UV[particles.time + c[0] * particles.dt, particles.z, lat1, lon1, particles] lon2, lat2 = ( particles.lon + (u1 * A[1][0] + u2 * A[1][1]) * dt, particles.lat + (v1 * A[1][0] + v2 * A[1][1]) * dt, ) - (u3, v3) = fieldset.UV[particles.time + c[1] * particles.dt, particles.depth, lat2, lon2, particles] + (u3, v3) = fieldset.UV[particles.time + 
c[1] * particles.dt, particles.z, lat2, lon2, particles] lon3, lat3 = ( particles.lon + (u1 * A[2][0] + u2 * A[2][1] + u3 * A[2][2]) * dt, particles.lat + (v1 * A[2][0] + v2 * A[2][1] + v3 * A[2][2]) * dt, ) - (u4, v4) = fieldset.UV[particles.time + c[2] * particles.dt, particles.depth, lat3, lon3, particles] + (u4, v4) = fieldset.UV[particles.time + c[2] * particles.dt, particles.z, lat3, lon3, particles] lon4, lat4 = ( particles.lon + (u1 * A[3][0] + u2 * A[3][1] + u3 * A[3][2] + u4 * A[3][3]) * dt, particles.lat + (v1 * A[3][0] + v2 * A[3][1] + v3 * A[3][2] + v4 * A[3][3]) * dt, ) - (u5, v5) = fieldset.UV[particles.time + c[3] * particles.dt, particles.depth, lat4, lon4, particles] + (u5, v5) = fieldset.UV[particles.time + c[3] * particles.dt, particles.z, lat4, lon4, particles] lon5, lat5 = ( particles.lon + (u1 * A[4][0] + u2 * A[4][1] + u3 * A[4][2] + u4 * A[4][3] + u5 * A[4][4]) * dt, particles.lat + (v1 * A[4][0] + v2 * A[4][1] + v3 * A[4][2] + v4 * A[4][3] + v5 * A[4][4]) * dt, ) - (u6, v6) = fieldset.UV[particles.time + c[4] * particles.dt, particles.depth, lat5, lon5, particles] + (u6, v6) = fieldset.UV[particles.time + c[4] * particles.dt, particles.z, lat5, lon5, particles] lon_4th = (u1 * b4[0] + u2 * b4[1] + u3 * b4[2] + u4 * b4[3] + u5 * b4[4]) * dt lat_4th = (v1 * b4[0] + v2 * b4[1] + v3 * b4[2] + v4 * b4[3] + v5 * b4[4]) * dt @@ -202,7 +198,7 @@ def AdvectionAnalytical(particles, fieldset): # pragma: no cover withW = True if "W" in [f.name for f in fieldset.fields.values()] else False withTime = True if len(fieldset.U.grid.time) > 1 else False tau, zeta, eta, xsi, ti, zi, yi, xi = fieldset.U._search_indices( - particles.depth, particles.lat, particles.lon, particles=particles + particles.z, particles.lat, particles.lon, particles=particles ) ds_t = dt if withTime: @@ -350,7 +346,7 @@ def compute_rs(r, B, delta, s_min): if withW: rs_z = compute_rs(zeta, B_z, delta_z, s_min) - particles.ddepth += (1.0 - rs_z) * pz[0] + rs_z * pz[1] - particles.depth + particles.dz += (1.0 - rs_z) * pz[0] + rs_z * pz[1] - particles.z if particles.dt > 0: particles.dt = max(direction * s_min * (dxdy * dz), 1e-7).astype("timedelta64[s]") diff --git a/parcels/kernels/advectiondiffusion.py b/parcels/kernels/advectiondiffusion.py index 6ad4ad2588..396280c56c 100644 --- a/parcels/kernels/advectiondiffusion.py +++ b/parcels/kernels/advectiondiffusion.py @@ -28,22 +28,18 @@ def AdvectionDiffusionM1(particles, fieldset): # pragma: no cover dWx = np.random.normal(0, np.sqrt(np.fabs(dt))) dWy = np.random.normal(0, np.sqrt(np.fabs(dt))) - Kxp1 = fieldset.Kh_zonal[particles.time, particles.depth, particles.lat, particles.lon + fieldset.dres, particles] - Kxm1 = fieldset.Kh_zonal[particles.time, particles.depth, particles.lat, particles.lon - fieldset.dres, particles] + Kxp1 = fieldset.Kh_zonal[particles.time, particles.z, particles.lat, particles.lon + fieldset.dres, particles] + Kxm1 = fieldset.Kh_zonal[particles.time, particles.z, particles.lat, particles.lon - fieldset.dres, particles] dKdx = (Kxp1 - Kxm1) / (2 * fieldset.dres) - u, v = fieldset.UV[particles.time, particles.depth, particles.lat, particles.lon, particles] - bx = np.sqrt(2 * fieldset.Kh_zonal[particles.time, particles.depth, particles.lat, particles.lon, particles]) + u, v = fieldset.UV[particles.time, particles.z, particles.lat, particles.lon, particles] + bx = np.sqrt(2 * fieldset.Kh_zonal[particles.time, particles.z, particles.lat, particles.lon, particles]) - Kyp1 = fieldset.Kh_meridional[ - particles.time, particles.depth, 
particles.lat + fieldset.dres, particles.lon, particles - ] - Kym1 = fieldset.Kh_meridional[ - particles.time, particles.depth, particles.lat - fieldset.dres, particles.lon, particles - ] + Kyp1 = fieldset.Kh_meridional[particles.time, particles.z, particles.lat + fieldset.dres, particles.lon, particles] + Kym1 = fieldset.Kh_meridional[particles.time, particles.z, particles.lat - fieldset.dres, particles.lon, particles] dKdy = (Kyp1 - Kym1) / (2 * fieldset.dres) - by = np.sqrt(2 * fieldset.Kh_meridional[particles.time, particles.depth, particles.lat, particles.lon, particles]) + by = np.sqrt(2 * fieldset.Kh_meridional[particles.time, particles.z, particles.lat, particles.lon, particles]) # Particle positions are updated only after evaluating all terms. particles.dlon += u * dt + 0.5 * dKdx * (dWx**2 + dt) + bx * dWx @@ -68,23 +64,19 @@ def AdvectionDiffusionEM(particles, fieldset): # pragma: no cover dWx = np.random.normal(0, np.sqrt(np.fabs(dt))) dWy = np.random.normal(0, np.sqrt(np.fabs(dt))) - u, v = fieldset.UV[particles.time, particles.depth, particles.lat, particles.lon, particles] + u, v = fieldset.UV[particles.time, particles.z, particles.lat, particles.lon, particles] - Kxp1 = fieldset.Kh_zonal[particles.time, particles.depth, particles.lat, particles.lon + fieldset.dres, particles] - Kxm1 = fieldset.Kh_zonal[particles.time, particles.depth, particles.lat, particles.lon - fieldset.dres, particles] + Kxp1 = fieldset.Kh_zonal[particles.time, particles.z, particles.lat, particles.lon + fieldset.dres, particles] + Kxm1 = fieldset.Kh_zonal[particles.time, particles.z, particles.lat, particles.lon - fieldset.dres, particles] dKdx = (Kxp1 - Kxm1) / (2 * fieldset.dres) ax = u + dKdx - bx = np.sqrt(2 * fieldset.Kh_zonal[particles.time, particles.depth, particles.lat, particles.lon, particles]) - - Kyp1 = fieldset.Kh_meridional[ - particles.time, particles.depth, particles.lat + fieldset.dres, particles.lon, particles - ] - Kym1 = fieldset.Kh_meridional[ - particles.time, particles.depth, particles.lat - fieldset.dres, particles.lon, particles - ] + bx = np.sqrt(2 * fieldset.Kh_zonal[particles.time, particles.z, particles.lat, particles.lon, particles]) + + Kyp1 = fieldset.Kh_meridional[particles.time, particles.z, particles.lat + fieldset.dres, particles.lon, particles] + Kym1 = fieldset.Kh_meridional[particles.time, particles.z, particles.lat - fieldset.dres, particles.lon, particles] dKdy = (Kyp1 - Kym1) / (2 * fieldset.dres) ay = v + dKdy - by = np.sqrt(2 * fieldset.Kh_meridional[particles.time, particles.depth, particles.lat, particles.lon, particles]) + by = np.sqrt(2 * fieldset.Kh_meridional[particles.time, particles.z, particles.lat, particles.lon, particles]) # Particle positions are updated only after evaluating all terms. 
particles.dlon += ax * dt + bx * dWx diff --git a/tests/test_advection.py b/tests/test_advection.py index c4e8a82ce9..486bfe4582 100644 --- a/tests/test_advection.py +++ b/tests/test_advection.py @@ -108,7 +108,7 @@ def test_advection_zonal_periodic(): def test_horizontal_advection_in_3D_flow(npart=10): - """Flat 2D zonal flow that increases linearly with depth from 0 m/s to 1 m/s.""" + """Flat 2D zonal flow that increases linearly with z from 0 m/s to 1 m/s.""" ds = simple_UV_dataset(mesh="flat") ds["U"].data[:] = 1.0 grid = XGrid.from_dataset(ds) @@ -118,10 +118,10 @@ def test_horizontal_advection_in_3D_flow(npart=10): UV = VectorField("UV", U, V) fieldset = FieldSet([U, V, UV]) - pset = ParticleSet(fieldset, lon=np.zeros(npart), lat=np.zeros(npart), depth=np.linspace(0.1, 0.9, npart)) + pset = ParticleSet(fieldset, lon=np.zeros(npart), lat=np.zeros(npart), z=np.linspace(0.1, 0.9, npart)) pset.execute(AdvectionRK4, runtime=np.timedelta64(2, "h"), dt=np.timedelta64(15, "m")) - expected_lon = pset.depth * (pset.time - fieldset.time_interval.left) / np.timedelta64(1, "s") + expected_lon = pset.z * (pset.time - fieldset.time_interval.left) / np.timedelta64(1, "s") np.testing.assert_allclose(pset.lon, expected_lon, atol=1.0e-1) @@ -155,8 +155,8 @@ def SubmergeParticle(particles, fieldset): # pragma: no cover (u, v) = fieldset.UV[particles[inds]] particles[inds].dlon = u * dt particles[inds].dlat = v * dt - particles[inds].ddepth = 0.0 - particles[inds].depth = 0 + particles[inds].dz = 0.0 + particles[inds].z = 0 particles[inds].state = StatusCode.Evaluate kernels = [AdvectionRK4_3D] @@ -164,12 +164,12 @@ def SubmergeParticle(particles, fieldset): # pragma: no cover kernels.append(SubmergeParticle) kernels.append(DeleteParticle) - pset = ParticleSet(fieldset=fieldset, lon=0.5, lat=0.5, depth=0.9) + pset = ParticleSet(fieldset=fieldset, lon=0.5, lat=0.5, z=0.9) pset.execute(kernels, runtime=np.timedelta64(11, "s"), dt=np.timedelta64(1, "s")) if direction == "up" and wErrorThroughSurface: np.testing.assert_allclose(pset.lon[0], 0.6, atol=1e-5) - np.testing.assert_allclose(pset.depth[0], 0, atol=1e-5) + np.testing.assert_allclose(pset.z[0], 0, atol=1e-5) else: assert len(pset) == 0 @@ -220,7 +220,7 @@ def test_length1dimensions(u, v, w): # TODO: Refactor this test to be more read fieldset = FieldSet(fields) x0, y0, z0 = 2, 8, -4 - pset = ParticleSet(fieldset, lon=x0, lat=y0, depth=z0) + pset = ParticleSet(fieldset, lon=x0, lat=y0, z=z0) kernel = AdvectionRK4 if w is None else AdvectionRK4_3D pset.execute(kernel, runtime=np.timedelta64(5, "s"), dt=np.timedelta64(1, "s")) @@ -228,7 +228,7 @@ def test_length1dimensions(u, v, w): # TODO: Refactor this test to be more read np.testing.assert_allclose(np.array([p.lon - x0 for p in pset]), 4 * u, atol=1e-6) np.testing.assert_allclose(np.array([p.lat - y0 for p in pset]), 4 * v, atol=1e-6) if w: - np.testing.assert_allclose(np.array([p.depth - z0 for p in pset]), 4 * w, atol=1e-6) + np.testing.assert_allclose(np.array([p.z - z0 for p in pset]), 4 * w, atol=1e-6) def test_radialrotation(npart=10): @@ -286,13 +286,13 @@ def test_moving_eddy(method, rtol): fieldset.add_field(Field("Kh", ds["Kh"], grid, interp_method=XLinear), "Kh_meridional") fieldset.add_constant("dres", 0.1) - start_lon, start_lat, start_depth = 12000, 12500, 12500 + start_lon, start_lat, start_z = 12000, 12500, 12500 dt = np.timedelta64(30, "m") if method == "RK45": fieldset.add_constant("RK45_tol", rtol) - pset = ParticleSet(fieldset, lon=start_lon, lat=start_lat, depth=start_depth, 
time=np.timedelta64(0, "s")) + pset = ParticleSet(fieldset, lon=start_lon, lat=start_lat, z=start_z, time=np.timedelta64(0, "s")) pset.execute(kernel[method], dt=dt, endtime=np.timedelta64(1, "h")) def truth_moving(x_0, y_0, t): @@ -305,7 +305,7 @@ def truth_moving(x_0, y_0, t): np.testing.assert_allclose(pset.lon, exp_lon, rtol=rtol) np.testing.assert_allclose(pset.lat, exp_lat, rtol=rtol) if method == "RK4_3D": - np.testing.assert_allclose(pset.depth, exp_lat, rtol=rtol) + np.testing.assert_allclose(pset.z, exp_lat, rtol=rtol) @pytest.mark.parametrize( @@ -385,7 +385,7 @@ def test_stommelgyre_fieldset(method, rtol, grid_type): ) def UpdateP(particles, fieldset): # pragma: no cover - particles.p = fieldset.P[particles.time, particles.depth, particles.lat, particles.lon] + particles.p = fieldset.P[particles.time, particles.z, particles.lat, particles.lon] particles.p_start = np.where(particles.time == 0, particles.p, particles.p_start) pset = ParticleSet(fieldset, pclass=SampleParticle, lon=start_lon, lat=start_lat, time=np.timedelta64(0, "s")) @@ -424,7 +424,7 @@ def test_peninsula_fieldset(method, rtol, grid_type): ) def UpdateP(particles, fieldset): # pragma: no cover - particles.p = fieldset.P[particles.time, particles.depth, particles.lat, particles.lon] + particles.p = fieldset.P[particles.time, particles.z, particles.lat, particles.lon] particles.p_start = np.where(particles.time == 0, particles.p, particles.p_start) pset = ParticleSet(fieldset, pclass=SampleParticle, lon=start_lon, lat=start_lat, time=np.timedelta64(0, "s")) @@ -551,7 +551,7 @@ def test_nemo_3D_curvilinear_fieldset(method): npart = 10 lons = np.linspace(1.9, 3.4, npart) lats = np.linspace(52.5, 51.6, npart) - pset = parcels.ParticleSet(fieldset, lon=lons, lat=lats, depth=np.ones_like(lons)) + pset = parcels.ParticleSet(fieldset, lon=lons, lat=lats, z=np.ones_like(lons)) pset.execute(kernel[method], runtime=np.timedelta64(4, "D"), dt=np.timedelta64(6, "h")) @@ -559,6 +559,4 @@ def test_nemo_3D_curvilinear_fieldset(method): np.testing.assert_equal(round_and_hash_float_array([p.lon for p in pset], decimals=5), 29977383852960156017546) elif method == "RK4_3D": # TODO check why decimals needs to be so low in RK4_3D (compare to v3) - np.testing.assert_equal( - round_and_hash_float_array([p.depth for p in pset], decimals=1), 29747210774230389239432 - ) + np.testing.assert_equal(round_and_hash_float_array([p.z for p in pset], decimals=1), 29747210774230389239432) diff --git a/tests/test_diffusion.py b/tests/test_diffusion.py index c7ecaf0057..495a7ac2ca 100644 --- a/tests/test_diffusion.py +++ b/tests/test_diffusion.py @@ -96,16 +96,16 @@ def test_randomexponential(lambd): # Set random seed np.random.seed(1234) - pset = ParticleSet(fieldset=fieldset, lon=np.zeros(npart), lat=np.zeros(npart), depth=np.zeros(npart)) + pset = ParticleSet(fieldset=fieldset, lon=np.zeros(npart), lat=np.zeros(npart), z=np.zeros(npart)) def vertical_randomexponential(particles, fieldset): # pragma: no cover - # Kernel for random exponential variable in depth direction - particles.depth = np.random.exponential(scale=1 / fieldset.lambd, size=len(particles)) + # Kernel for random exponential variable in z direction + particles.z = np.random.exponential(scale=1 / fieldset.lambd, size=len(particles)) pset.execute(vertical_randomexponential, runtime=np.timedelta64(1, "s"), dt=np.timedelta64(1, "s")) expected_mean = 1.0 / fieldset.lambd - assert np.allclose(np.mean(pset.depth), expected_mean, rtol=0.1) + assert np.allclose(np.mean(pset.z), 
expected_mean, rtol=0.1) @pytest.mark.parametrize("mu", [0.8 * np.pi, np.pi]) @@ -123,7 +123,7 @@ def test_randomvonmises(mu, kappa): AngleParticle = Particle.add_variable(Variable("angle")) pset = ParticleSet( - fieldset=fieldset, pclass=AngleParticle, lon=np.zeros(npart), lat=np.zeros(npart), depth=np.zeros(npart) + fieldset=fieldset, pclass=AngleParticle, lon=np.zeros(npart), lat=np.zeros(npart), z=np.zeros(npart) ) def vonmises(particles, fieldset): # pragma: no cover diff --git a/tests/test_fieldset.py b/tests/test_fieldset.py index c96682ed0c..85d082d69b 100644 --- a/tests/test_fieldset.py +++ b/tests/test_fieldset.py @@ -44,12 +44,12 @@ def test_fieldset_add_constant_field(fieldset): # Get a point in the domain time = ds["time"].mean() - depth = ds["depth"].mean() + z = ds["depth"].mean() lat = ds["lat"].mean() lon = ds["lon"].mean() pytest.xfail(reason="Not yet implemented interpolation.") - assert fieldset.test_constant_field[time, depth, lat, lon] == 1.0 + assert fieldset.test_constant_field[time, z, lat, lon] == 1.0 def test_fieldset_add_field(fieldset): diff --git a/tests/test_interpolation.py b/tests/test_interpolation.py index 3b9478d3cb..20ba10d1c5 100644 --- a/tests/test_interpolation.py +++ b/tests/test_interpolation.py @@ -205,7 +205,7 @@ def test_interp_regression_v3(interp_name): x, y, z = np.meshgrid(np.linspace(0, 1, 7), np.linspace(0, 1, 13), np.linspace(0, 1, 5)) TestP = Particle.add_variable(Variable("pid", dtype=np.int32, initial=0)) - pset = ParticleSet(fieldset, pclass=TestP, lon=x, lat=y, depth=z, pid=np.arange(x.size)) + pset = ParticleSet(fieldset, pclass=TestP, lon=x, lat=y, z=z, pid=np.arange(x.size)) def DeleteParticle(particle, fieldset, time): if particle.state >= 50: diff --git a/tests/test_particlefile.py b/tests/test_particlefile.py index fbe222f95b..0fc2320bec 100755 --- a/tests/test_particlefile.py +++ b/tests/test_particlefile.py @@ -464,7 +464,7 @@ def test_pfile_write_custom_particle(): def test_pfile_set_towrite_False(fieldset, tmp_zarrfile): npart = 10 pset = ParticleSet(fieldset, pclass=Particle, lon=np.linspace(0, 1, npart), lat=0.5 * np.ones(npart)) - pset.set_variable_write_status("depth", False) + pset.set_variable_write_status("z", False) pset.set_variable_write_status("lat", False) pfile = pset.ParticleFile(tmp_zarrfile, outputdt=1) @@ -480,5 +480,5 @@ def Update_lon(particles, fieldset): # pragma: no cover ds.close() # For pytest purposes, we need to reset to original status - pset.set_variable_write_status("depth", True) + pset.set_variable_write_status("z", True) pset.set_variable_write_status("lat", True) diff --git a/tests/test_particleset_execute.py b/tests/test_particleset_execute.py index 1685144124..7619c841d2 100644 --- a/tests/test_particleset_execute.py +++ b/tests/test_particleset_execute.py @@ -130,7 +130,7 @@ def test_pset_execute_invalid_arguments(fieldset, fieldset_no_time_interval): ], ) def test_particleset_runtime_type(fieldset, runtime, expectation): - pset = ParticleSet(fieldset, lon=[0.2], lat=[5.0], depth=[50.0], pclass=Particle) + pset = ParticleSet(fieldset, lon=[0.2], lat=[5.0], z=[50.0], pclass=Particle) with expectation: pset.execute(runtime=runtime, dt=np.timedelta64(10, "s"), pyfunc=DoNothing) @@ -146,7 +146,7 @@ def test_particleset_runtime_type(fieldset, runtime, expectation): ], ) def test_particleset_endtime_type(fieldset, endtime, expectation): - pset = ParticleSet(fieldset, lon=[0.2], lat=[5.0], depth=[50.0], pclass=Particle) + pset = ParticleSet(fieldset, lon=[0.2], lat=[5.0], z=[50.0], 
pclass=Particle) with expectation: pset.execute(endtime=endtime, dt=np.timedelta64(10, "m"), pyfunc=DoNothing) @@ -270,7 +270,7 @@ def test_some_particles_throw_outofbounds(zonal_flow_fieldset): def test_delete_on_all_errors(fieldset): def MoveRight(particles, fieldset): # pragma: no cover particles.dlon += 1 - fieldset.U[particles.time, particles.depth, particles.lat, particles.lon, particles] + fieldset.U[particles.time, particles.z, particles.lat, particles.lon, particles] def DeleteAllErrorParticles(particles, fieldset): # pragma: no cover particles[particles.state > 20].state = StatusCode.Delete @@ -285,7 +285,7 @@ def test_some_particles_throw_outoftime(fieldset): pset = ParticleSet(fieldset, lon=np.zeros_like(time), lat=np.zeros_like(time), time=time) def FieldAccessOutsideTime(particles, fieldset): # pragma: no cover - fieldset.U[particles.time + np.timedelta64(400, "D"), particles.depth, particles.lat, particles.lon, particles] + fieldset.U[particles.time + np.timedelta64(400, "D"), particles.z, particles.lat, particles.lon, particles] with pytest.raises(TimeExtrapolationError): pset.execute(FieldAccessOutsideTime, runtime=np.timedelta64(1, "D"), dt=np.timedelta64(10, "D")) @@ -302,7 +302,7 @@ def NaNInterpolator(field, ti, position, tau, t, z, y, x): # pragma: no cover return np.nan * np.zeros_like(x) def SampleU(particles, fieldset): # pragma: no cover - fieldset.U[particles.time, particles.depth, particles.lat, particles.lon, particles] + fieldset.U[particles.time, particles.z, particles.lat, particles.lon, particles] fieldset.U.interp_method = NaNInterpolator pset = ParticleSet(fieldset, lon=[0, 2], lat=[0, 0]) @@ -325,7 +325,7 @@ def test_execution_recover_out_of_bounds(fieldset): npart = 2 def MoveRight(particles, fieldset): # pragma: no cover - fieldset.U[particles.time, particles.depth, particles.lat, particles.lon + 0.1, particles] + fieldset.U[particles.time, particles.z, particles.lat, particles.lon + 0.1, particles] particles.dlon += 0.1 def MoveLeft(particles, fieldset): # pragma: no cover @@ -466,7 +466,7 @@ def test_uxstommelgyre_pset_execute(): fieldset, lon=[30.0], lat=[5.0], - depth=[50.0], + z=[50.0], time=[np.timedelta64(0, "s")], pclass=Particle, ) @@ -512,7 +512,7 @@ def test_uxstommelgyre_multiparticle_pset_execute(): fieldset, lon=[30.0, 32.0], lat=[5.0, 5.0], - depth=[50.0, 50.0], + z=[50.0, 50.0], time=[np.timedelta64(0, "s")], pclass=Particle, ) @@ -551,7 +551,7 @@ def test_uxstommelgyre_pset_execute_output(): fieldset, lon=[30.0], lat=[5.0], - depth=[50.0], + z=[50.0], time=[0.0], pclass=Particle, )
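Illustrative sketch (not part of the patch above): under the renames in this diff, a v3-style kernel that read `particle.depth` and accumulated `particle.ddepth` would be written against the v4 API roughly as follows. The `particles.z`/`particles.dz` attributes, the `(particles, fieldset)` signature, the `fieldset.UV[...]` sampling order, and the `dt`-to-seconds conversion are taken from the hunks above; the kernel name and the constant sinking speed are hypothetical placeholders.

```python
import numpy as np


def SinkAndAdvect(particles, fieldset):  # v4 signature: `particles`, not `particle`; no `time` argument
    dt = particles.dt / np.timedelta64(1, "s")  # convert dt to seconds, as in the advection kernels

    # Sample horizontal velocity at the particle positions; the vertical coordinate is now particles.z
    u, v = fieldset.UV[particles.time, particles.z, particles.lat, particles.lon, particles]

    # Displacements are accumulated in dlon/dlat/dz and applied after the kernel loop (see Setcoords)
    particles.dlon += u * dt
    particles.dlat += v * dt
    particles.dz += 0.001 * dt  # hypothetical constant sinking speed (1 mm/s), placeholder only

    # ParcelsRandom is gone; use numpy's vectorised random functions instead, e.g.:
    # particles.dlon += np.random.normal(0, 1e-4, size=len(particles))


# Usage sketch: the ParticleSet keyword is now `z` rather than `depth`, e.g.
# pset = ParticleSet(fieldset, lon=[30.0], lat=[5.0], z=[50.0])
# pset.execute(SinkAndAdvect, runtime=np.timedelta64(1, "D"), dt=np.timedelta64(1, "h"))
```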