diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6e28cee81fd..6ebd66bdf69 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,7 @@ # https://pre-commit.com/ ci: autoupdate_schedule: monthly + autoupdate_commit_msg: 'Update pre-commit hooks' exclude: 'xarray/datatree_.*' repos: - repo: https://github.com/pre-commit/pre-commit-hooks diff --git a/asv_bench/benchmarks/dataset_io.py b/asv_bench/benchmarks/dataset_io.py index 0956be67dad..2661ec5cfba 100644 --- a/asv_bench/benchmarks/dataset_io.py +++ b/asv_bench/benchmarks/dataset_io.py @@ -724,7 +724,7 @@ class PerformanceBackend(xr.backends.BackendEntrypoint): def open_dataset( self, filename_or_obj: str | os.PathLike | None, - drop_variables: tuple[str] = None, + drop_variables: tuple[str, ...] = None, *, mask_and_scale=True, decode_times=True, diff --git a/asv_bench/benchmarks/groupby.py b/asv_bench/benchmarks/groupby.py index 74ff3cb5756..a5261d5106a 100644 --- a/asv_bench/benchmarks/groupby.py +++ b/asv_bench/benchmarks/groupby.py @@ -174,7 +174,7 @@ def setup(self, use_cftime, use_flox): # GH9426 - deep-copying CFTime object arrays is weirdly slow asda = xr.DataArray(time) labeled_time = [] - for year, month in zip(asda.dt.year, asda.dt.month): + for year, month in zip(asda.dt.year, asda.dt.month, strict=True): labeled_time.append(cftime.datetime(year, month, 1)) self.da = xr.DataArray( diff --git a/asv_bench/benchmarks/rolling.py b/asv_bench/benchmarks/rolling.py index 579f4f00fbc..a19d17ff09a 100644 --- a/asv_bench/benchmarks/rolling.py +++ b/asv_bench/benchmarks/rolling.py @@ -64,7 +64,7 @@ def time_rolling_long(self, func, pandas, use_bottleneck): def time_rolling_np(self, window_, min_periods, use_bottleneck): with xr.set_options(use_bottleneck=use_bottleneck): self.ds.rolling(x=window_, center=False, min_periods=min_periods).reduce( - getattr(np, "nansum") + np.nansum ).load() @parameterized( diff --git a/doc/user-guide/testing.rst b/doc/user-guide/testing.rst index d82d9d7d7d9..434c0790139 100644 --- a/doc/user-guide/testing.rst +++ b/doc/user-guide/testing.rst @@ -193,7 +193,7 @@ different type: .. 
ipython:: python - def sparse_random_arrays(shape: tuple[int]) -> sparse._coo.core.COO: + def sparse_random_arrays(shape: tuple[int, ...]) -> sparse._coo.core.COO: """Strategy which generates random sparse.COO arrays""" if shape is None: shape = npst.array_shapes() diff --git a/properties/test_pandas_roundtrip.py b/properties/test_pandas_roundtrip.py index 9e0d4640171..3f507e3f341 100644 --- a/properties/test_pandas_roundtrip.py +++ b/properties/test_pandas_roundtrip.py @@ -80,7 +80,7 @@ def test_roundtrip_dataarray(data, arr) -> None: tuple ) ) - coords = {name: np.arange(n) for (name, n) in zip(names, arr.shape)} + coords = {name: np.arange(n) for (name, n) in zip(names, arr.shape, strict=True)} original = xr.DataArray(arr, dims=names, coords=coords) roundtripped = xr.DataArray(original.to_pandas()) xr.testing.assert_identical(original, roundtripped) diff --git a/pyproject.toml b/pyproject.toml index 5a0029f3624..9808dbf709a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -84,14 +84,13 @@ source = ["xarray"] exclude_lines = ["pragma: no cover", "if TYPE_CHECKING"] [tool.mypy] -enable_error_code = "redundant-self" +enable_error_code = ["ignore-without-code", "redundant-self", "redundant-expr"] exclude = [ 'build', 'xarray/util/generate_.*\.py', 'xarray/datatree_/doc/.*\.py', ] files = "xarray" -show_error_codes = true show_error_context = true warn_redundant_casts = true warn_unused_configs = true @@ -240,7 +239,6 @@ extend-exclude = [ "doc", "_typed_ops.pyi", ] -target-version = "py310" [tool.ruff.lint] # E402: module level import not at top of file @@ -249,13 +247,13 @@ target-version = "py310" extend-safe-fixes = [ "TID252", # absolute imports ] -ignore = [ +extend-ignore = [ "E402", "E501", "E731", "UP007", ] -select = [ +extend-select = [ "F", # Pyflakes "E", # Pycodestyle "W", diff --git a/xarray/backends/api.py b/xarray/backends/api.py index 9eb6d78b055..1f6b6076799 100644 --- a/xarray/backends/api.py +++ b/xarray/backends/api.py @@ -51,7 +51,7 @@ try: from dask.delayed import Delayed except ImportError: - Delayed = None # type: ignore + Delayed = None # type: ignore[assignment, misc] from io import BufferedIOBase from xarray.backends.common import BackendEntrypoint @@ -1113,7 +1113,7 @@ def open_mfdataset( list(combined_ids_paths.keys()), list(combined_ids_paths.values()), ) - elif combine == "by_coords" and concat_dim is not None: + elif concat_dim is not None: raise ValueError( "When combine='by_coords', passing a value for `concat_dim` has no " "effect. 
To manually combine along a specific dimension you should " @@ -1432,7 +1432,7 @@ def to_netcdf( store.sync() return target.getvalue() finally: - if not multifile and compute: + if not multifile and compute: # type: ignore[redundant-expr] store.close() if not compute: @@ -1585,8 +1585,9 @@ def save_mfdataset( multifile=True, **kwargs, ) - for ds, path, group in zip(datasets, paths, groups) - ] + for ds, path, group in zip(datasets, paths, groups, strict=True) + ], + strict=True, ) try: @@ -1600,7 +1601,10 @@ def save_mfdataset( import dask return dask.delayed( - [dask.delayed(_finalize_store)(w, s) for w, s in zip(writes, stores)] + [ + dask.delayed(_finalize_store)(w, s) + for w, s in zip(writes, stores, strict=True) + ] ) diff --git a/xarray/backends/common.py b/xarray/backends/common.py index 38cba9af212..dd169cdbc7e 100644 --- a/xarray/backends/common.py +++ b/xarray/backends/common.py @@ -431,7 +431,7 @@ def set_dimensions(self, variables, unlimited_dims=None): for v in unlimited_dims: # put unlimited_dims first dims[v] = None for v in variables.values(): - dims.update(dict(zip(v.dims, v.shape))) + dims.update(dict(zip(v.dims, v.shape, strict=True))) for dim, length in dims.items(): if dim in existing_dims and length != existing_dims[dim]: diff --git a/xarray/backends/file_manager.py b/xarray/backends/file_manager.py index 86d84f532b1..9caaf013494 100644 --- a/xarray/backends/file_manager.py +++ b/xarray/backends/file_manager.py @@ -276,7 +276,7 @@ def __getstate__(self): def __setstate__(self, state) -> None: """Restore from a pickle.""" opener, args, mode, kwargs, lock, manager_id = state - self.__init__( # type: ignore + self.__init__( # type: ignore[misc] opener, *args, mode=mode, kwargs=kwargs, lock=lock, manager_id=manager_id ) diff --git a/xarray/backends/h5netcdf_.py b/xarray/backends/h5netcdf_.py index 0b7ebbbeb0c..b252d9136d2 100644 --- a/xarray/backends/h5netcdf_.py +++ b/xarray/backends/h5netcdf_.py @@ -208,7 +208,9 @@ def open_store_variable(self, name, var): "shuffle": var.shuffle, } if var.chunks: - encoding["preferred_chunks"] = dict(zip(var.dimensions, var.chunks)) + encoding["preferred_chunks"] = dict( + zip(var.dimensions, var.chunks, strict=True) + ) # Convert h5py-style compression options to NetCDF4-Python # style, if possible if var.compression == "gzip": diff --git a/xarray/backends/netCDF4_.py b/xarray/backends/netCDF4_.py index ec2fe25216a..af2c15495d7 100644 --- a/xarray/backends/netCDF4_.py +++ b/xarray/backends/netCDF4_.py @@ -278,7 +278,9 @@ def _extract_nc4_variable_encoding( chunksizes = encoding["chunksizes"] chunks_too_big = any( c > d and dim not in unlimited_dims - for c, d, dim in zip(chunksizes, variable.shape, variable.dims) + for c, d, dim in zip( + chunksizes, variable.shape, variable.dims, strict=False + ) ) has_original_shape = "original_shape" in encoding changed_shape = ( @@ -446,7 +448,9 @@ def open_store_variable(self, name: str, var): else: encoding["contiguous"] = False encoding["chunksizes"] = tuple(chunking) - encoding["preferred_chunks"] = dict(zip(var.dimensions, chunking)) + encoding["preferred_chunks"] = dict( + zip(var.dimensions, chunking, strict=True) + ) # TODO: figure out how to round-trip "endian-ness" without raising # warnings from netCDF4 # encoding['endian'] = var.endian() diff --git a/xarray/backends/plugins.py b/xarray/backends/plugins.py index 5eb7f879ee5..8b707633a6d 100644 --- a/xarray/backends/plugins.py +++ b/xarray/backends/plugins.py @@ -199,7 +199,7 @@ def get_backend(engine: str | type[BackendEntrypoint]) -> 
BackendEntrypoint: "https://docs.xarray.dev/en/stable/getting-started-guide/installing.html" ) backend = engines[engine] - elif isinstance(engine, type) and issubclass(engine, BackendEntrypoint): + elif issubclass(engine, BackendEntrypoint): backend = engine() else: raise TypeError( diff --git a/xarray/backends/zarr.py b/xarray/backends/zarr.py index 242507f9c20..31b367a178b 100644 --- a/xarray/backends/zarr.py +++ b/xarray/backends/zarr.py @@ -186,7 +186,7 @@ def _determine_zarr_chunks(enc_chunks, var_chunks, ndim, name, safe_chunks): # TODO: incorporate synchronizer to allow writes from multiple dask # threads if var_chunks and enc_chunks_tuple: - for zchunk, dchunks in zip(enc_chunks_tuple, var_chunks): + for zchunk, dchunks in zip(enc_chunks_tuple, var_chunks, strict=True): for dchunk in dchunks[:-1]: if dchunk % zchunk: base_error = ( @@ -548,13 +548,13 @@ def open_store_variable(self, name, zarr_array=None): encoding = { "chunks": zarr_array.chunks, - "preferred_chunks": dict(zip(dimensions, zarr_array.chunks)), + "preferred_chunks": dict(zip(dimensions, zarr_array.chunks, strict=True)), "compressor": zarr_array.compressor, "filters": zarr_array.filters, } # _FillValue needs to be in attributes, not encoding, so it will get # picked up by decode_cf - if getattr(zarr_array, "fill_value") is not None: + if zarr_array.fill_value is not None: attributes["_FillValue"] = zarr_array.fill_value return Variable(dimensions, data, attributes, encoding) @@ -576,7 +576,7 @@ def get_dimensions(self): dimensions = {} for k, v in self.zarr_group.arrays(): dim_names, _ = _get_zarr_dims_and_attrs(v, DIMENSION_KEY, try_nczarr) - for d, s in zip(dim_names, v.shape): + for d, s in zip(dim_names, v.shape, strict=True): if d in dimensions and dimensions[d] != s: raise ValueError( f"found conflicting lengths for dimension {d} " diff --git a/xarray/coding/calendar_ops.py b/xarray/coding/calendar_ops.py index 52a487ca46d..22a19a63871 100644 --- a/xarray/coding/calendar_ops.py +++ b/xarray/coding/calendar_ops.py @@ -198,7 +198,7 @@ def convert_calendar( _convert_to_new_calendar_with_new_day_of_year( date, newdoy, calendar, use_cftime ) - for date, newdoy in zip(time.variable._data.array, new_doy) + for date, newdoy in zip(time.variable._data.array, new_doy, strict=True) ], dims=(dim,), name=dim, diff --git a/xarray/coding/times.py b/xarray/coding/times.py index 70df8c6c390..cfdecd28a27 100644 --- a/xarray/coding/times.py +++ b/xarray/coding/times.py @@ -204,7 +204,7 @@ def _unpack_time_units_and_ref_date(units: str) -> tuple[str, pd.Timestamp]: def _decode_cf_datetime_dtype( - data, units: str, calendar: str, use_cftime: bool | None + data, units: str, calendar: str | None, use_cftime: bool | None ) -> np.dtype: # Verify that at least the first and last date can be decoded # successfully. 
Otherwise, tracebacks end up swallowed by @@ -704,7 +704,7 @@ def _cast_to_dtype_if_safe(num: np.ndarray, dtype: np.dtype) -> np.ndarray: def encode_cf_datetime( - dates: T_DuckArray, # type: ignore + dates: T_DuckArray, # type: ignore[misc] units: str | None = None, calendar: str | None = None, dtype: np.dtype | None = None, @@ -726,7 +726,7 @@ def encode_cf_datetime( def _eagerly_encode_cf_datetime( - dates: T_DuckArray, # type: ignore + dates: T_DuckArray, # type: ignore[misc] units: str | None = None, calendar: str | None = None, dtype: np.dtype | None = None, @@ -809,7 +809,7 @@ def _eagerly_encode_cf_datetime( def _encode_cf_datetime_within_map_blocks( - dates: T_DuckArray, # type: ignore + dates: T_DuckArray, # type: ignore[misc] units: str, calendar: str, dtype: np.dtype, @@ -859,7 +859,7 @@ def _lazily_encode_cf_datetime( def encode_cf_timedelta( - timedeltas: T_DuckArray, # type: ignore + timedeltas: T_DuckArray, # type: ignore[misc] units: str | None = None, dtype: np.dtype | None = None, ) -> tuple[T_DuckArray, str]: @@ -871,7 +871,7 @@ def encode_cf_timedelta( def _eagerly_encode_cf_timedelta( - timedeltas: T_DuckArray, # type: ignore + timedeltas: T_DuckArray, # type: ignore[misc] units: str | None = None, dtype: np.dtype | None = None, allow_units_modification: bool = True, @@ -923,7 +923,7 @@ def _eagerly_encode_cf_timedelta( def _encode_cf_timedelta_within_map_blocks( - timedeltas: T_DuckArray, # type:ignore + timedeltas: T_DuckArray, # type: ignore[misc] units: str, dtype: np.dtype, ) -> T_DuckArray: diff --git a/xarray/core/alignment.py b/xarray/core/alignment.py index a28376d2890..d6cdd45bb49 100644 --- a/xarray/core/alignment.py +++ b/xarray/core/alignment.py @@ -405,6 +405,7 @@ def align_indexes(self) -> None: zip( [joined_index] + matching_indexes, [joined_index_vars] + matching_index_vars, + strict=True, ) ) need_reindex = self._need_reindex(dims, cmp_indexes) @@ -412,7 +413,7 @@ def align_indexes(self) -> None: if len(matching_indexes) > 1: need_reindex = self._need_reindex( dims, - list(zip(matching_indexes, matching_index_vars)), + list(zip(matching_indexes, matching_index_vars, strict=True)), ) else: need_reindex = False @@ -557,7 +558,7 @@ def reindex_all(self) -> None: self.results = tuple( self._reindex_one(obj, matching_indexes) for obj, matching_indexes in zip( - self.objects, self.objects_matching_indexes + self.objects, self.objects_matching_indexes, strict=True ) ) @@ -952,7 +953,7 @@ def is_alignable(obj): fill_value=fill_value, ) - for position, key, aligned_obj in zip(positions, keys, aligned): + for position, key, aligned_obj in zip(positions, keys, aligned, strict=True): if key is no_key: out[position] = aligned_obj else: diff --git a/xarray/core/combine.py b/xarray/core/combine.py index 4b4a07ddc77..c7dff9d249d 100644 --- a/xarray/core/combine.py +++ b/xarray/core/combine.py @@ -139,7 +139,8 @@ def _infer_concat_order_from_coords(datasets): # Append positions along extra dimension to structure which # encodes the multi-dimensional concatenation order tile_ids = [ - tile_id + (position,) for tile_id, position in zip(tile_ids, order) + tile_id + (position,) + for tile_id, position in zip(tile_ids, order, strict=True) ] if len(datasets) > 1 and not concat_dims: @@ -148,7 +149,7 @@ def _infer_concat_order_from_coords(datasets): "order the datasets for concatenation" ) - combined_ids = dict(zip(tile_ids, datasets)) + combined_ids = dict(zip(tile_ids, datasets, strict=True)) return combined_ids, concat_dims @@ -349,7 +350,7 @@ def _nested_combine( 
combined_ids = _infer_concat_order_from_positions(datasets) else: # Already sorted so just use the ids already passed - combined_ids = dict(zip(ids, datasets)) + combined_ids = dict(zip(ids, datasets, strict=True)) # Check that the inferred shape is combinable _check_shape_tile_ids(combined_ids) diff --git a/xarray/core/common.py b/xarray/core/common.py index 1ed1398746f..f043b7be3dd 100644 --- a/xarray/core/common.py +++ b/xarray/core/common.py @@ -254,7 +254,7 @@ def sizes(self: Any) -> Mapping[Hashable, int]: -------- Dataset.sizes """ - return Frozen(dict(zip(self.dims, self.shape))) + return Frozen(dict(zip(self.dims, self.shape, strict=True))) class AttrAccessMixin: diff --git a/xarray/core/computation.py b/xarray/core/computation.py index 3e91efc1ede..91a184d55cd 100644 --- a/xarray/core/computation.py +++ b/xarray/core/computation.py @@ -71,9 +71,9 @@ class _UFuncSignature: Attributes ---------- - input_core_dims : tuple[tuple] + input_core_dims : tuple[tuple, ...] Core dimension names on each input variable. - output_core_dims : tuple[tuple] + output_core_dims : tuple[tuple, ...] Core dimension names on each output variable. """ @@ -326,7 +326,7 @@ def apply_dataarray_vfunc( variable, coords=coords, indexes=indexes, name=name, fastpath=True ) for variable, coords, indexes in zip( - result_var, result_coords, result_indexes + result_var, result_coords, result_indexes, strict=True ) ) else: @@ -407,7 +407,7 @@ def _unpack_dict_tuples( ) -> tuple[dict[Hashable, Variable], ...]: out: tuple[dict[Hashable, Variable], ...] = tuple({} for _ in range(num_outputs)) for name, values in result_vars.items(): - for value, results_dict in zip(values, out): + for value, results_dict in zip(values, out, strict=True): results_dict[name] = value return out @@ -422,7 +422,7 @@ def _check_core_dims(signature, variable_args, name): """ missing = [] for i, (core_dims, variable_arg) in enumerate( - zip(signature.input_core_dims, variable_args) + zip(signature.input_core_dims, variable_args, strict=True) ): # Check whether all the dims are on the variable. 
Note that we need the # `hasattr` to check for a dims property, to protect against the case where @@ -454,7 +454,7 @@ def apply_dict_of_variables_vfunc( grouped_by_name = collect_dict_values(args, names, fill_value) result_vars = {} - for name, variable_args in zip(names, grouped_by_name): + for name, variable_args in zip(names, grouped_by_name, strict=True): core_dim_present = _check_core_dims(signature, variable_args, name) if core_dim_present is True: result_vars[name] = func(*variable_args) @@ -546,7 +546,7 @@ def apply_dataset_vfunc( if signature.num_outputs > 1: out = tuple( _fast_dataset(*args) - for args in zip(result_vars, list_of_coords, list_of_indexes) + for args in zip(result_vars, list_of_coords, list_of_indexes, strict=True) ) else: (coord_vars,) = list_of_coords @@ -616,11 +616,13 @@ def apply_groupby_func(func, *args): iterator = itertools.repeat(arg) iterators.append(iterator) - applied: Iterator = (func(*zipped_args) for zipped_args in zip(*iterators)) + applied: Iterator = ( + func(*zipped_args) for zipped_args in zip(*iterators, strict=False) + ) applied_example, applied = peek_at(applied) combine = first_groupby._combine # type: ignore[attr-defined] if isinstance(applied_example, tuple): - combined = tuple(combine(output) for output in zip(*applied)) + combined = tuple(combine(output) for output in zip(*applied, strict=True)) else: combined = combine(applied) return combined @@ -637,7 +639,7 @@ def unified_dim_sizes( "broadcasting cannot handle duplicate " f"dimensions on a variable: {list(var.dims)}" ) - for dim, size in zip(var.dims, var.shape): + for dim, size in zip(var.dims, var.shape, strict=True): if dim not in exclude_dims: if dim not in dim_sizes: dim_sizes[dim] = size @@ -741,7 +743,7 @@ def apply_variable_ufunc( if isinstance(arg, Variable) else arg ) - for arg, core_dims in zip(args, signature.input_core_dims) + for arg, core_dims in zip(args, signature.input_core_dims, strict=True) ] if any(is_chunked_array(array) for array in input_data): @@ -766,7 +768,7 @@ def apply_variable_ufunc( allow_rechunk = dask_gufunc_kwargs.get("allow_rechunk", None) if allow_rechunk is None: for n, (data, core_dims) in enumerate( - zip(input_data, signature.input_core_dims) + zip(input_data, signature.input_core_dims, strict=True) ): if is_chunked_array(data): # core dimensions cannot span multiple chunks @@ -848,7 +850,7 @@ def func(*arrays): ) output: list[Variable] = [] - for dims, data in zip(output_dims, result_data): + for dims, data in zip(output_dims, result_data, strict=True): data = as_compatible_data(data) if data.ndim != len(dims): raise ValueError( @@ -2179,7 +2181,7 @@ def _calc_idxminmax( # Handle chunked arrays (e.g. dask). if is_chunked_array(array.data): chunkmanager = get_chunked_array_type(array.data) - chunks = dict(zip(array.dims, array.chunks)) + chunks = dict(zip(array.dims, array.chunks, strict=True)) dask_coord = chunkmanager.from_array(array[dim].data, chunks=chunks[dim]) data = dask_coord[duck_array_ops.ravel(indx.data)] res = indx.copy(data=duck_array_ops.reshape(data, indx.shape)) @@ -2268,7 +2270,7 @@ def unify_chunks(*objects: Dataset | DataArray) -> tuple[Dataset | DataArray, .. 
_, chunked_data = chunkmanager.unify_chunks(*unify_chunks_args) chunked_data_iter = iter(chunked_data) out: list[Dataset | DataArray] = [] - for obj, ds in zip(objects, datasets): + for obj, ds in zip(objects, datasets, strict=True): for k, v in ds._variables.items(): if v.chunks is not None: ds._variables[k] = v.copy(data=next(chunked_data_iter)) diff --git a/xarray/core/concat.py b/xarray/core/concat.py index 182cf8a23a1..1133d8cc373 100644 --- a/xarray/core/concat.py +++ b/xarray/core/concat.py @@ -400,7 +400,9 @@ def process_subset_opt(opt, subset): equals[k] = False # computed variables are not to be re-computed # again in the future - for ds, v in zip(datasets[1:], computed): + for ds, v in zip( + datasets[1:], computed, strict=False + ): ds.variables[k].data = v.data break else: @@ -583,7 +585,7 @@ def ensure_common_dims(vars, concat_dim_lengths): common_dims = tuple(utils.OrderedSet(d for v in vars for d in v.dims)) if dim_name not in common_dims: common_dims = (dim_name,) + common_dims - for var, dim_len in zip(vars, concat_dim_lengths): + for var, dim_len in zip(vars, concat_dim_lengths, strict=True): if var.dims != common_dims: common_shape = tuple(dims_sizes.get(d, dim_len) for d in common_dims) var = var.set_dims(common_dims, common_shape) diff --git a/xarray/core/coordinates.py b/xarray/core/coordinates.py index 3b852b962bf..8840ad7f8c3 100644 --- a/xarray/core/coordinates.py +++ b/xarray/core/coordinates.py @@ -877,7 +877,7 @@ def __delitem__(self, key: Hashable) -> None: assert_no_index_corrupted(self._data.xindexes, {key}) del self._data._coords[key] - if self._data._indexes is not None and key in self._data._indexes: + if key in self._data._indexes: del self._data._indexes[key] def _ipython_key_completions_(self): diff --git a/xarray/core/dataarray.py b/xarray/core/dataarray.py index a0e34e8f9cc..4b6185edf38 100644 --- a/xarray/core/dataarray.py +++ b/xarray/core/dataarray.py @@ -126,7 +126,7 @@ def _check_coords_dims(shape, coords, dim): - sizes = dict(zip(dim, shape)) + sizes = dict(zip(dim, shape, strict=True)) for k, v in coords.items(): if any(d not in dim for d in v.dims): raise ValueError( @@ -175,7 +175,7 @@ def _infer_coords_and_dims( if utils.is_dict_like(coords): dims = list(coords.keys()) else: - for n, (dim, coord) in enumerate(zip(dims, coords)): + for n, (dim, coord) in enumerate(zip(dims, coords, strict=True)): coord = as_variable( coord, name=dims[n], auto_convert=False ).to_index_variable() @@ -202,7 +202,7 @@ def _infer_coords_and_dims( if new_coords[k].dims == (k,): new_coords[k] = new_coords[k].to_index_variable() elif coords is not None: - for dim, coord in zip(dims_tuple, coords): + for dim, coord in zip(dims_tuple, coords, strict=True): var = as_variable(coord, name=dim, auto_convert=False) var.dims = (dim,) new_coords[dim] = var.to_index_variable() @@ -254,14 +254,14 @@ def __getitem__(self, key) -> T_DataArray: if not utils.is_dict_like(key): # expand the indexer so we can handle Ellipsis labels = indexing.expanded_indexer(key, self.data_array.ndim) - key = dict(zip(self.data_array.dims, labels)) + key = dict(zip(self.data_array.dims, labels, strict=True)) return self.data_array.sel(key) def __setitem__(self, key, value) -> None: if not utils.is_dict_like(key): # expand the indexer so we can handle Ellipsis labels = indexing.expanded_indexer(key, self.data_array.ndim) - key = dict(zip(self.data_array.dims, labels)) + key = dict(zip(self.data_array.dims, labels, strict=True)) dim_indexers = map_index_queries(self.data_array, key).dim_indexers 
self.data_array[dim_indexers] = value @@ -441,7 +441,7 @@ def __init__( name: Hashable | None = None, attrs: Mapping | None = None, # internal parameters - indexes: Mapping[Any, Index] | None = None, + indexes: Mapping[Hashable, Index] | None = None, fastpath: bool = False, ) -> None: if fastpath: @@ -489,7 +489,7 @@ def __init__( assert isinstance(coords, dict) self._coords = coords self._name = name - self._indexes = indexes # type: ignore[assignment] + self._indexes = dict(indexes) self._close = None @@ -539,7 +539,7 @@ def _replace_maybe_drop_dims( indexes = self._indexes elif variable.dims == self.dims: # Shape has changed (e.g. from reduce(..., keepdims=True) - new_sizes = dict(zip(self.dims, variable.shape)) + new_sizes = dict(zip(self.dims, variable.shape, strict=True)) coords = { k: v for k, v in self._coords.items() @@ -878,7 +878,7 @@ def _item_key_to_dict(self, key: Any) -> Mapping[Hashable, Any]: if utils.is_dict_like(key): return key key = indexing.expanded_indexer(key, self.ndim) - return dict(zip(self.dims, key)) + return dict(zip(self.dims, key, strict=True)) def _getitem_coord(self, key: Any) -> Self: from xarray.core.dataset import _get_virtual_variable @@ -886,7 +886,7 @@ def _getitem_coord(self, key: Any) -> Self: try: var = self._coords[key] except KeyError: - dim_sizes = dict(zip(self.dims, self.shape)) + dim_sizes = dict(zip(self.dims, self.shape, strict=True)) _, key, var = _get_virtual_variable(self._coords, key, dim_sizes) return self._replace_maybe_drop_dims(var, name=key) @@ -1439,7 +1439,7 @@ def chunk( "It will raise an error in the future. Instead use a dict with dimension names as keys.", category=DeprecationWarning, ) - chunk_mapping = dict(zip(self.dims, chunks)) + chunk_mapping = dict(zip(self.dims, chunks, strict=True)) else: chunk_mapping = either_dict_or_kwargs(chunks, chunks_kwargs, "chunk") @@ -3922,7 +3922,7 @@ def to_dataframe( ds = self._to_dataset_whole(name=unique_name) if dim_order is None: - ordered_dims = dict(zip(self.dims, self.shape)) + ordered_dims = dict(zip(self.dims, self.shape, strict=True)) else: ordered_dims = ds._normalize_dim_order(dim_order=dim_order) @@ -4144,7 +4144,7 @@ def to_netcdf( # No problems with the name - so we're fine! dataset = self.to_dataset() - return to_netcdf( # type: ignore # mypy cannot resolve the overloads:( + return to_netcdf( # type: ignore[return-value] # mypy cannot resolve the overloads:( dataset, path, mode=mode, diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py index 3458ac27b16..08885e3cd8d 100644 --- a/xarray/core/dataset.py +++ b/xarray/core/dataset.py @@ -243,13 +243,13 @@ def _get_chunk(var: Variable, chunks, chunkmanager: ChunkManagerEntrypoint): # Determine the explicit requested chunks. preferred_chunks = var.encoding.get("preferred_chunks", {}) preferred_chunk_shape = tuple( - preferred_chunks.get(dim, size) for dim, size in zip(dims, shape) + preferred_chunks.get(dim, size) for dim, size in zip(dims, shape, strict=True) ) if isinstance(chunks, Number) or (chunks == "auto"): chunks = dict.fromkeys(dims, chunks) chunk_shape = tuple( chunks.get(dim, None) or preferred_chunk_sizes - for dim, preferred_chunk_sizes in zip(dims, preferred_chunk_shape) + for dim, preferred_chunk_sizes in zip(dims, preferred_chunk_shape, strict=True) ) chunk_shape = chunkmanager.normalize_chunks( @@ -259,7 +259,7 @@ def _get_chunk(var: Variable, chunks, chunkmanager: ChunkManagerEntrypoint): # Warn where requested chunks break preferred chunks, provided that the variable # contains data. 
if var.size: - for dim, size, chunk_sizes in zip(dims, shape, chunk_shape): + for dim, size, chunk_sizes in zip(dims, shape, chunk_shape, strict=True): try: preferred_chunk_sizes = preferred_chunks[dim] except KeyError: @@ -285,7 +285,7 @@ def _get_chunk(var: Variable, chunks, chunkmanager: ChunkManagerEntrypoint): "degrade performance. Instead, consider rechunking after loading." ) - return dict(zip(dims, chunk_shape)) + return dict(zip(dims, chunk_shape, strict=True)) def _maybe_chunk( @@ -871,7 +871,7 @@ def load(self, **kwargs) -> Self: *lazy_data.values(), **kwargs ) - for k, data in zip(lazy_data, evaluated_data): + for k, data in zip(lazy_data, evaluated_data, strict=False): self.variables[k].data = data # load everything else sequentially @@ -1054,7 +1054,7 @@ def _persist_inplace(self, **kwargs) -> Self: # evaluate all the dask arrays simultaneously evaluated_data = dask.persist(*lazy_data.values(), **kwargs) - for k, data in zip(lazy_data, evaluated_data): + for k, data in zip(lazy_data, evaluated_data, strict=False): self.variables[k].data = data return self @@ -1654,11 +1654,13 @@ def __setitem__( f"setting ({len(value)})" ) if isinstance(value, Dataset): - self.update(dict(zip(keylist, value.data_vars.values()))) + self.update( + dict(zip(keylist, value.data_vars.values(), strict=True)) + ) elif isinstance(value, DataArray): raise ValueError("Cannot assign single DataArray to multiple keys") else: - self.update(dict(zip(keylist, value))) + self.update(dict(zip(keylist, value, strict=True))) else: raise ValueError(f"Unsupported key-type {type(key)}") @@ -2333,7 +2335,7 @@ def to_netcdf( encoding = {} from xarray.backends.api import to_netcdf - return to_netcdf( # type: ignore # mypy cannot resolve the overloads:( + return to_netcdf( # type: ignore[return-value] # mypy cannot resolve the overloads:( self, path, mode=mode, @@ -3050,7 +3052,7 @@ def isel( coord_names.remove(name) continue variables[name] = var - dims.update(zip(var.dims, var.shape)) + dims.update(zip(var.dims, var.shape, strict=True)) return self._construct_direct( variables=variables, @@ -4274,7 +4276,7 @@ def _rename_indexes( new_index_vars = new_index.create_variables( { new: self._variables[old] - for old, new in zip(coord_names, new_coord_names) + for old, new in zip(coord_names, new_coord_names, strict=True) } ) variables.update(new_index_vars) @@ -4781,9 +4783,9 @@ def expand_dims( raise ValueError("axis should not contain duplicate values") # We need to sort them to make sure `axis` equals to the # axis positions of the result array. 
- zip_axis_dim = sorted(zip(axis_pos, dim.items())) + zip_axis_dim = sorted(zip(axis_pos, dim.items(), strict=True)) - all_dims = list(zip(v.dims, v.shape)) + all_dims = list(zip(v.dims, v.shape, strict=True)) for d, c in zip_axis_dim: all_dims.insert(d, c) variables[k] = v.set_dims(dict(all_dims)) @@ -7326,7 +7328,7 @@ def _to_dataframe(self, ordered_dims: Mapping[Any, int]): ] index = self.coords.to_index([*ordered_dims]) broadcasted_df = pd.DataFrame( - dict(zip(non_extension_array_columns, data)), index=index + dict(zip(non_extension_array_columns, data, strict=True)), index=index ) for extension_array_column in extension_array_columns: extension_array = self.variables[extension_array_column].data.array @@ -7501,10 +7503,10 @@ def from_dataframe(cls, dataframe: pd.DataFrame, sparse: bool = False) -> Self: if isinstance(idx, pd.MultiIndex): dims = tuple( - name if name is not None else "level_%i" % n + name if name is not None else "level_%i" % n # type: ignore[redundant-expr] for n, name in enumerate(idx.names) ) - for dim, lev in zip(dims, idx.levels): + for dim, lev in zip(dims, idx.levels, strict=True): xr_idx = PandasIndex(lev, dim) indexes[dim] = xr_idx index_vars.update(xr_idx.create_variables()) @@ -9636,7 +9638,7 @@ def argmin(self, dim: Hashable | None = None, **kwargs) -> Self: ): # Return int index if single dimension is passed, and is not part of a # sequence - argmin_func = getattr(duck_array_ops, "argmin") + argmin_func = duck_array_ops.argmin return self.reduce( argmin_func, dim=None if dim is None else [dim], **kwargs ) @@ -9729,7 +9731,7 @@ def argmax(self, dim: Hashable | None = None, **kwargs) -> Self: ): # Return int index if single dimension is passed, and is not part of a # sequence - argmax_func = getattr(duck_array_ops, "argmax") + argmax_func = duck_array_ops.argmax return self.reduce( argmax_func, dim=None if dim is None else [dim], **kwargs ) @@ -10028,7 +10030,7 @@ def curvefit( f"dimensions {preserved_dims}." 
) for param, (lb, ub) in bounds.items(): - for label, bound in zip(("Lower", "Upper"), (lb, ub)): + for label, bound in zip(("Lower", "Upper"), (lb, ub), strict=True): if isinstance(bound, DataArray): unexpected = set(bound.dims) - set(preserved_dims) if unexpected: diff --git a/xarray/core/datatree.py b/xarray/core/datatree.py index bc818eb5671..b12d861624a 100644 --- a/xarray/core/datatree.py +++ b/xarray/core/datatree.py @@ -716,7 +716,7 @@ def __bool__(self) -> bool: return bool(self._data_variables) or bool(self._children) def __iter__(self) -> Iterator[str]: - return itertools.chain(self._data_variables, self._children) # type: ignore + return itertools.chain(self._data_variables, self._children) # type: ignore[arg-type] def __array__(self, dtype=None, copy=None): raise TypeError( @@ -1265,7 +1265,7 @@ def equals(self, other: DataTree, from_root: bool = True) -> bool: return all( [ node.ds.equals(other_node.ds) - for node, other_node in zip(self.subtree, other.subtree) + for node, other_node in zip(self.subtree, other.subtree, strict=True) ] ) @@ -1295,7 +1295,7 @@ def identical(self, other: DataTree, from_root=True) -> bool: return all( node.ds.identical(other_node.ds) - for node, other_node in zip(self.subtree, other.subtree) + for node, other_node in zip(self.subtree, other.subtree, strict=True) ) def filter(self: DataTree, filterfunc: Callable[[DataTree], bool]) -> DataTree: diff --git a/xarray/core/datatree_mapping.py b/xarray/core/datatree_mapping.py index 17630466016..1a581629ab8 100644 --- a/xarray/core/datatree_mapping.py +++ b/xarray/core/datatree_mapping.py @@ -157,6 +157,7 @@ def _map_over_subtree(*args, **kwargs) -> DataTree | tuple[DataTree, ...]: first_tree.subtree, *args_as_tree_length_iterables, *list(kwargs_as_tree_length_iterables.values()), + strict=False, ): node_args_as_datasetviews = [ a.ds if isinstance(a, DataTree) else a for a in all_node_args[:n_args] @@ -168,6 +169,7 @@ def _map_over_subtree(*args, **kwargs) -> DataTree | tuple[DataTree, ...]: v.ds if isinstance(v, DataTree) else v for v in all_node_args[n_args:] ], + strict=True, ) ) func_with_error_context = _handle_errors_with_path_context( diff --git a/xarray/core/datatree_ops.py b/xarray/core/datatree_ops.py index a44700e2bf8..9e87cda191c 100644 --- a/xarray/core/datatree_ops.py +++ b/xarray/core/datatree_ops.py @@ -214,7 +214,7 @@ def method_name(self, *args, **kwargs): new_method_docstring = insert_doc_addendum( orig_method_docstring, _MAPPED_DOCSTRING_ADDENDUM ) - setattr(target_cls_dict[method_name], "__doc__", new_method_docstring) + target_cls_dict[method_name].__doc__ = new_method_docstring def insert_doc_addendum(docstring: str | None, addendum: str) -> str | None: diff --git a/xarray/core/dtypes.py b/xarray/core/dtypes.py index b39f7628fd3..7464c1e8a89 100644 --- a/xarray/core/dtypes.py +++ b/xarray/core/dtypes.py @@ -64,7 +64,7 @@ def maybe_promote(dtype: np.dtype) -> tuple[np.dtype, Any]: if isdtype(dtype, "real floating"): dtype_ = dtype fill_value = np.nan - elif isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.timedelta64): + elif np.issubdtype(dtype, np.timedelta64): # See https://github.com/numpy/numpy/issues/10685 # np.timedelta64 is a subclass of np.integer # Check np.timedelta64 before np.integer @@ -76,7 +76,7 @@ def maybe_promote(dtype: np.dtype) -> tuple[np.dtype, Any]: elif isdtype(dtype, "complex floating"): dtype_ = dtype fill_value = np.nan + np.nan * 1j - elif isinstance(dtype, np.dtype) and np.issubdtype(dtype, np.datetime64): + elif np.issubdtype(dtype, 
np.datetime64): dtype_ = dtype fill_value = np.datetime64("NaT") else: @@ -200,7 +200,7 @@ def isdtype(dtype, kind: str | tuple[str, ...], xp=None) -> bool: # numpy>=2 and pandas extensions arrays are implemented in # Xarray via the array API if not isinstance(kind, str) and not ( - isinstance(kind, tuple) and all(isinstance(k, str) for k in kind) + isinstance(kind, tuple) and all(isinstance(k, str) for k in kind) # type: ignore[redundant-expr] ): raise TypeError(f"kind must be a string or a tuple of strings: {repr(kind)}") diff --git a/xarray/core/formatting.py b/xarray/core/formatting.py index 657c9a2dbfb..110f80f8f5f 100644 --- a/xarray/core/formatting.py +++ b/xarray/core/formatting.py @@ -474,7 +474,7 @@ def prefixes(length: int) -> list[str]: preformatted = [ pretty_print(f" {prefix} {name}", col_width) - for prefix, name in zip(prefixes(len(names)), names) + for prefix, name in zip(prefixes(len(names)), names, strict=True) ] head, *tail = preformatted @@ -862,7 +862,7 @@ def extra_items_repr(extra_keys, mapping, ab_side, kwargs): temp = [ "\n".join([var_s, attr_s]) if attr_s else var_s - for var_s, attr_s in zip(temp, attrs_summary) + for var_s, attr_s in zip(temp, attrs_summary, strict=True) ] # TODO: It should be possible recursively use _diff_mapping_repr @@ -877,7 +877,9 @@ def extra_items_repr(extra_keys, mapping, ab_side, kwargs): # ) # temp += [newdiff] - diff_items += [ab_side + s[1:] for ab_side, s in zip(("L", "R"), temp)] + diff_items += [ + ab_side + s[1:] for ab_side, s in zip(("L", "R"), temp, strict=True) + ] if diff_items: summary += [f"Differing {title.lower()}:"] + diff_items @@ -941,7 +943,7 @@ def diff_array_repr(a, b, compat): temp = [wrap_indent(short_array_repr(obj), start=" ") for obj in (a, b)] diff_data_repr = [ ab_side + "\n" + ab_data_repr - for ab_side, ab_data_repr in zip(("L", "R"), temp) + for ab_side, ab_data_repr in zip(("L", "R"), temp, strict=True) ] summary += ["Differing values:"] + diff_data_repr @@ -966,7 +968,7 @@ def diff_treestructure(a: DataTree, b: DataTree, require_names_equal: bool) -> s # Walking nodes in "level-order" fashion means walking down from the root breadth-first. # Checking for isomorphism by walking in this way implicitly assumes that the tree is an ordered tree # (which it is so long as children are stored in a tuple or list rather than in a set). 
- for node_a, node_b in zip(LevelOrderIter(a), LevelOrderIter(b)): + for node_a, node_b in zip(LevelOrderIter(a), LevelOrderIter(b), strict=True): path_a, path_b = node_a.path, node_b.path if require_names_equal and node_a.name != node_b.name: @@ -1013,7 +1015,7 @@ def diff_nodewise_summary(a: DataTree, b: DataTree, compat): compat_str = _compat_to_str(compat) summary = [] - for node_a, node_b in zip(a.subtree, b.subtree): + for node_a, node_b in zip(a.subtree, b.subtree, strict=True): a_ds, b_ds = node_a.ds, node_b.ds if not a_ds._all_compat(b_ds, compat): diff --git a/xarray/core/formatting_html.py b/xarray/core/formatting_html.py index b19ce02683f..34c7a93bd7a 100644 --- a/xarray/core/formatting_html.py +++ b/xarray/core/formatting_html.py @@ -303,7 +303,7 @@ def _obj_repr(obj, header_components, sections): def array_repr(arr) -> str: - dims = OrderedDict((k, v) for k, v in zip(arr.dims, arr.shape)) + dims = OrderedDict((k, v) for k, v in zip(arr.dims, arr.shape, strict=True)) if hasattr(arr, "xindexes"): indexed_dims = arr.xindexes.dims else: diff --git a/xarray/core/groupby.py b/xarray/core/groupby.py index d3bf33be0ca..a5e520b98b6 100644 --- a/xarray/core/groupby.py +++ b/xarray/core/groupby.py @@ -197,7 +197,7 @@ def __array__(self) -> np.ndarray: return np.arange(self.size) @property - def shape(self) -> tuple[int]: + def shape(self) -> tuple[int, ...]: return (self.size,) @property @@ -458,7 +458,7 @@ def factorize(self) -> EncodedGroups: ) # NaNs; as well as values outside the bins are coded by -1 # Restore these after the raveling - mask = functools.reduce(np.logical_or, [(code == -1) for code in broadcasted_codes]) # type: ignore + mask = functools.reduce(np.logical_or, [(code == -1) for code in broadcasted_codes]) # type: ignore[arg-type] _flatcodes[mask] = -1 midx = pd.MultiIndex.from_product( @@ -646,7 +646,11 @@ def groups(self) -> dict[GroupKey, GroupIndex]: # provided to mimic pandas.groupby if self._groups is None: self._groups = dict( - zip(self.encoded.unique_coord.data, self.encoded.group_indices) + zip( + self.encoded.unique_coord.data, + self.encoded.group_indices, + strict=True, + ) ) return self._groups @@ -660,7 +664,7 @@ def __len__(self) -> int: return self._len def __iter__(self) -> Iterator[tuple[GroupKey, T_Xarray]]: - return zip(self.encoded.unique_coord.data, self._iter_grouped()) + return zip(self.encoded.unique_coord.data, self._iter_grouped(), strict=True) def __repr__(self) -> str: text = ( @@ -845,7 +849,7 @@ def _flox_reduce( obj = self._original_obj variables = ( {k: v.variable for k, v in obj.data_vars.items()} - if isinstance(obj, Dataset) + if isinstance(obj, Dataset) # type: ignore[redundant-expr] # seems to be a mypy bug else obj._coords ) diff --git a/xarray/core/indexes.py b/xarray/core/indexes.py index d92ef72246e..35870064db5 100644 --- a/xarray/core/indexes.py +++ b/xarray/core/indexes.py @@ -644,7 +644,7 @@ def from_variables( # preserve wrapped pd.Index (if any) # accessing `.data` can load data from disk, so we only access if needed - data = getattr(var._data, "array") if hasattr(var._data, "array") else var.data + data = var._data.array if hasattr(var._data, "array") else var.data # multi-index level variable: get level index if isinstance(var._data, PandasMultiIndexingAdapter): level = var._data.level @@ -1024,14 +1024,16 @@ def stack( _check_dim_compat(variables, all_dims="different") level_indexes = [safe_cast_to_index(var) for var in variables.values()] - for name, idx in zip(variables, level_indexes): + for name, idx in 
zip(variables, level_indexes, strict=True): if isinstance(idx, pd.MultiIndex): raise ValueError( f"cannot create a multi-index along stacked dimension {dim!r} " f"from variable {name!r} that wraps a multi-index" ) - split_labels, levels = zip(*[lev.factorize() for lev in level_indexes]) + split_labels, levels = zip( + *[lev.factorize() for lev in level_indexes], strict=True + ) labels_mesh = np.meshgrid(*split_labels, indexing="ij") labels = [x.ravel() for x in labels_mesh] @@ -1051,7 +1053,7 @@ def unstack(self) -> tuple[dict[Hashable, Index], pd.MultiIndex]: ) new_indexes: dict[Hashable, Index] = {} - for name, lev in zip(clean_index.names, clean_index.levels): + for name, lev in zip(clean_index.names, clean_index.levels, strict=True): idx = PandasIndex( lev.copy(), name, coord_dtype=self.level_coords_dtype[name] ) @@ -1258,7 +1260,9 @@ def sel(self, labels, method=None, tolerance=None) -> IndexSelResult: else: levels = [self.index.names[i] for i in range(len(label))] indexer, new_index = self.index.get_loc_level(label, level=levels) - scalar_coord_values.update({k: v for k, v in zip(levels, label)}) + scalar_coord_values.update( + {k: v for k, v in zip(levels, label, strict=True)} + ) else: label_array = normalize_label(label) @@ -1360,7 +1364,8 @@ def rename(self, name_dict, dims_dict): new_dim = dims_dict.get(self.dim, self.dim) new_level_coords_dtype = { - k: v for k, v in zip(new_names, self.level_coords_dtype.values()) + k: v + for k, v in zip(new_names, self.level_coords_dtype.values(), strict=True) } return self._replace( index, dim=new_dim, level_coords_dtype=new_level_coords_dtype diff --git a/xarray/core/indexing.py b/xarray/core/indexing.py index 1f5444b6baa..06b4b9a475f 100644 --- a/xarray/core/indexing.py +++ b/xarray/core/indexing.py @@ -617,7 +617,7 @@ def __init__(self, array: Any, key: ExplicitIndexer | None = None): self.key = key shape: _Shape = () - for size, k in zip(self.array.shape, self.key.tuple): + for size, k in zip(self.array.shape, self.key.tuple, strict=True): if isinstance(k, slice): shape += (len(range(*k.indices(size))),) elif isinstance(k, np.ndarray): @@ -627,7 +627,7 @@ def __init__(self, array: Any, key: ExplicitIndexer | None = None): def _updated_key(self, new_key: ExplicitIndexer) -> BasicIndexer | OuterIndexer: iter_new_key = iter(expanded_indexer(new_key.tuple, self.ndim)) full_key = [] - for size, k in zip(self.array.shape, self.key.tuple): + for size, k in zip(self.array.shape, self.key.tuple, strict=True): if isinstance(k, integer_types): full_key.append(k) else: @@ -907,7 +907,7 @@ def _outer_to_vectorized_indexer( n_dim = len([k for k in key if not isinstance(k, integer_types)]) i_dim = 0 new_key = [] - for k, size in zip(key, shape): + for k, size in zip(key, shape, strict=True): if isinstance(k, integer_types): new_key.append(np.array(k).reshape((1,) * n_dim)) else: # np.ndarray or slice @@ -1127,10 +1127,10 @@ def _decompose_vectorized_indexer( # convert negative indices indexer_elems = [ np.where(k < 0, k + s, k) if isinstance(k, np.ndarray) else k - for k, s in zip(indexer.tuple, shape) + for k, s in zip(indexer.tuple, shape, strict=True) ] - for k, s in zip(indexer_elems, shape): + for k, s in zip(indexer_elems, shape, strict=True): if isinstance(k, slice): # If it is a slice, then we will slice it as-is # (but make its step positive) in the backend, @@ -1207,7 +1207,7 @@ def _decompose_outer_indexer( assert isinstance(indexer, OuterIndexer | BasicIndexer) if indexing_support == IndexingSupport.VECTORIZED: - for k, s in 
zip(indexer.tuple, shape): + for k, s in zip(indexer.tuple, shape, strict=False): if isinstance(k, slice): # If it is a slice, then we will slice it as-is # (but make its step positive) in the backend, @@ -1222,7 +1222,7 @@ def _decompose_outer_indexer( # make indexer positive pos_indexer: list[np.ndarray | int | np.number] = [] - for k, s in zip(indexer.tuple, shape): + for k, s in zip(indexer.tuple, shape, strict=False): if isinstance(k, np.ndarray): pos_indexer.append(np.where(k < 0, k + s, k)) elif isinstance(k, integer_types) and k < 0: @@ -1244,7 +1244,7 @@ def _decompose_outer_indexer( ] array_index = np.argmax(np.array(gains)) if len(gains) > 0 else None - for i, (k, s) in enumerate(zip(indexer_elems, shape)): + for i, (k, s) in enumerate(zip(indexer_elems, shape, strict=False)): if isinstance(k, np.ndarray) and i != array_index: # np.ndarray key is converted to slice that covers the entire # entries of this key. @@ -1265,7 +1265,7 @@ def _decompose_outer_indexer( return (OuterIndexer(tuple(backend_indexer)), OuterIndexer(tuple(np_indexer))) if indexing_support == IndexingSupport.OUTER: - for k, s in zip(indexer_elems, shape): + for k, s in zip(indexer_elems, shape, strict=False): if isinstance(k, slice): # slice: convert positive step slice for backend bk_slice, np_slice = _decompose_slice(k, s) @@ -1287,7 +1287,7 @@ def _decompose_outer_indexer( # basic indexer assert indexing_support == IndexingSupport.BASIC - for k, s in zip(indexer_elems, shape): + for k, s in zip(indexer_elems, shape, strict=False): if isinstance(k, np.ndarray): # np.ndarray key is converted to slice that covers the entire # entries of this key. @@ -1315,7 +1315,7 @@ def _arrayize_vectorized_indexer( n_dim = arrays[0].ndim if len(arrays) > 0 else 0 i_dim = 0 new_key = [] - for v, size in zip(indexer.tuple, shape): + for v, size in zip(indexer.tuple, shape, strict=True): if isinstance(v, np.ndarray): new_key.append(np.reshape(v, v.shape + (1,) * len(slices))) else: # slice @@ -1333,7 +1333,7 @@ def _chunked_array_with_chunks_hint( if len(chunks) < array.ndim: raise ValueError("not enough chunks in hint") new_chunks = [] - for chunk, size in zip(chunks, array.shape): + for chunk, size in zip(chunks, array.shape, strict=False): new_chunks.append(chunk if size > 1 else (1,)) return chunkmanager.from_array(array, new_chunks) # type: ignore[arg-type] @@ -1399,7 +1399,7 @@ def create_mask( base_mask = _masked_result_drop_slice(key, data) slice_shape = tuple( np.arange(*k.indices(size)).size - for k, size in zip(key, shape) + for k, size in zip(key, shape, strict=False) if isinstance(k, slice) ) expanded_mask = base_mask[(Ellipsis,) + (np.newaxis,) * len(slice_shape)] @@ -1711,7 +1711,7 @@ def _convert_scalar(self, item): # a NumPy array. 
return to_0d_array(item) - def _prepare_key(self, key: tuple[Any, ...]) -> tuple[Any, ...]: + def _prepare_key(self, key: Any | tuple[Any, ...]) -> tuple[Any, ...]: if isinstance(key, tuple) and len(key) == 1: # unpack key so it can index a pandas.Index object (pandas.Index # objects don't like tuples) diff --git a/xarray/core/merge.py b/xarray/core/merge.py index b3b50ec5ef7..bd927a188df 100644 --- a/xarray/core/merge.py +++ b/xarray/core/merge.py @@ -66,7 +66,7 @@ def broadcast_dimension_size(variables: list[Variable]) -> dict[Hashable, int]: """ dims: dict[Hashable, int] = {} for var in variables: - for dim, size in zip(var.dims, var.shape): + for dim, size in zip(var.dims, var.shape, strict=True): if dim in dims and size != dims[dim]: raise ValueError(f"index {dim!r} not aligned") dims[dim] = size diff --git a/xarray/core/missing.py b/xarray/core/missing.py index 187a93d322f..55e754010da 100644 --- a/xarray/core/missing.py +++ b/xarray/core/missing.py @@ -624,7 +624,7 @@ def interp(var, indexes_coords, method: InterpOptions, **kwargs): # target dimensions dims = list(indexes_coords) - x, new_x = zip(*[indexes_coords[d] for d in dims]) + x, new_x = zip(*[indexes_coords[d] for d in dims], strict=True) destination = broadcast_variables(*new_x) # transpose to make the interpolated axis to the last position @@ -710,7 +710,9 @@ def interp_func(var, x, new_x, method: InterpOptions, kwargs): _, rechunked = chunkmanager.unify_chunks(*args) - args = tuple(elem for pair in zip(rechunked, args[1::2]) for elem in pair) + args = tuple( + elem for pair in zip(rechunked, args[1::2], strict=True) for elem in pair + ) new_x = rechunked[1 + (len(rechunked) - 1) // 2 :] @@ -798,11 +800,13 @@ def _chunked_aware_interpnd(var, *coords, interp_func, interp_kwargs, localize=T # _localize expect var to be a Variable var = Variable([f"dim_{dim}" for dim in range(len(var.shape))], var) - indexes_coords = {_x.dims[0]: (_x, _new_x) for _x, _new_x in zip(x, new_x)} + indexes_coords = { + _x.dims[0]: (_x, _new_x) for _x, _new_x in zip(x, new_x, strict=True) + } # simple speed up for the local interpolation var, indexes_coords = _localize(var, indexes_coords) - x, new_x = zip(*[indexes_coords[d] for d in indexes_coords]) + x, new_x = zip(*[indexes_coords[d] for d in indexes_coords], strict=True) # put var back as a ndarray var = var.data diff --git a/xarray/core/options.py b/xarray/core/options.py index f31413a2a1a..a00aa363014 100644 --- a/xarray/core/options.py +++ b/xarray/core/options.py @@ -1,7 +1,7 @@ from __future__ import annotations import warnings -from typing import TYPE_CHECKING, Literal, TypedDict +from typing import TYPE_CHECKING, Any, Literal, TypedDict from xarray.core.utils import FrozenDict @@ -92,7 +92,7 @@ class T_Options(TypedDict): _DISPLAY_OPTIONS = frozenset(["text", "html"]) -def _positive_integer(value: int) -> bool: +def _positive_integer(value: Any) -> bool: return isinstance(value, int) and value > 0 diff --git a/xarray/core/parallel.py b/xarray/core/parallel.py index 9c68ee3a1c5..12be026e539 100644 --- a/xarray/core/parallel.py +++ b/xarray/core/parallel.py @@ -29,7 +29,7 @@ class ExpectedDict(TypedDict): def unzip(iterable): - return zip(*iterable) + return zip(*iterable, strict=True) def assert_chunks_compatible(a: Dataset, b: Dataset): @@ -345,7 +345,7 @@ def _wrapper( converted_args = [ dataset_to_dataarray(arg) if is_array else arg - for is_array, arg in zip(arg_is_array, args) + for is_array, arg in zip(arg_is_array, args, strict=True) ] result = func(*converted_args, **kwargs) @@ 
-440,7 +440,10 @@ def _wrapper( merged_coordinates = merge([arg.coords for arg in aligned]).coords _, npargs = unzip( - sorted(list(zip(xarray_indices, xarray_objs)) + others, key=lambda x: x[0]) + sorted( + list(zip(xarray_indices, xarray_objs, strict=True)) + others, + key=lambda x: x[0], + ) ) # check that chunk sizes are compatible @@ -534,7 +537,7 @@ def _wrapper( # iterate over all possible chunk combinations for chunk_tuple in itertools.product(*ichunk.values()): # mapping from dimension name to chunk index - chunk_index = dict(zip(ichunk.keys(), chunk_tuple)) + chunk_index = dict(zip(ichunk.keys(), chunk_tuple, strict=True)) blocked_args = [ ( @@ -544,7 +547,7 @@ def _wrapper( if isxr else arg ) - for isxr, arg in zip(is_xarray, npargs) + for isxr, arg in zip(is_xarray, npargs, strict=True) ] # raise nice error messages in _wrapper diff --git a/xarray/core/rolling.py b/xarray/core/rolling.py index f7dd1210919..072012e5f51 100644 --- a/xarray/core/rolling.py +++ b/xarray/core/rolling.py @@ -133,7 +133,7 @@ def __repr__(self) -> str: attrs = [ "{k}->{v}{c}".format(k=k, v=w, c="(center)" if c else "") - for k, w, c in zip(self.dim, self.window, self.center) + for k, w, c in zip(self.dim, self.window, self.center, strict=True) ] return "{klass} [{attrs}]".format( klass=self.__class__.__name__, attrs=",".join(attrs) @@ -303,7 +303,7 @@ def __iter__(self) -> Iterator[tuple[DataArray, DataArray]]: starts = stops - window0 starts[: window0 - offset] = 0 - for label, start, stop in zip(self.window_labels, starts, stops): + for label, start, stop in zip(self.window_labels, starts, stops, strict=True): window = self.obj.isel({dim0: slice(start, stop)}) counts = window.count(dim=[dim0]) @@ -424,7 +424,9 @@ def _construct( attrs=attrs, name=obj.name, ) - return result.isel({d: slice(None, None, s) for d, s in zip(self.dim, strides)}) + return result.isel( + {d: slice(None, None, s) for d, s in zip(self.dim, strides, strict=True)} + ) def reduce( self, func: Callable, keep_attrs: bool | None = None, **kwargs: Any @@ -520,7 +522,7 @@ def _counts(self, keep_attrs: bool | None) -> DataArray: counts = ( self.obj.notnull(keep_attrs=keep_attrs) .rolling( - {d: w for d, w in zip(self.dim, self.window)}, + {d: w for d, w in zip(self.dim, self.window, strict=True)}, center={d: self.center[i] for i, d in enumerate(self.dim)}, ) .construct(rolling_dim, fill_value=False, keep_attrs=keep_attrs) @@ -887,7 +889,7 @@ def construct( # Need to stride coords as well. TODO: is there a better way? coords = self.obj.isel( - {d: slice(None, None, s) for d, s in zip(self.dim, strides)} + {d: slice(None, None, s) for d, s in zip(self.dim, strides, strict=True)} ).coords attrs = self.obj.attrs if keep_attrs else {} diff --git a/xarray/core/utils.py b/xarray/core/utils.py index 8c2ac1de68d..68d17fc3614 100644 --- a/xarray/core/utils.py +++ b/xarray/core/utils.py @@ -186,7 +186,7 @@ def equivalent(first: T, second: T) -> bool: def list_equiv(first: Sequence[T], second: Sequence[T]) -> bool: if len(first) != len(second): return False - for f, s in zip(first, second): + for f, s in zip(first, second, strict=True): if not equivalent(f, s): return False return True @@ -992,7 +992,7 @@ def __get__(self, obj: None | object, cls) -> type[_Accessor] | _Accessor: if obj is None: return self._accessor - return self._accessor(obj) # type: ignore # assume it is a valid accessor! + return self._accessor(obj) # type: ignore[call-arg] # assume it is a valid accessor! 
# Singleton type, as per https://github.com/python/typing/pull/240 diff --git a/xarray/core/variable.py b/xarray/core/variable.py index 1f2911a9930..d84a03c3677 100644 --- a/xarray/core/variable.py +++ b/xarray/core/variable.py @@ -319,7 +319,7 @@ def as_compatible_data( # validate whether the data is valid data types. data = np.asarray(data) - if isinstance(data, np.ndarray) and data.dtype.kind in "OMm": + if data.dtype.kind in "OMm": data = _possibly_convert_objects(data) return _maybe_wrap_data(data) @@ -647,7 +647,7 @@ def _broadcast_indexes(self, key): # If all key is 1-dimensional and there are no duplicate labels, # key can be mapped as an OuterIndexer. dims = [] - for k, d in zip(key, self.dims): + for k, d in zip(key, self.dims, strict=True): if isinstance(k, Variable): if len(k.dims) > 1: return self._broadcast_indexes_vectorized(key) @@ -661,13 +661,15 @@ def _broadcast_indexes(self, key): def _broadcast_indexes_basic(self, key): dims = tuple( - dim for k, dim in zip(key, self.dims) if not isinstance(k, integer_types) + dim + for k, dim in zip(key, self.dims, strict=True) + if not isinstance(k, integer_types) ) return dims, BasicIndexer(key), None def _validate_indexers(self, key): """Make sanity checks""" - for dim, k in zip(self.dims, key): + for dim, k in zip(self.dims, key, strict=True): if not isinstance(k, BASIC_INDEXING_TYPES): if not isinstance(k, Variable): if not is_duck_array(k): @@ -706,7 +708,7 @@ def _broadcast_indexes_outer(self, key): # drop dim if k is integer or if k is a 0d dask array dims = tuple( k.dims[0] if isinstance(k, Variable) else dim - for k, dim in zip(key, self.dims) + for k, dim in zip(key, self.dims, strict=True) if (not isinstance(k, integer_types) and not is_0d_dask_array(k)) ) @@ -729,7 +731,7 @@ def _broadcast_indexes_outer(self, key): def _broadcast_indexes_vectorized(self, key): variables = [] out_dims_set = OrderedSet() - for dim, value in zip(self.dims, key): + for dim, value in zip(self.dims, key, strict=True): if isinstance(value, slice): out_dims_set.add(dim) else: @@ -751,7 +753,7 @@ def _broadcast_indexes_vectorized(self, key): variable_dims.update(variable.dims) slices = [] - for i, (dim, value) in enumerate(zip(self.dims, key)): + for i, (dim, value) in enumerate(zip(self.dims, key, strict=True)): if isinstance(value, slice): if dim in variable_dims: # We only convert slice objects to variables if they share @@ -1134,7 +1136,7 @@ def _pad_options_dim_to_index( if fill_with_shape: return [ (n, n) if d not in pad_option else pad_option[d] - for d, n in zip(self.dims, self.data.shape) + for d, n in zip(self.dims, self.data.shape, strict=True) ] return [(0, 0) if d not in pad_option else pad_option[d] for d in self.dims] @@ -1377,7 +1379,7 @@ def set_dims(self, dim, shape=None): # writeable if possible expanded_data = self.data elif shape is not None: - dims_map = dict(zip(dim, shape)) + dims_map = dict(zip(dim, shape, strict=True)) tmp_shape = tuple(dims_map[d] for d in expanded_dims) expanded_data = duck_array_ops.broadcast_to(self.data, tmp_shape) else: @@ -1527,13 +1529,13 @@ def _unstack_once( # unstacking a dense multitindexed array to a sparse array from sparse import COO - codes = zip(*index.codes) + codes = zip(*index.codes, strict=True) if reordered.ndim == 1: indexes = codes else: sizes = itertools.product(*[range(s) for s in reordered.shape[:-1]]) tuple_indexes = itertools.product(sizes, codes) - indexes = map(lambda x: list(itertools.chain(*x)), tuple_indexes) # type: ignore + indexes = map(lambda x: 
list(itertools.chain(*x)), tuple_indexes) # type: ignore[assignment] data = COO( coords=np.array(list(indexes)).T, @@ -2061,7 +2063,9 @@ def rolling_window( if utils.is_scalar(dim): for name, arg in zip( - ["window", "window_dim", "center"], [window, window_dim, center] + ["window", "window_dim", "center"], + [window, window_dim, center], + strict=True, ): if not utils.is_scalar(arg): raise ValueError( @@ -2089,7 +2093,7 @@ def rolling_window( ) pads = {} - for d, win, cent in zip(dim, window, center): + for d, win, cent in zip(dim, window, center, strict=True): if cent: start = win // 2 # 10 -> 5, 9 -> 4 end = win - 1 - start @@ -2399,7 +2403,7 @@ def _unravel_argminmax( result = { d: Variable(dims=result_dims, data=i) - for d, i in zip(dim, result_unravelled_indices) + for d, i in zip(dim, result_unravelled_indices, strict=True) } if keep_attrs is None: @@ -2870,7 +2874,7 @@ def _unified_dims(variables): var_dims = var.dims _raise_if_any_duplicate_dimensions(var_dims, err_context="Broadcasting") - for d, s in zip(var_dims, var.shape): + for d, s in zip(var_dims, var.shape, strict=True): if d not in all_dims: all_dims[d] = s elif all_dims[d] != s: @@ -2998,7 +3002,7 @@ def calculate_dimensions(variables: Mapping[Any, Variable]) -> dict[Hashable, in last_used = {} scalar_vars = {k for k, v in variables.items() if not v.dims} for k, var in variables.items(): - for dim, size in zip(var.dims, var.shape): + for dim, size in zip(var.dims, var.shape, strict=True): if dim in scalar_vars: raise ValueError( f"dimension {dim!r} already exists as a scalar variable" diff --git a/xarray/datatree_/docs/source/conf.py b/xarray/datatree_/docs/source/conf.py index 430dbb5bf6d..c32f2b126ed 100644 --- a/xarray/datatree_/docs/source/conf.py +++ b/xarray/datatree_/docs/source/conf.py @@ -17,9 +17,9 @@ import os import sys -import sphinx_autosummary_accessors # type: ignore +import sphinx_autosummary_accessors # type: ignore[import-not-found] -import datatree # type: ignore +import datatree # type: ignore[import-not-found] # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the diff --git a/xarray/groupers.py b/xarray/groupers.py index 9c24a96077f..e4cb884e6de 100644 --- a/xarray/groupers.py +++ b/xarray/groupers.py @@ -443,7 +443,7 @@ def factorize(self, group: T_Group) -> EncodedGroups: full_index, first_items, codes_ = self._get_index_and_items() sbins = first_items.values.astype(np.int64) group_indices: GroupIndices = tuple( - [slice(i, j) for i, j in zip(sbins[:-1], sbins[1:])] + [slice(i, j) for i, j in zip(sbins[:-1], sbins[1:], strict=True)] + [slice(sbins[-1], None)] ) diff --git a/xarray/namedarray/core.py b/xarray/namedarray/core.py index fb29d8c0b24..0d1a50a8d3c 100644 --- a/xarray/namedarray/core.py +++ b/xarray/namedarray/core.py @@ -737,14 +737,14 @@ def chunksizes( """ data = self._data if isinstance(data, _chunkedarray): - return dict(zip(self.dims, data.chunks)) + return dict(zip(self.dims, data.chunks, strict=True)) else: return {} @property def sizes(self) -> dict[_Dim, _IntOrUnknown]: """Ordered mapping from dimension names to lengths.""" - return dict(zip(self.dims, self.shape)) + return dict(zip(self.dims, self.shape, strict=True)) def chunk( self, @@ -948,7 +948,7 @@ def _nonzero(self: T_NamedArrayInteger) -> tuple[T_NamedArrayInteger, ...]: _attrs = self.attrs return tuple( cast("T_NamedArrayInteger", self._new((dim,), nz, _attrs)) - for nz, dim in zip(nonzeros, self.dims) + for nz, dim in zip(nonzeros, self.dims, strict=True) ) def __repr__(self) -> str: @@ -1038,8 +1038,8 @@ def permute_dims( # or dims are in same order return self.copy(deep=False) - axes_result = self.get_axis_num(dims) - axes = (axes_result,) if isinstance(axes_result, int) else axes_result + axes = self.get_axis_num(dims) + assert isinstance(axes, tuple) return permute_dims(self, axes) diff --git a/xarray/namedarray/utils.py b/xarray/namedarray/utils.py index e3a4f6ba1ad..606e72acd0e 100644 --- a/xarray/namedarray/utils.py +++ b/xarray/namedarray/utils.py @@ -20,8 +20,8 @@ from dask.array.core import Array as DaskArray from dask.typing import DaskCollection except ImportError: - DaskArray = NDArray # type: ignore - DaskCollection: Any = NDArray # type: ignore + DaskArray = NDArray # type: ignore[assignment, misc] + DaskCollection: Any = NDArray # type: ignore[no-redef] from xarray.namedarray._typing import _Dim, duckarray diff --git a/xarray/plot/dataarray_plot.py b/xarray/plot/dataarray_plot.py index ae10c3e9920..b759f0bb944 100644 --- a/xarray/plot/dataarray_plot.py +++ b/xarray/plot/dataarray_plot.py @@ -210,7 +210,7 @@ def _prepare_plot1d_data( plts.update( {k: darray.coords[v] for k, v in coords_to_plot.items() if v is not None} ) - plts = dict(zip(plts.keys(), broadcast(*(plts.values())))) + plts = dict(zip(plts.keys(), broadcast(*(plts.values())), strict=True)) return plts @@ -1089,7 +1089,9 @@ def _add_labels( """Set x, y, z labels.""" add_labels = [add_labels] * 3 if isinstance(add_labels, bool) else add_labels axes: tuple[Literal["x", "y", "z"], ...] 
= ("x", "y", "z") - for axis, add_label, darray, suffix in zip(axes, add_labels, darrays, suffixes): + for axis, add_label, darray, suffix in zip( + axes, add_labels, darrays, suffixes, strict=True + ): if darray is None: continue diff --git a/xarray/plot/facetgrid.py b/xarray/plot/facetgrid.py index 4c0d9b96a03..4e43ad2826c 100644 --- a/xarray/plot/facetgrid.py +++ b/xarray/plot/facetgrid.py @@ -362,7 +362,7 @@ def map_dataarray( rgb=kwargs.get("rgb", None), ) - for d, ax in zip(self.name_dicts.flat, self.axs.flat): + for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # None is the sentinel value if d is not None: subset = self.data.loc[d] @@ -505,7 +505,10 @@ def map_plot1d( # Plot the data for each subplot: for add_lbls, d, ax in zip( - add_labels_.reshape((self.axs.size, -1)), name_dicts.flat, self.axs.flat + add_labels_.reshape((self.axs.size, -1)), + name_dicts.flat, + self.axs.flat, + strict=True, ): func_kwargs["add_labels"] = add_lbls # None is the sentinel value @@ -571,7 +574,7 @@ def map_dataarray_line( ) -> T_FacetGrid: from xarray.plot.dataarray_plot import _infer_line_data - for d, ax in zip(self.name_dicts.flat, self.axs.flat): + for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # None is the sentinel value if d is not None: subset = self.data.loc[d] @@ -638,7 +641,7 @@ def map_dataset( raise ValueError("Please provide scale.") # TODO: come up with an algorithm for reasonable scale choice - for d, ax in zip(self.name_dicts.flat, self.axs.flat): + for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # None is the sentinel value if d is not None: subset = self.data.loc[d] @@ -672,7 +675,7 @@ def _finalize_grid(self, *axlabels: Hashable) -> None: self.set_titles() self.fig.tight_layout() - for ax, namedict in zip(self.axs.flat, self.name_dicts.flat): + for ax, namedict in zip(self.axs.flat, self.name_dicts.flat, strict=True): if namedict is None: ax.set_visible(False) @@ -824,7 +827,7 @@ def _set_lims( # Set limits: for ax in self.axs.flat: for (axis, data_limit), parameter_limit in zip( - lims_largest.items(), (x, y, z) + lims_largest.items(), (x, y, z), strict=True ): set_lim = getattr(ax, f"set_{axis}lim", None) if set_lim: @@ -834,7 +837,7 @@ def set_axis_labels(self, *axlabels: Hashable) -> None: """Set axis labels on the left column and bottom row of the grid.""" from xarray.core.dataarray import DataArray - for var, axis in zip(axlabels, ["x", "y", "z"]): + for var, axis in zip(axlabels, ["x", "y", "z"], strict=False): if var is not None: if isinstance(var, DataArray): getattr(self, f"set_{axis}labels")(label_from_attrs(var)) @@ -893,7 +896,7 @@ def set_titles( nicetitle = functools.partial(_nicetitle, maxchar=maxchar, template=template) if self._single_group: - for d, ax in zip(self.name_dicts.flat, self.axs.flat): + for d, ax in zip(self.name_dicts.flat, self.axs.flat, strict=True): # Only label the ones with data if d is not None: coord, value = list(d.items()).pop() @@ -902,7 +905,7 @@ def set_titles( else: # The row titles on the right edge of the grid for index, (ax, row_name, handle) in enumerate( - zip(self.axs[:, -1], self.row_names, self.row_labels) + zip(self.axs[:, -1], self.row_names, self.row_labels, strict=True) ): title = nicetitle(coord=self._row_var, value=row_name, maxchar=maxchar) if not handle: @@ -921,7 +924,7 @@ def set_titles( # The column titles on the top row for index, (ax, col_name, handle) in enumerate( - zip(self.axs[0, :], self.col_names, self.col_labels) + zip(self.axs[0, :], 
self.col_names, self.col_labels, strict=True) ): title = nicetitle(coord=self._col_var, value=col_name, maxchar=maxchar) if not handle: @@ -992,7 +995,7 @@ def map( """ import matplotlib.pyplot as plt - for ax, namedict in zip(self.axs.flat, self.name_dicts.flat): + for ax, namedict in zip(self.axs.flat, self.name_dicts.flat, strict=True): if namedict is not None: data = self.data.loc[namedict] plt.sca(ax) diff --git a/xarray/plot/utils.py b/xarray/plot/utils.py index 306ad1327c4..22d447316ca 100644 --- a/xarray/plot/utils.py +++ b/xarray/plot/utils.py @@ -45,7 +45,7 @@ try: import matplotlib.pyplot as plt except ImportError: - plt: Any = None # type: ignore + plt: Any = None # type: ignore[no-redef] ROBUST_PERCENTILE = 2.0 @@ -577,8 +577,12 @@ def _interval_to_double_bound_points( xarray1 = np.array([x.left for x in xarray]) xarray2 = np.array([x.right for x in xarray]) - xarray_out = np.array(list(itertools.chain.from_iterable(zip(xarray1, xarray2)))) - yarray_out = np.array(list(itertools.chain.from_iterable(zip(yarray, yarray)))) + xarray_out = np.array( + list(itertools.chain.from_iterable(zip(xarray1, xarray2, strict=True))) + ) + yarray_out = np.array( + list(itertools.chain.from_iterable(zip(yarray, yarray, strict=True))) + ) return xarray_out, yarray_out @@ -1148,7 +1152,7 @@ def _get_color_and_size(value): kw = dict(markeredgewidth=self.get_linewidths()[0], alpha=self.get_alpha()) kw.update(kwargs) - for val, lab in zip(values, label_values): + for val, lab in zip(values, label_values, strict=True): color, size = _get_color_and_size(val) if isinstance(self, mpl.collections.PathCollection): @@ -1347,7 +1351,7 @@ def _parse_size( widths = np.asarray(min_width + scl * (max_width - min_width)) if scl.mask.any(): widths[scl.mask] = 0 - sizes = dict(zip(levels, widths)) + sizes = dict(zip(levels, widths, strict=True)) return pd.Series(sizes) @@ -1606,7 +1610,7 @@ def _lookup(self) -> pd.Series: if self._values_unique is None: raise ValueError("self.data can't be None.") - return pd.Series(dict(zip(self._values_unique, self._data_unique))) + return pd.Series(dict(zip(self._values_unique, self._data_unique, strict=True))) def _lookup_arr(self, x) -> np.ndarray: # Use reindex to be less sensitive to float errors. reindex only @@ -1818,7 +1822,9 @@ def _guess_coords_to_plot( # one of related mpl kwargs has been used. This should have similar behaviour as # * plt.plot(x, y) -> Multiple lines with different colors if y is 2d. # * plt.plot(x, y, color="red") -> Multiple red lines if y is 2d. 
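Editor's note: a few call sites keep strict=False on purpose, such as the facet-grid axis-label loop above and the coordinate-guessing helper whose change continues below, because the zipped iterables are allowed to differ in length. A small illustration with made-up values:

# Editorial sketch: strict=False documents that truncation is intentional.
labels = ("time", "height")                     # only two labels supplied
axes = ("x", "y", "z")                          # three candidate axes
print(list(zip(labels, axes, strict=False)))    # [('time', 'x'), ('height', 'y')]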
- for k, dim, ign_kws in zip(default_guess, available_coords, ignore_guess_kwargs): + for k, dim, ign_kws in zip( + default_guess, available_coords, ignore_guess_kwargs, strict=False + ): if coords_to_plot.get(k, None) is None and all( kwargs.get(ign_kw, None) is None for ign_kw in ign_kws ): diff --git a/xarray/tests/test_array_api.py b/xarray/tests/test_array_api.py index 03c77e2365e..3ebb67cf1f3 100644 --- a/xarray/tests/test_array_api.py +++ b/xarray/tests/test_array_api.py @@ -68,7 +68,7 @@ def test_broadcast(arrays: tuple[xr.DataArray, xr.DataArray]) -> None: expected = xr.broadcast(np_arr, np_arr2) actual = xr.broadcast(xp_arr, xp_arr2) assert len(actual) == len(expected) - for a, e in zip(actual, expected): + for a, e in zip(actual, expected, strict=True): assert isinstance(a.data, Array) assert_equal(a, e) diff --git a/xarray/tests/test_assertions.py b/xarray/tests/test_assertions.py index 20b5e163662..2f5a8739b28 100644 --- a/xarray/tests/test_assertions.py +++ b/xarray/tests/test_assertions.py @@ -11,7 +11,7 @@ try: from dask.array import from_array as dask_from_array except ImportError: - dask_from_array = lambda x: x # type: ignore + dask_from_array = lambda x: x # type: ignore[assignment, misc] try: import pint diff --git a/xarray/tests/test_backends.py b/xarray/tests/test_backends.py index 17b4d2c5ba7..13258fcf6ea 100644 --- a/xarray/tests/test_backends.py +++ b/xarray/tests/test_backends.py @@ -1223,7 +1223,9 @@ def test_invalid_dataarray_names_raise(self) -> None: ve = (ValueError, "string must be length 1 or") data = np.random.random((2, 2)) da = xr.DataArray(data) - for name, (error, msg) in zip([0, (4, 5), True, ""], [te, te, te, ve]): + for name, (error, msg) in zip( + [0, (4, 5), True, ""], [te, te, te, ve], strict=True + ): ds = Dataset({name: da}) with pytest.raises(error) as excinfo: with self.roundtrip(ds): @@ -1708,7 +1710,7 @@ def test_base_chunking_uses_disk_chunk_sizes(self) -> None: open_kwargs={"chunks": {}}, ) as ds: for chunksizes, expected in zip( - ds["image"].data.chunks, (1, y_chunksize, x_chunksize) + ds["image"].data.chunks, (1, y_chunksize, x_chunksize), strict=True ): assert all(np.asanyarray(chunksizes) == expected) @@ -5944,7 +5946,7 @@ def test_zarr_region_index_write(self, tmp_path): ds.to_zarr(tmp_path / "test.zarr") region: Mapping[str, slice] | Literal["auto"] - for region in [region_slice, "auto"]: # type: ignore + for region in [region_slice, "auto"]: # type: ignore[assignment] with patch.object( ZarrStore, "set_variables", diff --git a/xarray/tests/test_backends_api.py b/xarray/tests/test_backends_api.py index 592065f34de..3a4b1d76287 100644 --- a/xarray/tests/test_backends_api.py +++ b/xarray/tests/test_backends_api.py @@ -86,7 +86,7 @@ def explicit_chunks(chunks, shape): if isinstance(chunk, Number) else chunk ) - for chunk, size in zip(chunks, shape) + for chunk, size in zip(chunks, shape, strict=True) ) @@ -104,7 +104,9 @@ def create_dataset(self, shape, pref_chunks): self.var_name: xr.Variable( dims, np.empty(shape, dtype=np.dtype("V1")), - encoding={"preferred_chunks": dict(zip(dims, pref_chunks))}, + encoding={ + "preferred_chunks": dict(zip(dims, pref_chunks, strict=True)) + }, ) } ) @@ -164,7 +166,7 @@ def test_split_chunks(self, shape, pref_chunks, req_chunks): final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, - chunks=dict(zip(initial[self.var_name].dims, req_chunks)), + chunks=dict(zip(initial[self.var_name].dims, req_chunks, strict=True)), ) self.check_dataset(initial, final, explicit_chunks(req_chunks, 
shape)) @@ -196,6 +198,6 @@ def test_join_chunks(self, shape, pref_chunks, req_chunks): final = xr.open_dataset( initial, engine=PassThroughBackendEntrypoint, - chunks=dict(zip(initial[self.var_name].dims, req_chunks)), + chunks=dict(zip(initial[self.var_name].dims, req_chunks, strict=True)), ) self.check_dataset(initial, final, explicit_chunks(req_chunks, shape)) diff --git a/xarray/tests/test_backends_datatree.py b/xarray/tests/test_backends_datatree.py index 604f27317b9..e84c77e54ed 100644 --- a/xarray/tests/test_backends_datatree.py +++ b/xarray/tests/test_backends_datatree.py @@ -64,7 +64,7 @@ def test_netcdf_encoding(self, tmpdir, simple_datatree): assert roundtrip_dt["/set2/a"].encoding["zlib"] == comp["zlib"] assert roundtrip_dt["/set2/a"].encoding["complevel"] == comp["complevel"] - enc["/not/a/group"] = {"foo": "bar"} # type: ignore + enc["/not/a/group"] = {"foo": "bar"} # type: ignore[dict-item] with pytest.raises(ValueError, match="unexpected encoding group.*"): original_dt.to_netcdf(filepath, encoding=enc, engine=self.engine) @@ -253,7 +253,7 @@ def test_zarr_encoding(self, tmpdir, simple_datatree): print(roundtrip_dt["/set2/a"].encoding) assert roundtrip_dt["/set2/a"].encoding["compressor"] == comp["compressor"] - enc["/not/a/group"] = {"foo": "bar"} # type: ignore + enc["/not/a/group"] = {"foo": "bar"} # type: ignore[dict-item] with pytest.raises(ValueError, match="unexpected encoding group.*"): original_dt.to_zarr(filepath, encoding=enc, engine="zarr") diff --git a/xarray/tests/test_backends_file_manager.py b/xarray/tests/test_backends_file_manager.py index cede3e66fcf..ab1ac4a06d9 100644 --- a/xarray/tests/test_backends_file_manager.py +++ b/xarray/tests/test_backends_file_manager.py @@ -53,7 +53,7 @@ def test_file_manager_autoclose(warn_for_unclosed_files) -> None: if warn_for_unclosed_files: ctx = pytest.warns(RuntimeWarning) else: - ctx = assert_no_warnings() # type: ignore + ctx = assert_no_warnings() # type: ignore[assignment] with set_options(warn_for_unclosed_files=warn_for_unclosed_files): with ctx: diff --git a/xarray/tests/test_backends_lru_cache.py b/xarray/tests/test_backends_lru_cache.py index 5735e0327a0..fead97b7d57 100644 --- a/xarray/tests/test_backends_lru_cache.py +++ b/xarray/tests/test_backends_lru_cache.py @@ -33,7 +33,7 @@ def test_trivial() -> None: def test_invalid() -> None: with pytest.raises(TypeError): - LRUCache(maxsize=None) # type: ignore + LRUCache(maxsize=None) # type: ignore[arg-type] with pytest.raises(ValueError): LRUCache(maxsize=-1) diff --git a/xarray/tests/test_cftime_offsets.py b/xarray/tests/test_cftime_offsets.py index c4f2f51bd33..11e56e2adad 100644 --- a/xarray/tests/test_cftime_offsets.py +++ b/xarray/tests/test_cftime_offsets.py @@ -425,7 +425,9 @@ def test_neq(a, b): ] -@pytest.mark.parametrize(("a", "b"), zip(_EQ_TESTS_B, _EQ_TESTS_B_COPY), ids=_id_func) +@pytest.mark.parametrize( + ("a", "b"), zip(_EQ_TESTS_B, _EQ_TESTS_B_COPY, strict=True), ids=_id_func +) def test_eq(a, b): assert a == b @@ -572,7 +574,9 @@ def test_sub_error(offset, calendar): offset - initial -@pytest.mark.parametrize(("a", "b"), zip(_EQ_TESTS_A, _EQ_TESTS_B), ids=_id_func) +@pytest.mark.parametrize( + ("a", "b"), zip(_EQ_TESTS_A, _EQ_TESTS_B, strict=True), ids=_id_func +) def test_minus_offset(a, b): result = b - a expected = a @@ -581,7 +585,7 @@ def test_minus_offset(a, b): @pytest.mark.parametrize( ("a", "b"), - list(zip(np.roll(_EQ_TESTS_A, 1), _EQ_TESTS_B)) # type: ignore[arg-type] + list(zip(np.roll(_EQ_TESTS_A, 1), _EQ_TESTS_B, strict=True)) # 
type: ignore[arg-type] + [(YearEnd(month=1), YearEnd(month=2))], ids=_id_func, ) diff --git a/xarray/tests/test_combine.py b/xarray/tests/test_combine.py index 1c48dca825d..41ad75b0fea 100644 --- a/xarray/tests/test_combine.py +++ b/xarray/tests/test_combine.py @@ -249,7 +249,10 @@ def create_combined_ids(): def _create_combined_ids(shape): tile_ids = _create_tile_ids(shape) nums = range(len(tile_ids)) - return {tile_id: create_test_data(num) for tile_id, num in zip(tile_ids, nums)} + return { + tile_id: create_test_data(num) + for tile_id, num in zip(tile_ids, nums, strict=True) + } def _create_tile_ids(shape): diff --git a/xarray/tests/test_computation.py b/xarray/tests/test_computation.py index ab3108e7056..3a50a3f1724 100644 --- a/xarray/tests/test_computation.py +++ b/xarray/tests/test_computation.py @@ -578,7 +578,7 @@ def func(*x): variables = [xr.Variable("x", a) for a in arrays] data_arrays = [ xr.DataArray(v, {"x": c, "y": ("x", range(len(c)))}) - for v, c in zip(variables, [["a"], ["b", "c"]]) + for v, c in zip(variables, [["a"], ["b", "c"]], strict=True) ] datasets = [xr.Dataset({"data": data_array}) for data_array in data_arrays] @@ -1190,7 +1190,7 @@ def test_apply_dask() -> None: # unknown setting for dask array handling with pytest.raises(ValueError): - apply_ufunc(identity, array, dask="unknown") # type: ignore + apply_ufunc(identity, array, dask="unknown") # type: ignore[arg-type] def dask_safe_identity(x): return apply_ufunc(identity, x, dask="allowed") diff --git a/xarray/tests/test_concat.py b/xarray/tests/test_concat.py index e0dc105c925..7f7f14c8f16 100644 --- a/xarray/tests/test_concat.py +++ b/xarray/tests/test_concat.py @@ -276,7 +276,10 @@ def test_concat_multiple_datasets_missing_vars(include_day: bool) -> None: expected[name][i : i + 1, ...] 
= np.nan # set up the test data - datasets = [ds.drop_vars(varname) for ds, varname in zip(datasets, vars_to_drop)] + datasets = [ + ds.drop_vars(varname) + for ds, varname in zip(datasets, vars_to_drop, strict=True) + ] actual = concat(datasets, dim="day") @@ -1326,12 +1329,12 @@ def test_concat_preserve_coordinate_order() -> None: actual = concat([ds1, ds2], dim="time") # check dimension order - for act, exp in zip(actual.dims, expected.dims): + for act, exp in zip(actual.dims, expected.dims, strict=True): assert act == exp assert actual.sizes[act] == expected.sizes[exp] # check coordinate order - for act, exp in zip(actual.coords, expected.coords): + for act, exp in zip(actual.coords, expected.coords, strict=True): assert act == exp assert_identical(actual.coords[act], expected.coords[exp]) @@ -1345,12 +1348,12 @@ def test_concat_typing_check() -> None: TypeError, match="The elements in the input list need to be either all 'Dataset's or all 'DataArray's", ): - concat([ds, da], dim="foo") # type: ignore + concat([ds, da], dim="foo") # type: ignore[type-var] with pytest.raises( TypeError, match="The elements in the input list need to be either all 'Dataset's or all 'DataArray's", ): - concat([da, ds], dim="foo") # type: ignore + concat([da, ds], dim="foo") # type: ignore[type-var] def test_concat_not_all_indexes() -> None: diff --git a/xarray/tests/test_coordinates.py b/xarray/tests/test_coordinates.py index f88e554d333..b167332d38b 100644 --- a/xarray/tests/test_coordinates.py +++ b/xarray/tests/test_coordinates.py @@ -64,7 +64,7 @@ def test_init_index_error(self) -> None: Coordinates(indexes={"x": idx}) with pytest.raises(TypeError, match=".* is not an `xarray.indexes.Index`"): - Coordinates(coords={"x": ("x", [1, 2, 3])}, indexes={"x": "not_an_xarray_index"}) # type: ignore + Coordinates(coords={"x": ("x", [1, 2, 3])}, indexes={"x": "not_an_xarray_index"}) # type: ignore[dict-item] def test_init_dim_sizes_conflict(self) -> None: with pytest.raises(ValueError): diff --git a/xarray/tests/test_dask.py b/xarray/tests/test_dask.py index 4cb0e3fc593..062f0525593 100644 --- a/xarray/tests/test_dask.py +++ b/xarray/tests/test_dask.py @@ -104,7 +104,8 @@ def test_chunk(self): self.assertLazyAndIdentical(self.eager_var, rechunked) expected_chunksizes = { - dim: chunks for dim, chunks in zip(self.lazy_var.dims, expected) + dim: chunks + for dim, chunks in zip(self.lazy_var.dims, expected, strict=True) } assert rechunked.chunksizes == expected_chunksizes @@ -354,7 +355,8 @@ def test_chunk(self) -> None: self.assertLazyAndIdentical(self.eager_array, rechunked) expected_chunksizes = { - dim: chunks for dim, chunks in zip(self.lazy_array.dims, expected) + dim: chunks + for dim, chunks in zip(self.lazy_array.dims, expected, strict=True) } assert rechunked.chunksizes == expected_chunksizes @@ -362,7 +364,8 @@ def test_chunk(self) -> None: lazy_dataset = self.lazy_array.to_dataset() eager_dataset = self.eager_array.to_dataset() expected_chunksizes = { - dim: chunks for dim, chunks in zip(lazy_dataset.dims, expected) + dim: chunks + for dim, chunks in zip(lazy_dataset.dims, expected, strict=True) } rechunked = lazy_dataset.chunk(chunks) @@ -737,7 +740,7 @@ def test_dataarray_getattr(self): nonindex_coord = build_dask_array("coord") a = DataArray(data, dims=["x"], coords={"y": ("x", nonindex_coord)}) with suppress(AttributeError): - getattr(a, "NOTEXIST") + a.NOTEXIST assert kernel_call_count == 0 def test_dataset_getattr(self): @@ -747,7 +750,7 @@ def test_dataset_getattr(self): nonindex_coord = 
build_dask_array("coord") ds = Dataset(data_vars={"a": ("x", data)}, coords={"y": ("x", nonindex_coord)}) with suppress(AttributeError): - getattr(ds, "NOTEXIST") + ds.NOTEXIST assert kernel_call_count == 0 def test_values(self): diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py index 3d1bb065193..49df5dcde2d 100644 --- a/xarray/tests/test_dataarray.py +++ b/xarray/tests/test_dataarray.py @@ -283,7 +283,7 @@ def test_sizes(self) -> None: assert array.sizes == {"x": 3, "y": 4} assert tuple(array.sizes) == array.dims with pytest.raises(TypeError): - array.sizes["foo"] = 5 # type: ignore + array.sizes["foo"] = 5 # type: ignore[index] def test_encoding(self) -> None: expected = {"foo": "bar"} @@ -575,8 +575,8 @@ def test_equals_and_identical(self) -> None: def test_equals_failures(self) -> None: orig = DataArray(np.arange(5.0), {"a": 42}, dims="x") assert not orig.equals(np.arange(5)) # type: ignore[arg-type] - assert not orig.identical(123) # type: ignore - assert not orig.broadcast_equals({1: 2}) # type: ignore + assert not orig.identical(123) # type: ignore[arg-type] + assert not orig.broadcast_equals({1: 2}) # type: ignore[arg-type] def test_broadcast_equals(self) -> None: a = DataArray([0, 0], {"y": 0}, dims="x") @@ -889,7 +889,7 @@ def test_chunk(self) -> None: first_dask_name = blocked.data.name with pytest.warns(DeprecationWarning): - blocked = unblocked.chunk(chunks=((2, 1), (2, 2))) # type: ignore + blocked = unblocked.chunk(chunks=((2, 1), (2, 2))) # type: ignore[arg-type] assert blocked.chunks == ((2, 1), (2, 2)) assert blocked.data.name != first_dask_name @@ -2226,7 +2226,7 @@ def from_variables(cls, variables, options): indexed = da.set_xindex("foo", IndexWithOptions, opt=1) assert "foo" in indexed.xindexes - assert getattr(indexed.xindexes["foo"], "opt") == 1 + assert indexed.xindexes["foo"].opt == 1 # type: ignore[attr-defined] def test_dataset_getitem(self) -> None: dv = self.ds["foo"] @@ -2707,7 +2707,7 @@ def test_drop_index_labels(self) -> None: assert_identical(actual, expected) with pytest.warns(DeprecationWarning): - arr.drop([0, 1, 3], dim="y", errors="ignore") # type: ignore + arr.drop([0, 1, 3], dim="y", errors="ignore") # type: ignore[arg-type] def test_drop_index_positions(self) -> None: arr = DataArray(np.random.randn(2, 3), dims=["x", "y"]) @@ -2913,7 +2913,8 @@ def test_reduce_out(self) -> None: @pytest.mark.parametrize("skipna", [True, False, None]) @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]]) @pytest.mark.parametrize( - "axis, dim", zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]]) + "axis, dim", + zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]], strict=True), ) def test_quantile(self, q, axis, dim, skipna, compute_backend) -> None: va = self.va.copy(deep=True) @@ -4055,7 +4056,7 @@ def test_dot(self) -> None: with pytest.raises(NotImplementedError): da.dot(dm3.to_dataset(name="dm")) with pytest.raises(TypeError): - da.dot(dm3.values) # type: ignore + da.dot(dm3.values) # type: ignore[type-var] def test_dot_align_coords(self) -> None: # GH 3694 @@ -4520,7 +4521,7 @@ def test_query( # test error handling with pytest.raises(ValueError): - aa.query("a > 5") # type: ignore # must be dict or kwargs + aa.query("a > 5") # type: ignore[arg-type] # must be dict or kwargs with pytest.raises(ValueError): aa.query(x=(a > 5)) # must be query string with pytest.raises(UndefinedVariableError): @@ -5342,7 +5343,7 @@ def test_min( minindex = [ x if y is None or ar.dtype.kind == "O" else y - for x, y in zip(minindex, 
nanindex) + for x, y in zip(minindex, nanindex, strict=True) ] expected2list = [ ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(minindex) @@ -5387,7 +5388,7 @@ def test_max( maxindex = [ x if y is None or ar.dtype.kind == "O" else y - for x, y in zip(maxindex, nanindex) + for x, y in zip(maxindex, nanindex, strict=True) ] expected2list = [ ar.isel(y=yi).isel(x=indi, drop=True) for yi, indi in enumerate(maxindex) @@ -5439,7 +5440,7 @@ def test_argmin( minindex = [ x if y is None or ar.dtype.kind == "O" else y - for x, y in zip(minindex, nanindex) + for x, y in zip(minindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) @@ -5492,7 +5493,7 @@ def test_argmax( maxindex = [ x if y is None or ar.dtype.kind == "O" else y - for x, y in zip(maxindex, nanindex) + for x, y in zip(maxindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) @@ -5588,7 +5589,7 @@ def test_idxmin( # skipna=False minindex3 = [ x if y is None or ar0.dtype.kind == "O" else y - for x, y in zip(minindex0, nanindex) + for x, y in zip(minindex0, nanindex, strict=True) ] expected3list = [ coordarr0.isel(y=yi).isel(x=indi, drop=True) @@ -5730,7 +5731,7 @@ def test_idxmax( # skipna=False maxindex3 = [ x if y is None or ar0.dtype.kind == "O" else y - for x, y in zip(maxindex0, nanindex) + for x, y in zip(maxindex0, nanindex, strict=True) ] expected3list = [ coordarr0.isel(y=yi).isel(x=indi, drop=True) @@ -5830,7 +5831,7 @@ def test_argmin_dim( minindex = [ x if y is None or ar.dtype.kind == "O" else y - for x, y in zip(minindex, nanindex) + for x, y in zip(minindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) @@ -5897,7 +5898,7 @@ def test_argmax_dim( maxindex = [ x if y is None or ar.dtype.kind == "O" else y - for x, y in zip(maxindex, nanindex) + for x, y in zip(maxindex, nanindex, strict=True) ] expected2list = [ indarr.isel(y=yi).isel(x=indi, drop=True) @@ -6650,7 +6651,7 @@ def test_to_and_from_iris(self) -> None: ), ) - for coord, original_key in zip((actual.coords()), original.coords): + for coord, original_key in zip((actual.coords()), original.coords, strict=True): original_coord = original.coords[original_key] assert coord.var_name == original_coord.name assert_array_equal( @@ -6726,7 +6727,7 @@ def test_to_and_from_iris_dask(self) -> None: ), ) - for coord, original_key in zip((actual.coords()), original.coords): + for coord, original_key in zip((actual.coords()), original.coords, strict=True): original_coord = original.coords[original_key] assert coord.var_name == original_coord.name assert_array_equal( @@ -7159,7 +7160,7 @@ def test_result_as_expected(self) -> None: def test_error_on_ellipsis_without_list(self) -> None: da = DataArray([[1, 2], [1, 2]], dims=("x", "y")) with pytest.raises(ValueError): - da.stack(flat=...) # type: ignore + da.stack(flat=...) 
# type: ignore[arg-type] def test_nD_coord_dataarray() -> None: diff --git a/xarray/tests/test_dataset.py b/xarray/tests/test_dataset.py index 960de3ec29e..fc2b2251c2c 100644 --- a/xarray/tests/test_dataset.py +++ b/xarray/tests/test_dataset.py @@ -582,7 +582,7 @@ def test_constructor_pandas_single(self) -> None: for a in das: pandas_obj = a.to_pandas() - ds_based_on_pandas = Dataset(pandas_obj) # type: ignore # TODO: improve typing of __init__ + ds_based_on_pandas = Dataset(pandas_obj) # type: ignore[arg-type] # TODO: improve typing of __init__ for dim in ds_based_on_pandas.data_vars: assert isinstance(dim, int) assert_array_equal(ds_based_on_pandas[dim], pandas_obj[dim]) @@ -3212,7 +3212,7 @@ def test_rename_perserve_attrs_encoding(self) -> None: # test propagate attrs/encoding to new variable(s) created from Index object original = Dataset(coords={"x": ("x", [0, 1, 2])}) expected = Dataset(coords={"y": ("y", [0, 1, 2])}) - for ds, dim in zip([original, expected], ["x", "y"]): + for ds, dim in zip([original, expected], ["x", "y"], strict=True): ds[dim].attrs = {"foo": "bar"} ds[dim].encoding = {"foo": "bar"} @@ -3713,7 +3713,7 @@ def test_set_xindex(self) -> None: class NotAnIndex: ... with pytest.raises(TypeError, match=".*not a subclass of xarray.Index"): - ds.set_xindex("foo", NotAnIndex) # type: ignore + ds.set_xindex("foo", NotAnIndex) # type: ignore[arg-type] with pytest.raises(ValueError, match="those variables don't exist"): ds.set_xindex("not_a_coordinate", PandasIndex) @@ -3740,7 +3740,7 @@ def from_variables(cls, variables, options): return cls(options["opt"]) indexed = ds.set_xindex("foo", IndexWithOptions, opt=1) - assert getattr(indexed.xindexes["foo"], "opt") == 1 + assert indexed.xindexes["foo"].opt == 1 # type: ignore[attr-defined] def test_stack(self) -> None: ds = Dataset( @@ -6450,8 +6450,8 @@ def test_full_like(self) -> None: expected = ds.copy(deep=True) # https://github.com/python/mypy/issues/3004 - expected["d1"].values = [2, 2, 2] # type: ignore - expected["d2"].values = [2.0, 2.0, 2.0] # type: ignore + expected["d1"].values = [2, 2, 2] # type: ignore[assignment] + expected["d2"].values = [2.0, 2.0, 2.0] # type: ignore[assignment] assert expected["d1"].dtype == int assert expected["d2"].dtype == float assert_identical(expected, actual) @@ -6459,8 +6459,8 @@ def test_full_like(self) -> None: # override dtype actual = full_like(ds, fill_value=True, dtype=bool) expected = ds.copy(deep=True) - expected["d1"].values = [True, True, True] # type: ignore - expected["d2"].values = [True, True, True] # type: ignore + expected["d1"].values = [True, True, True] # type: ignore[assignment] + expected["d2"].values = [True, True, True] # type: ignore[assignment] assert expected["d1"].dtype == bool assert expected["d2"].dtype == bool assert_identical(expected, actual) @@ -6975,7 +6975,7 @@ def test_query(self, backend, engine, parser) -> None: # test error handling with pytest.raises(ValueError): - ds.query("a > 5") # type: ignore # must be dict or kwargs + ds.query("a > 5") # type: ignore[arg-type] # must be dict or kwargs with pytest.raises(ValueError): ds.query(x=(a > 5)) with pytest.raises(IndexError): @@ -7615,4 +7615,4 @@ def test_transpose_error() -> None: "transpose requires dim to be passed as multiple arguments. Expected `'y', 'x'`. 
Received `['y', 'x']` instead" ), ): - ds.transpose(["y", "x"]) # type: ignore + ds.transpose(["y", "x"]) # type: ignore[arg-type] diff --git a/xarray/tests/test_datatree.py b/xarray/tests/test_datatree.py index f1f74d240f0..6d208a5cf98 100644 --- a/xarray/tests/test_datatree.py +++ b/xarray/tests/test_datatree.py @@ -40,7 +40,7 @@ def test_data_arg(self): assert_identical(tree.to_dataset(), ds) with pytest.raises(TypeError): - DataTree(data=xr.DataArray(42, name="foo")) # type: ignore + DataTree(data=xr.DataArray(42, name="foo")) # type: ignore[arg-type] class TestFamilyTree: @@ -344,7 +344,9 @@ def test_copy(self, create_test_datatree): for copied in [dt.copy(deep=False), copy(dt)]: assert_identical(dt, copied) - for node, copied_node in zip(dt.root.subtree, copied.root.subtree): + for node, copied_node in zip( + dt.root.subtree, copied.root.subtree, strict=True + ): assert node.encoding == copied_node.encoding # Note: IndexVariable objects with string dtype are always # copied because of xarray.core.util.safe_cast_to_index. @@ -385,7 +387,9 @@ def test_deepcopy(self, create_test_datatree): for copied in [dt.copy(deep=True), deepcopy(dt)]: assert_identical(dt, copied) - for node, copied_node in zip(dt.root.subtree, copied.root.subtree): + for node, copied_node in zip( + dt.root.subtree, copied.root.subtree, strict=True + ): assert node.encoding == copied_node.encoding # Note: IndexVariable objects with string dtype are always # copied because of xarray.core.util.safe_cast_to_index. @@ -665,7 +669,7 @@ def test_insertion_order(self): def test_array_values(self): data = {"foo": xr.DataArray(1, name="bar")} with pytest.raises(TypeError): - DataTree.from_dict(data) # type: ignore + DataTree.from_dict(data) # type: ignore[arg-type] class TestDatasetView: @@ -741,7 +745,7 @@ def test_attribute_access(self, create_test_datatree): assert key in dir(dt) # dims - assert_equal(dt["a"]["y"], getattr(dt.a, "y")) + assert_equal(dt["a"]["y"], dt.a.y) assert "y" in dir(dt["a"]) # children @@ -951,7 +955,7 @@ def test_inconsistent_child_indexes(self): ) dt = DataTree() - dt.ds = xr.Dataset(coords={"x": [1.0]}) # type: ignore + dt.ds = xr.Dataset(coords={"x": [1.0]}) # type: ignore[assignment] dt["/b"] = DataTree() with pytest.raises(ValueError, match=expected_msg): dt["/b"].ds = xr.Dataset(coords={"x": [2.0]}) @@ -986,7 +990,7 @@ def test_inconsistent_grandchild_indexes(self): ) dt = DataTree() - dt.ds = xr.Dataset(coords={"x": [1.0]}) # type: ignore + dt.ds = xr.Dataset(coords={"x": [1.0]}) # type: ignore[assignment] dt["/b/c"] = DataTree() with pytest.raises(ValueError, match=expected_msg): dt["/b/c"].ds = xr.Dataset(coords={"x": [2.0]}) diff --git a/xarray/tests/test_duck_array_ops.py b/xarray/tests/test_duck_array_ops.py index 3bbae55b105..da263f1b30e 100644 --- a/xarray/tests/test_duck_array_ops.py +++ b/xarray/tests/test_duck_array_ops.py @@ -112,7 +112,9 @@ def test_first(self): array([[8, 5, 2, nan], [nan, 13, 14, 15]]), array([[2, 5, 8], [13, 17, 21]]), ] - for axis, expected in zip([0, 1, 2, -3, -2, -1], 2 * expected_results): + for axis, expected in zip( + [0, 1, 2, -3, -2, -1], 2 * expected_results, strict=True + ): actual = first(self.x, axis) assert_array_equal(expected, actual) @@ -133,7 +135,9 @@ def test_last(self): array([[8, 9, 10, nan], [nan, 21, 18, 15]]), array([[2, 6, 10], [15, 18, 21]]), ] - for axis, expected in zip([0, 1, 2, -3, -2, -1], 2 * expected_results): + for axis, expected in zip( + [0, 1, 2, -3, -2, -1], 2 * expected_results, strict=True + ): actual = last(self.x, axis) 
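Editor's note: many hunks in this patch narrow bare "# type: ignore" comments to a specific error code. A toy sketch of the difference; the function below is invented:

# Editorial sketch: a bare "# type: ignore" silences every mypy error on the line,
# while an error-code-qualified ignore suppresses only the named diagnostic, so
# unrelated problems introduced later on the same line still get reported.
def parse(raw: str) -> int:
    return raw  # type: ignore[return-value]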
assert_array_equal(expected, actual) diff --git a/xarray/tests/test_extensions.py b/xarray/tests/test_extensions.py index 7cfffd68620..92df269cb4f 100644 --- a/xarray/tests/test_extensions.py +++ b/xarray/tests/test_extensions.py @@ -51,12 +51,12 @@ def foo(self): # check descriptor assert ds.demo.__doc__ == "Demo accessor." # TODO: typing doesn't seem to work with accessors - assert xr.Dataset.demo.__doc__ == "Demo accessor." # type: ignore + assert xr.Dataset.demo.__doc__ == "Demo accessor." # type: ignore[attr-defined] assert isinstance(ds.demo, DemoAccessor) - assert xr.Dataset.demo is DemoAccessor # type: ignore + assert xr.Dataset.demo is DemoAccessor # type: ignore[attr-defined] # ensure we can remove it - del xr.Dataset.demo # type: ignore + del xr.Dataset.demo # type: ignore[attr-defined] assert not hasattr(xr.Dataset, "demo") with pytest.warns(Warning, match="overriding a preexisting attribute"): diff --git a/xarray/tests/test_formatting.py b/xarray/tests/test_formatting.py index e7076151314..a2fef9d9b6b 100644 --- a/xarray/tests/test_formatting.py +++ b/xarray/tests/test_formatting.py @@ -615,7 +615,7 @@ def test_array_scalar_format(self) -> None: # Test numpy arrays raises: var = xr.DataArray([0.1, 0.2]) - with pytest.raises(NotImplementedError) as excinfo: # type: ignore + with pytest.raises(NotImplementedError) as excinfo: # type: ignore[assignment] format(var, ".2f") assert "Using format_spec is only supported" in str(excinfo.value) @@ -652,7 +652,9 @@ def test_datatree_print_node_with_data(self): "Data variables", "*empty*", ] - for expected_line, printed_line in zip(expected, printout.splitlines()): + for expected_line, printed_line in zip( + expected, printout.splitlines(), strict=True + ): assert expected_line in printed_line def test_datatree_printout_nested_node(self): @@ -844,7 +846,7 @@ def test__mapping_repr(display_max_rows, n_vars, n_attr) -> None: attrs = {k: 2 for k in b} coords = {_c: np.array([0, 1], dtype=np.uint64) for _c in c} data_vars = dict() - for v, _c in zip(a, coords.items()): + for v, _c in zip(a, coords.items(), strict=True): data_vars[v] = xr.DataArray( name=v, data=np.array([3, 4], dtype=np.uint64), diff --git a/xarray/tests/test_groupby.py b/xarray/tests/test_groupby.py index 906a015544b..fa6172c5d66 100644 --- a/xarray/tests/test_groupby.py +++ b/xarray/tests/test_groupby.py @@ -788,7 +788,7 @@ def test_groupby_dataset() -> None: ("b", data.isel(x=[1])), ("c", data.isel(x=[2])), ] - for actual1, expected1 in zip(groupby, expected_items): + for actual1, expected1 in zip(groupby, expected_items, strict=True): assert actual1[0] == expected1[0] assert_equal(actual1[1], expected1[1]) @@ -1235,12 +1235,12 @@ def test_stack_groupby_unsorted_coord(self) -> None: def test_groupby_iter(self) -> None: for (act_x, act_dv), (exp_x, exp_ds) in zip( - self.dv.groupby("y"), self.ds.groupby("y") + self.dv.groupby("y"), self.ds.groupby("y"), strict=True ): assert exp_x == act_x assert_identical(exp_ds["foo"], act_dv) for (_, exp_dv), (_, act_dv) in zip( - self.dv.groupby("x"), self.dv.groupby("x") + self.dv.groupby("x"), self.dv.groupby("x"), strict=True ): assert_identical(exp_dv, act_dv) @@ -1706,7 +1706,7 @@ def test_groupby_bins_multidim(self) -> None: bincoord = np.array( [ pd.Interval(left, right, closed="right") - for left, right in zip(bins[:-1], bins[1:]) + for left, right in zip(bins[:-1], bins[1:], strict=True) ], dtype=object, ) @@ -2723,7 +2723,7 @@ def test_multiple_groupers_string(as_dataset) -> None: ) if as_dataset: - obj = obj.to_dataset() # 
type: ignore + obj = obj.to_dataset() # type: ignore[assignment] expected = obj.groupby(labels1=UniqueGrouper(), labels2=UniqueGrouper()).mean() actual = obj.groupby(("labels1", "labels2")).mean() @@ -2733,9 +2733,9 @@ def test_multiple_groupers_string(as_dataset) -> None: # warning & type error in the future with pytest.warns(FutureWarning): with pytest.raises(TypeError): - obj.groupby("labels1", "labels2") # type: ignore + obj.groupby("labels1", "labels2") # type: ignore[arg-type, misc] with pytest.raises(ValueError): - obj.groupby("labels1", foo="bar") # type: ignore + obj.groupby("labels1", foo="bar") # type: ignore[arg-type] with pytest.raises(ValueError): obj.groupby("labels1", foo=UniqueGrouper()) diff --git a/xarray/tests/test_indexes.py b/xarray/tests/test_indexes.py index 48e254b037b..cf14e5c8f43 100644 --- a/xarray/tests/test_indexes.py +++ b/xarray/tests/test_indexes.py @@ -674,13 +674,15 @@ def test_copy_indexes(self, indexes) -> None: copied, index_vars = indexes.copy_indexes() assert copied.keys() == indexes.keys() - for new, original in zip(copied.values(), indexes.values()): + for new, original in zip(copied.values(), indexes.values(), strict=True): assert new.equals(original) # check unique index objects preserved assert copied["z"] is copied["one"] is copied["two"] assert index_vars.keys() == indexes.variables.keys() - for new, original in zip(index_vars.values(), indexes.variables.values()): + for new, original in zip( + index_vars.values(), indexes.variables.values(), strict=True + ): assert_identical(new, original) diff --git a/xarray/tests/test_interp.py b/xarray/tests/test_interp.py index 7151c669fbc..5c03881242b 100644 --- a/xarray/tests/test_interp.py +++ b/xarray/tests/test_interp.py @@ -406,7 +406,7 @@ def test_errors(use_dask: bool) -> None: for method in ["akima", "spline"]: with pytest.raises(ValueError): - da.interp(x=[0.5, 1.5], method=method) # type: ignore + da.interp(x=[0.5, 1.5], method=method) # type: ignore[arg-type] # not sorted if use_dask: @@ -421,9 +421,9 @@ def test_errors(use_dask: bool) -> None: # invalid method with pytest.raises(ValueError): - da.interp(x=[2, 0], method="boo") # type: ignore + da.interp(x=[2, 0], method="boo") # type: ignore[arg-type] with pytest.raises(ValueError): - da.interp(y=[2, 0], method="boo") # type: ignore + da.interp(y=[2, 0], method="boo") # type: ignore[arg-type] # object-type DataArray cannot be interpolated da = xr.DataArray(["a", "b", "c"], dims="x", coords={"x": [0, 1, 2]}) diff --git a/xarray/tests/test_missing.py b/xarray/tests/test_missing.py index bd75f633b82..bf90074a7cc 100644 --- a/xarray/tests/test_missing.py +++ b/xarray/tests/test_missing.py @@ -600,9 +600,8 @@ def test_get_clean_interp_index_cf_calendar(cf_da, calendar): @requires_cftime -@pytest.mark.parametrize( - ("calendar", "freq"), zip(["gregorian", "proleptic_gregorian"], ["1D", "1ME", "1Y"]) -) +@pytest.mark.parametrize("calendar", ["gregorian", "proleptic_gregorian"]) +@pytest.mark.parametrize("freq", ["1D", "1ME", "1YE"]) def test_get_clean_interp_index_dt(cf_da, calendar, freq): """In the gregorian case, the index should be proportional to normal datetimes.""" g = cf_da(calendar, freq=freq) diff --git a/xarray/tests/test_plot.py b/xarray/tests/test_plot.py index c410f3a2fd5..3d47f3e1803 100644 --- a/xarray/tests/test_plot.py +++ b/xarray/tests/test_plot.py @@ -1146,6 +1146,7 @@ def test_norm_sets_vmin_vmax(self) -> None: ], ["neither", "neither", "both", "max", "min"], [7, None, None, None, None], + strict=True, ): test_min = vmin if 
norm.vmin is None else norm.vmin test_max = vmax if norm.vmax is None else norm.vmax @@ -1167,7 +1168,7 @@ def setUp(self): y = np.arange(start=9, stop=-7, step=-3) xy = np.dstack(np.meshgrid(x, y)) distance = np.linalg.norm(xy, axis=2) - self.darray = DataArray(distance, list(zip(("y", "x"), (y, x)))) + self.darray = DataArray(distance, list(zip(("y", "x"), (y, x), strict=True))) self.data_min = distance.min() self.data_max = distance.max() yield @@ -1862,7 +1863,7 @@ def test_dont_infer_interval_breaks_for_cartopy(self) -> None: # Regression for GH 781 ax = plt.gca() # Simulate a Cartopy Axis - setattr(ax, "projection", True) + ax.projection = True # type: ignore[attr-defined] artist = self.plotmethod(x="x2d", y="y2d", ax=ax) assert isinstance(artist, mpl.collections.QuadMesh) # Let cartopy handle the axis limits and artist size @@ -2208,7 +2209,7 @@ def test_no_args(self) -> None: def test_names_appear_somewhere(self) -> None: self.darray.name = "testvar" self.g.map_dataarray(xplt.contourf, "x", "y") - for k, ax in zip("abc", self.g.axs.flat): + for k, ax in zip("abc", self.g.axs.flat, strict=True): assert f"z = {k}" == ax.get_title() alltxt = text_in_fig() @@ -2450,11 +2451,15 @@ def test_title_kwargs(self) -> None: g.set_titles(template="{value}", weight="bold") # Rightmost column titles should be bold - for label, ax in zip(self.darray.coords["row"].values, g.axs[:, -1]): + for label, ax in zip( + self.darray.coords["row"].values, g.axs[:, -1], strict=True + ): assert property_in_axes_text("weight", "bold", label, ax) # Top row titles should be bold - for label, ax in zip(self.darray.coords["col"].values, g.axs[0, :]): + for label, ax in zip( + self.darray.coords["col"].values, g.axs[0, :], strict=True + ): assert property_in_axes_text("weight", "bold", label, ax) @pytest.mark.slow @@ -2465,21 +2470,29 @@ def test_default_labels(self) -> None: g.map_dataarray(xplt.imshow, "x", "y") # Rightmost column should be labeled - for label, ax in zip(self.darray.coords["row"].values, g.axs[:, -1]): + for label, ax in zip( + self.darray.coords["row"].values, g.axs[:, -1], strict=True + ): assert substring_in_axes(label, ax) # Top row should be labeled - for label, ax in zip(self.darray.coords["col"].values, g.axs[0, :]): + for label, ax in zip( + self.darray.coords["col"].values, g.axs[0, :], strict=True + ): assert substring_in_axes(label, ax) # ensure that row & col labels can be changed g.set_titles("abc={value}") - for label, ax in zip(self.darray.coords["row"].values, g.axs[:, -1]): + for label, ax in zip( + self.darray.coords["row"].values, g.axs[:, -1], strict=True + ): assert substring_in_axes(f"abc={label}", ax) # previous labels were "row=row0" etc. assert substring_not_in_axes("row=", ax) - for label, ax in zip(self.darray.coords["col"].values, g.axs[0, :]): + for label, ax in zip( + self.darray.coords["col"].values, g.axs[0, :], strict=True + ): assert substring_in_axes(f"abc={label}", ax) # previous labels were "col=row0" etc. 
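Editor's note: the test_get_clean_interp_index_dt change in test_missing.py above replaces a single zip-based parametrize, which paired only two of the three frequencies, with two stacked decorators. A sketch of the resulting cross product; the test body is a placeholder:

# Editorial sketch: stacked parametrize decorators yield the full 2 x 3 product,
# whereas zip() over lists of lengths 2 and 3 silently produced only 2 cases.
import pytest

@pytest.mark.parametrize("calendar", ["gregorian", "proleptic_gregorian"])
@pytest.mark.parametrize("freq", ["1D", "1ME", "1YE"])
def test_cross_product(calendar, freq):
    # six (calendar, freq) combinations are collected
    assert calendar and freq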
assert substring_not_in_axes("col=", ax) @@ -2534,11 +2547,15 @@ def test_unnamed_args(self) -> None: def test_default_labels(self) -> None: g = self.darray.plot(row="row", col="col", hue="hue") # type: ignore[call-arg] # Rightmost column should be labeled - for label, ax in zip(self.darray.coords["row"].values, g.axs[:, -1]): + for label, ax in zip( + self.darray.coords["row"].values, g.axs[:, -1], strict=True + ): assert substring_in_axes(label, ax) # Top row should be labeled - for label, ax in zip(self.darray.coords["col"].values, g.axs[0, :]): + for label, ax in zip( + self.darray.coords["col"].values, g.axs[0, :], strict=True + ): assert substring_in_axes(str(label), ax) # Leftmost column should have array name @@ -2784,7 +2801,7 @@ def test_default_labels(self) -> None: g = self.ds.plot.scatter(x="A", y="B", row="row", col="col", hue="hue") # Top row should be labeled - for label, ax in zip(self.ds.coords["col"].values, g.axs[0, :]): + for label, ax in zip(self.ds.coords["col"].values, g.axs[0, :], strict=True): assert substring_in_axes(str(label), ax) # Bottom row should have name of x array name and units diff --git a/xarray/tests/test_rolling.py b/xarray/tests/test_rolling.py index 79869e63ae7..9d880969a82 100644 --- a/xarray/tests/test_rolling.py +++ b/xarray/tests/test_rolling.py @@ -312,7 +312,7 @@ def test_rolling_count_correct(self, compute_backend) -> None: DataArray([np.nan, np.nan, 2, 3, 3, 4, 5, 5, 5, 5, 5], dims="time"), ] - for kwarg, expected in zip(kwargs, expecteds): + for kwarg, expected in zip(kwargs, expecteds, strict=True): result = da.rolling(**kwarg).count() assert_equal(result, expected) diff --git a/xarray/tests/test_strategies.py b/xarray/tests/test_strategies.py index 79ae4769005..9fdf46b0d85 100644 --- a/xarray/tests/test_strategies.py +++ b/xarray/tests/test_strategies.py @@ -138,7 +138,9 @@ def fixed_array_strategy_fn(*, shape=None, dtype=None): return st.just(arr) dim_names = data.draw(dimension_names(min_dims=arr.ndim, max_dims=arr.ndim)) - dim_sizes = {name: size for name, size in zip(dim_names, arr.shape)} + dim_sizes = { + name: size for name, size in zip(dim_names, arr.shape, strict=True) + } var = data.draw( variables( diff --git a/xarray/tests/test_treenode.py b/xarray/tests/test_treenode.py index 3996edba659..d9d581cc314 100644 --- a/xarray/tests/test_treenode.py +++ b/xarray/tests/test_treenode.py @@ -304,7 +304,7 @@ def test_ancestors(self): _, leaf_f = create_test_tree() ancestors = leaf_f.ancestors expected = ["a", "b", "e", "f"] - for node, expected_name in zip(ancestors, expected): + for node, expected_name in zip(ancestors, expected, strict=True): assert node.name == expected_name def test_subtree(self): @@ -321,7 +321,7 @@ def test_subtree(self): "g", "i", ] - for node, expected_name in zip(subtree, expected): + for node, expected_name in zip(subtree, expected, strict=True): assert node.name == expected_name def test_descendants(self): @@ -337,7 +337,7 @@ def test_descendants(self): "g", "i", ] - for node, expected_name in zip(descendants, expected): + for node, expected_name in zip(descendants, expected, strict=True): assert node.name == expected_name def test_leaves(self): @@ -349,7 +349,7 @@ def test_leaves(self): "g", "i", ] - for node, expected_name in zip(leaves, expected): + for node, expected_name in zip(leaves, expected, strict=True): assert node.name == expected_name def test_levels(self): @@ -387,7 +387,7 @@ def test_render_nodetree(self): john_nodes = john_repr.splitlines() assert len(john_nodes) == len(expected_nodes) - for 
expected_node, repr_node in zip(expected_nodes, john_nodes): + for expected_node, repr_node in zip(expected_nodes, john_nodes, strict=True): assert expected_node == repr_node diff --git a/xarray/tests/test_units.py b/xarray/tests/test_units.py index 232a35d8ea0..ced569ffeab 100644 --- a/xarray/tests/test_units.py +++ b/xarray/tests/test_units.py @@ -1997,7 +1997,7 @@ def test_masking(self, func, unit, error, dtype): def test_squeeze(self, dim, dtype): shape = (2, 1, 3, 1, 1, 2) names = list("abcdef") - dim_lengths = dict(zip(names, shape)) + dim_lengths = dict(zip(names, shape, strict=True)) array = np.ones(shape=shape) * unit_registry.m variable = xr.Variable(names, array) @@ -3429,7 +3429,7 @@ def test_drop_sel(self, raw_values, unit, error, dtype): ) def test_squeeze(self, shape, dim, dtype): names = "xyzt" - dim_lengths = dict(zip(names, shape)) + dim_lengths = dict(zip(names, shape, strict=False)) names = "xyzt" array = np.arange(10 * 20).astype(dtype).reshape(shape) * unit_registry.J data_array = xr.DataArray(data=array, dims=tuple(names[: len(shape)])) @@ -3659,7 +3659,10 @@ def test_to_unstacked_dataset(self, dtype): expected = attach_units( func(strip_units(data_array)), - {"y": y.units, **dict(zip(x.magnitude, [array.units] * len(y)))}, + { + "y": y.units, + **dict(zip(x.magnitude, [array.units] * len(y), strict=True)), + }, ).rename({elem.magnitude: elem for elem in x}) actual = func(data_array) @@ -5072,7 +5075,7 @@ def test_head_tail_thin(self, func, variant, dtype): ) def test_squeeze(self, shape, dim, dtype): names = "xyzt" - dim_lengths = dict(zip(names, shape)) + dim_lengths = dict(zip(names, shape, strict=False)) array1 = ( np.linspace(0, 1, 10 * 20).astype(dtype).reshape(shape) * unit_registry.degK ) diff --git a/xarray/tests/test_variable.py b/xarray/tests/test_variable.py index 5f7ee266774..9e2e12fc045 100644 --- a/xarray/tests/test_variable.py +++ b/xarray/tests/test_variable.py @@ -1017,7 +1017,7 @@ def test_nd_rolling(self, center, dims): fill_value=np.nan, ) expected = x - for dim, win, cent in zip(dims, window, center): + for dim, win, cent in zip(dims, window, center, strict=True): expected = expected.rolling_window( dim=dim, window=win, @@ -1806,7 +1806,8 @@ def raise_if_called(*args, **kwargs): @pytest.mark.parametrize("skipna", [True, False, None]) @pytest.mark.parametrize("q", [0.25, [0.50], [0.25, 0.75]]) @pytest.mark.parametrize( - "axis, dim", zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]]) + "axis, dim", + zip([None, 0, [0], [0, 1]], [None, "x", ["x"], ["x", "y"]], strict=True), ) def test_quantile(self, q, axis, dim, skipna): d = self.d.copy() diff --git a/xarray/tests/test_weighted.py b/xarray/tests/test_weighted.py index f3337d70a76..93a200c07a6 100644 --- a/xarray/tests/test_weighted.py +++ b/xarray/tests/test_weighted.py @@ -24,7 +24,7 @@ def test_weighted_non_DataArray_weights(as_dataset: bool) -> None: data = data.to_dataset(name="data") with pytest.raises(ValueError, match=r"`weights` must be a DataArray"): - data.weighted([1, 2]) # type: ignore + data.weighted([1, 2]) # type: ignore[arg-type] @pytest.mark.parametrize("as_dataset", (True, False)) diff --git a/xarray/util/deprecation_helpers.py b/xarray/util/deprecation_helpers.py index c8e594508d9..0cdee2bd564 100644 --- a/xarray/util/deprecation_helpers.py +++ b/xarray/util/deprecation_helpers.py @@ -108,7 +108,9 @@ def inner(*args, **kwargs): stacklevel=2, ) - zip_args = zip(kwonly_args[:n_extra_args], args[-n_extra_args:]) + zip_args = zip( + kwonly_args[:n_extra_args], 
args[-n_extra_args:], strict=True + ) kwargs.update({name: arg for name, arg in zip_args}) return func(*args[:-n_extra_args], **kwargs) @@ -142,4 +144,4 @@ def wrapper(*args, **kwargs): # We're quite confident we're just returning `T` from this function, so it's fine to ignore typing # within the function. - return wrapper # type: ignore + return wrapper # type: ignore[return-value]
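Editor's note: several test hunks above (test_dask.py, test_datatree.py) replace getattr(obj, "literal") with plain attribute access. A tiny sketch of the equivalence; the class and attribute are invented:

# Editorial sketch: getattr() with a string literal is just attribute access;
# the direct form is shorter and lets linters and type checkers see the attribute.
class Box:
    value = 42

assert getattr(Box, "value") == Box.value  # the patch prefers the right-hand form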