diff --git a/pyproject.toml b/pyproject.toml
index 01281a9ce72..368a332626a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -258,6 +258,7 @@ extend-select = [
   "PERF", # Perflint
   "W",    # pycodestyle warnings
   "PGH",  # pygrep-hooks
+  "PLE",  # Pylint Errors
   "UP",   # pyupgrade
   "FURB", # refurb
   "RUF",
diff --git a/xarray/backends/common.py b/xarray/backends/common.py
index 58a98598a5b..cc680ecfc2b 100644
--- a/xarray/backends/common.py
+++ b/xarray/backends/common.py
@@ -338,11 +338,10 @@ def add(self, source, target, region=None):
             self.sources.append(source)
             self.targets.append(target)
             self.regions.append(region)
+        elif region:
+            target[region] = source
         else:
-            if region:
-                target[region] = source
-            else:
-                target[...] = source
+            target[...] = source

     def sync(self, compute=True, chunkmanager_store_kwargs=None):
         if self.sources:
diff --git a/xarray/coding/cftime_offsets.py b/xarray/coding/cftime_offsets.py
index bb70da34f18..510e9dafad8 100644
--- a/xarray/coding/cftime_offsets.py
+++ b/xarray/coding/cftime_offsets.py
@@ -279,9 +279,8 @@ def _adjust_n_years(other, n, month, reference_day):
     if n > 0:
         if other.month < month or (other.month == month and other.day < reference_day):
             n -= 1
-    else:
-        if other.month > month or (other.month == month and other.day > reference_day):
-            n += 1
+    elif other.month > month or (other.month == month and other.day > reference_day):
+        n += 1
     return n


@@ -353,12 +352,11 @@ def roll_qtrday(
             # pretend to roll back if on same month but
            # before compare_day
             n -= 1
-    else:
-        if months_since > 0 or (
-            months_since == 0 and other.day > _get_day_of_month(other, day_option)
-        ):
-            # make sure to roll forward, so negate
-            n += 1
+    elif months_since > 0 or (
+        months_since == 0 and other.day > _get_day_of_month(other, day_option)
+    ):
+        # make sure to roll forward, so negate
+        n += 1
     return n


@@ -815,13 +813,12 @@ def delta_to_tick(delta: timedelta | pd.Timedelta) -> Tick:
             return Minute(n=seconds // 60)
         else:
             return Second(n=seconds)
+    # Regardless of the days and seconds this will always be a Millisecond
+    # or Microsecond object
+    elif delta.microseconds % 1_000 == 0:
+        return Millisecond(n=delta.microseconds // 1_000)
     else:
-        # Regardless of the days and seconds this will always be a Millisecond
-        # or Microsecond object
-        if delta.microseconds % 1_000 == 0:
-            return Millisecond(n=delta.microseconds // 1_000)
-        else:
-            return Microsecond(n=delta.microseconds)
+        return Microsecond(n=delta.microseconds)


 def to_cftime_datetime(date_str_or_date, calendar=None):
@@ -1615,11 +1612,10 @@ def date_range_like(source, calendar, use_cftime=None):
         source_calendar = "standard"
         source_start = default_precision_timestamp(source_start)
         source_end = default_precision_timestamp(source_end)
-    else:
-        if isinstance(source, CFTimeIndex):
-            source_calendar = source.calendar
-        else:  # DataArray
-            source_calendar = source.dt.calendar
+    elif isinstance(source, CFTimeIndex):
+        source_calendar = source.calendar
+    else:  # DataArray
+        source_calendar = source.dt.calendar

     if calendar == source_calendar and is_np_datetime_like(source.dtype) ^ use_cftime:
         return source
diff --git a/xarray/coding/times.py b/xarray/coding/times.py
index e727a0f3664..2112170f7fc 100644
--- a/xarray/coding/times.py
+++ b/xarray/coding/times.py
@@ -579,9 +579,8 @@ def decode_cf_datetime(
                     "'time_unit' or specify 'use_cftime=True'.",
                     SerializationWarning,
                 )
-            else:
-                if _is_standard_calendar(calendar):
-                    dates = cftime_to_nptime(dates, time_unit=time_unit)
+            elif _is_standard_calendar(calendar):
+                dates = cftime_to_nptime(dates, time_unit=time_unit)
     elif use_cftime:
         dates = _decode_datetime_with_cftime(flat_num_dates, units, calendar)
     else:
diff --git a/xarray/computation/apply_ufunc.py b/xarray/computation/apply_ufunc.py
index 41f4adb5fd5..fc783c8d881 100644
--- a/xarray/computation/apply_ufunc.py
+++ b/xarray/computation/apply_ufunc.py
@@ -445,17 +445,16 @@ def apply_dict_of_variables_vfunc(
         core_dim_present = _check_core_dims(signature, variable_args, name)
         if core_dim_present is True:
             result_vars[name] = func(*variable_args)
+        elif on_missing_core_dim == "raise":
+            raise ValueError(core_dim_present)
+        elif on_missing_core_dim == "copy":
+            result_vars[name] = variable_args[0]
+        elif on_missing_core_dim == "drop":
+            pass
         else:
-            if on_missing_core_dim == "raise":
-                raise ValueError(core_dim_present)
-            elif on_missing_core_dim == "copy":
-                result_vars[name] = variable_args[0]
-            elif on_missing_core_dim == "drop":
-                pass
-            else:
-                raise ValueError(
-                    f"Invalid value for `on_missing_core_dim`: {on_missing_core_dim!r}"
-                )
+            raise ValueError(
+                f"Invalid value for `on_missing_core_dim`: {on_missing_core_dim!r}"
+            )

     if signature.num_outputs > 1:
         return _unpack_dict_tuples(result_vars, signature.num_outputs)
@@ -809,11 +808,10 @@ def func(*arrays):
             raise ValueError(
                 f"unknown setting for chunked array handling in apply_ufunc: {dask}"
             )
-    else:
-        if vectorize:
-            func = _vectorize(
-                func, signature, output_dtypes=output_dtypes, exclude_dims=exclude_dims
-            )
+    elif vectorize:
+        func = _vectorize(
+            func, signature, output_dtypes=output_dtypes, exclude_dims=exclude_dims
+        )

     result_data = func(*input_data)
diff --git a/xarray/computation/rolling.py b/xarray/computation/rolling.py
index b1327f39355..519d1f7eae6 100644
--- a/xarray/computation/rolling.py
+++ b/xarray/computation/rolling.py
@@ -1253,18 +1253,17 @@ def wrapped_func(
             for c, v in self.obj.coords.items():
                 if c == self.obj.name:
                     coords[c] = reduced
+                elif any(d in self.windows for d in v.dims):
+                    coords[c] = v.variable.coarsen(
+                        self.windows,
+                        self.coord_func[c],
+                        self.boundary,
+                        self.side,
+                        keep_attrs,
+                        **kwargs,
+                    )
                 else:
-                    if any(d in self.windows for d in v.dims):
-                        coords[c] = v.variable.coarsen(
-                            self.windows,
-                            self.coord_func[c],
-                            self.boundary,
-                            self.side,
-                            keep_attrs,
-                            **kwargs,
-                        )
-                    else:
-                        coords[c] = v
+                    coords[c] = v
             return DataArray(
                 reduced, dims=self.obj.dims, coords=coords, name=self.obj.name
             )
diff --git a/xarray/conventions.py b/xarray/conventions.py
index 4d8e8a96ad8..c9cd2a5dcdc 100644
--- a/xarray/conventions.py
+++ b/xarray/conventions.py
@@ -226,17 +226,16 @@ def decode_cf_variable(
                 DeprecationWarning,
             )
             decode_times = CFDatetimeCoder(use_cftime=use_cftime)
-        else:
-            if use_cftime is not None:
-                raise TypeError(
-                    "Usage of 'use_cftime' as a kwarg is not allowed "
-                    "if a 'CFDatetimeCoder' instance is passed to "
-                    "'decode_times'. Please set 'use_cftime' "
-                    "when initializing 'CFDatetimeCoder' instead.\n"
-                    "Example usage:\n"
-                    "    time_coder = xr.coders.CFDatetimeCoder(use_cftime=True)\n"
-                    "    ds = xr.open_dataset(decode_times=time_coder)\n",
-                )
+        elif use_cftime is not None:
+            raise TypeError(
+                "Usage of 'use_cftime' as a kwarg is not allowed "
+                "if a 'CFDatetimeCoder' instance is passed to "
+                "'decode_times'. Please set 'use_cftime' "
+                "when initializing 'CFDatetimeCoder' instead.\n"
+                "Example usage:\n"
+                "    time_coder = xr.coders.CFDatetimeCoder(use_cftime=True)\n"
+                "    ds = xr.open_dataset(decode_times=time_coder)\n",
+            )
         var = decode_times.decode(var, name=name)

     if decode_endianness and not var.dtype.isnative:
diff --git a/xarray/core/dataset.py b/xarray/core/dataset.py
index bb97c9f5e5b..917255137fa 100644
--- a/xarray/core/dataset.py
+++ b/xarray/core/dataset.py
@@ -2904,9 +2904,8 @@ def sel(
         for k, v in query_results.variables.items():
             if v.dims:
                 no_scalar_variables[k] = v
-            else:
-                if k in self._coord_names:
-                    query_results.drop_coords.append(k)
+            elif k in self._coord_names:
+                query_results.drop_coords.append(k)
         query_results.variables = no_scalar_variables

         result = self.isel(indexers=query_results.dim_indexers, drop=drop)
@@ -4552,26 +4551,25 @@ def expand_dims(
                 for d, c in zip_axis_dim:
                     all_dims.insert(d, c)
                 variables[k] = v.set_dims(dict(all_dims))
-            else:
-                if k not in variables:
-                    if k in coord_names and create_index_for_new_dim:
-                        # If dims includes a label of a non-dimension coordinate,
-                        # it will be promoted to a 1D coordinate with a single value.
-                        index, index_vars = create_default_index_implicit(v.set_dims(k))
-                        indexes[k] = index
-                        variables.update(index_vars)
-                    else:
-                        if create_index_for_new_dim:
-                            warnings.warn(
-                                f"No index created for dimension {k} because variable {k} is not a coordinate. "
-                                f"To create an index for {k}, please first call `.set_coords('{k}')` on this object.",
-                                UserWarning,
-                                stacklevel=2,
-                            )
+            elif k not in variables:
+                if k in coord_names and create_index_for_new_dim:
+                    # If dims includes a label of a non-dimension coordinate,
+                    # it will be promoted to a 1D coordinate with a single value.
+                    index, index_vars = create_default_index_implicit(v.set_dims(k))
+                    indexes[k] = index
+                    variables.update(index_vars)
+                else:
+                    if create_index_for_new_dim:
+                        warnings.warn(
+                            f"No index created for dimension {k} because variable {k} is not a coordinate. "
+                            f"To create an index for {k}, please first call `.set_coords('{k}')` on this object.",
+                            UserWarning,
+                            stacklevel=2,
+                        )

-                        # create 1D variable without creating a new index
-                        new_1d_var = v.set_dims(k)
-                        variables.update({k: new_1d_var})
+                    # create 1D variable without creating a new index
+                    new_1d_var = v.set_dims(k)
+                    variables.update({k: new_1d_var})

         return self._replace_with_new_dims(
             variables, coord_names=coord_names, indexes=indexes
@@ -4890,9 +4888,8 @@ def set_xindex(
                 index_cls = PandasIndex
             else:
                 index_cls = PandasMultiIndex
-        else:
-            if not issubclass(index_cls, Index):
-                raise TypeError(f"{index_cls} is not a subclass of xarray.Index")
+        elif not issubclass(index_cls, Index):
+            raise TypeError(f"{index_cls} is not a subclass of xarray.Index")

         invalid_coords = set(coord_names) - self._coord_names
@@ -6744,34 +6741,33 @@ def reduce(
             if name in self.coords:
                 if not reduce_dims:
                     variables[name] = var
-            else:
-                if (
-                    # Some reduction functions (e.g. std, var) need to run on variables
-                    # that don't have the reduce dims: PR5393
-                    not is_extension_array_dtype(var.dtype)
-                    and (
-                        not reduce_dims
-                        or not numeric_only
-                        or np.issubdtype(var.dtype, np.number)
-                        or (var.dtype == np.bool_)
-                    )
-                ):
-                    # prefer to aggregate over axis=None rather than
-                    # axis=(0, 1) if they will be equivalent, because
-                    # the former is often more efficient
-                    # keep single-element dims as list, to support Hashables
-                    reduce_maybe_single = (
-                        None
-                        if len(reduce_dims) == var.ndim and var.ndim != 1
-                        else reduce_dims
-                    )
-                    variables[name] = var.reduce(
-                        func,
-                        dim=reduce_maybe_single,
-                        keep_attrs=keep_attrs,
-                        keepdims=keepdims,
-                        **kwargs,
-                    )
+            elif (
+                # Some reduction functions (e.g. std, var) need to run on variables
+                # that don't have the reduce dims: PR5393
+                not is_extension_array_dtype(var.dtype)
+                and (
+                    not reduce_dims
+                    or not numeric_only
+                    or np.issubdtype(var.dtype, np.number)
+                    or (var.dtype == np.bool_)
+                )
+            ):
+                # prefer to aggregate over axis=None rather than
+                # axis=(0, 1) if they will be equivalent, because
+                # the former is often more efficient
+                # keep single-element dims as list, to support Hashables
+                reduce_maybe_single = (
+                    None
+                    if len(reduce_dims) == var.ndim and var.ndim != 1
+                    else reduce_dims
+                )
+                variables[name] = var.reduce(
+                    func,
+                    dim=reduce_maybe_single,
+                    keep_attrs=keep_attrs,
+                    keepdims=keepdims,
+                    **kwargs,
+                )

         coord_names = {k for k in self.coords if k in variables}
         indexes = {k: v for k, v in self._indexes.items() if k in variables}
@@ -7970,8 +7966,6 @@ def sortby(
             variables = variables(self)
         if not isinstance(variables, list):
             variables = [variables]
-        else:
-            variables = variables
         arrays = [v if isinstance(v, DataArray) else self[v] for v in variables]
         aligned_vars = align(self, *arrays, join="left")
         aligned_self = cast("Self", aligned_vars[0])
@@ -8395,25 +8389,24 @@ def _integrate_one(self, coord, datetime_unit=None, cumulative=False):
             if dim not in v.dims or cumulative:
                 variables[k] = v
                 coord_names.add(k)
-            else:
-                if k in self.data_vars and dim in v.dims:
-                    coord_data = to_like_array(coord_var.data, like=v.data)
-                    if _contains_datetime_like_objects(v):
-                        v = datetime_to_numeric(v, datetime_unit=datetime_unit)
-                    if cumulative:
-                        integ = duck_array_ops.cumulative_trapezoid(
-                            v.data, coord_data, axis=v.get_axis_num(dim)
-                        )
-                        v_dims = v.dims
-                    else:
-                        integ = duck_array_ops.trapz(
-                            v.data, coord_data, axis=v.get_axis_num(dim)
-                        )
-                        v_dims = list(v.dims)
-                        v_dims.remove(dim)
-                    variables[k] = Variable(v_dims, integ)
-                else:
-                    variables[k] = v
+            elif k in self.data_vars and dim in v.dims:
+                coord_data = to_like_array(coord_var.data, like=v.data)
+                if _contains_datetime_like_objects(v):
+                    v = datetime_to_numeric(v, datetime_unit=datetime_unit)
+                if cumulative:
+                    integ = duck_array_ops.cumulative_trapezoid(
+                        v.data, coord_data, axis=v.get_axis_num(dim)
+                    )
+                    v_dims = v.dims
+                else:
+                    integ = duck_array_ops.trapz(
+                        v.data, coord_data, axis=v.get_axis_num(dim)
+                    )
+                    v_dims = list(v.dims)
+                    v_dims.remove(dim)
+                variables[k] = Variable(v_dims, integ)
+            else:
+                variables[k] = v
         indexes = {k: v for k, v in self._indexes.items() if k in variables}
         return self._replace_with_new_dims(
             variables, coord_names=coord_names, indexes=indexes
diff --git a/xarray/core/duck_array_ops.py b/xarray/core/duck_array_ops.py
index b84156878d7..51ebfdd8177 100644
--- a/xarray/core/duck_array_ops.py
+++ b/xarray/core/duck_array_ops.py
@@ -185,16 +185,15 @@ def isnull(data):
         # bool_ is for backwards compat with numpy<2, and cupy
         dtype = xp.bool_ if hasattr(xp, "bool_") else xp.bool
         return full_like(data, dtype=dtype, fill_value=False)
+    # at this point, array should have dtype=object
+    elif isinstance(data, np.ndarray) or is_extension_array_dtype(data):
+        return pandas_isnull(data)
     else:
-        # at this point, array should have dtype=object
-        if isinstance(data, np.ndarray) or is_extension_array_dtype(data):
-            return pandas_isnull(data)
-        else:
-            # Not reachable yet, but intended for use with other duck array
-            # types. For full consistency with pandas, we should accept None as
-            # a null value as well as NaN, but it isn't clear how to do this
-            # with duck typing.
-            return data != data
+        # Not reachable yet, but intended for use with other duck array
+        # types. For full consistency with pandas, we should accept None as
+        # a null value as well as NaN, but it isn't clear how to do this
+        # with duck typing.
+        return data != data


 def notnull(data):
diff --git a/xarray/core/indexes.py b/xarray/core/indexes.py
index b9abf4fef2d..0286231940c 100644
--- a/xarray/core/indexes.py
+++ b/xarray/core/indexes.py
@@ -850,23 +850,18 @@ def sel(
                             "'tolerance' is not supported when indexing using a CategoricalIndex."
                         )
                     indexer = self.index.get_loc(label_value)
+                elif method is not None:
+                    indexer = get_indexer_nd(self.index, label_array, method, tolerance)
+                    if np.any(indexer < 0):
+                        raise KeyError(f"not all values found in index {coord_name!r}")
                 else:
-                    if method is not None:
-                        indexer = get_indexer_nd(
-                            self.index, label_array, method, tolerance
-                        )
-                        if np.any(indexer < 0):
-                            raise KeyError(
-                                f"not all values found in index {coord_name!r}"
-                            )
-                    else:
-                        try:
-                            indexer = self.index.get_loc(label_value)
-                        except KeyError as e:
-                            raise KeyError(
-                                f"not all values found in index {coord_name!r}. "
-                                "Try setting the `method` keyword argument (example: method='nearest')."
-                            ) from e
+                    try:
+                        indexer = self.index.get_loc(label_value)
+                    except KeyError as e:
+                        raise KeyError(
+                            f"not all values found in index {coord_name!r}. "
+                            "Try setting the `method` keyword argument (example: method='nearest')."
+                        ) from e
             elif label_array.dtype.kind == "b":
                 indexer = label_array
diff --git a/xarray/core/resample_cftime.py b/xarray/core/resample_cftime.py
index 0f509a7b1f9..210cea2c76a 100644
--- a/xarray/core/resample_cftime.py
+++ b/xarray/core/resample_cftime.py
@@ -93,31 +93,30 @@ def __init__(
                 self.label = "right"
             else:
                 self.label = label
+        # The backward resample sets ``closed`` to ``'right'`` by default
+        # since the last value should be considered as the edge point for
+        # the last bin. When origin in "end" or "end_day", the value for a
+        # specific ``cftime.datetime`` index stands for the resample result
+        # from the current ``cftime.datetime`` minus ``freq`` to the current
+        # ``cftime.datetime`` with a right close.
+        elif self.origin in ["end", "end_day"]:
+            if closed is None:
+                self.closed = "right"
+            else:
+                self.closed = closed
+            if label is None:
+                self.label = "right"
+            else:
+                self.label = label
         else:
-            # The backward resample sets ``closed`` to ``'right'`` by default
-            # since the last value should be considered as the edge point for
-            # the last bin. When origin in "end" or "end_day", the value for a
-            # specific ``cftime.datetime`` index stands for the resample result
-            # from the current ``cftime.datetime`` minus ``freq`` to the current
-            # ``cftime.datetime`` with a right close.
-            if self.origin in ["end", "end_day"]:
-                if closed is None:
-                    self.closed = "right"
-                else:
-                    self.closed = closed
-                if label is None:
-                    self.label = "right"
-                else:
-                    self.label = label
+            if closed is None:
+                self.closed = "left"
+            else:
+                self.closed = closed
+            if label is None:
+                self.label = "left"
             else:
-                if closed is None:
-                    self.closed = "left"
-                else:
-                    self.closed = closed
-                if label is None:
-                    self.label = "left"
-                else:
-                    self.label = label
+                self.label = label

         if offset is not None:
             try:
diff --git a/xarray/core/treenode.py b/xarray/core/treenode.py
index 0d97bd036ab..c5d910994b6 100644
--- a/xarray/core/treenode.py
+++ b/xarray/core/treenode.py
@@ -559,11 +559,10 @@ def _get_item(self: Tree, path: str | NodePath) -> Tree | T_DataArray:
                 current_node = current_node.parent
             elif part in ("", "."):
                 pass
+            elif current_node.get(part) is None:
+                raise KeyError(f"Could not find node at {path}")
             else:
-                if current_node.get(part) is None:
-                    raise KeyError(f"Could not find node at {path}")
-                else:
-                    current_node = current_node.get(part)
+                current_node = current_node.get(part)
         return current_node

     def _set(self: Tree, key: str, val: Tree) -> None:
@@ -631,16 +630,15 @@ def _set_item(
                 current_node = current_node.parent
             elif part in ("", "."):
                 pass
+            elif part in current_node.children:
+                current_node = current_node.children[part]
+            elif new_nodes_along_path:
+                # Want child classes (i.e. DataTree) to populate tree with their own types
+                new_node = type(self)()
+                current_node._set(part, new_node)
+                current_node = current_node.children[part]
             else:
-                if part in current_node.children:
-                    current_node = current_node.children[part]
-                elif new_nodes_along_path:
-                    # Want child classes (i.e. DataTree) to populate tree with their own types
-                    new_node = type(self)()
-                    current_node._set(part, new_node)
-                    current_node = current_node.children[part]
-                else:
-                    raise KeyError(f"Could not reach node at path {path}")
+                raise KeyError(f"Could not reach node at path {path}")

         if name in current_node.children:
             # Deal with anything already existing at this location
diff --git a/xarray/tests/test_backends.py b/xarray/tests/test_backends.py
index 6a14142a7ff..238ff949bf4 100644
--- a/xarray/tests/test_backends.py
+++ b/xarray/tests/test_backends.py
@@ -851,15 +851,14 @@ def find_and_validate_array(obj):
             if hasattr(obj, "array"):
                 if isinstance(obj.array, indexing.ExplicitlyIndexed):
                     find_and_validate_array(obj.array)
+                elif isinstance(obj.array, np.ndarray):
+                    assert isinstance(obj, indexing.NumpyIndexingAdapter)
+                elif isinstance(obj.array, dask_array_type):
+                    assert isinstance(obj, indexing.DaskIndexingAdapter)
+                elif isinstance(obj.array, pd.Index):
+                    assert isinstance(obj, indexing.PandasIndexingAdapter)
                 else:
-                    if isinstance(obj.array, np.ndarray):
-                        assert isinstance(obj, indexing.NumpyIndexingAdapter)
-                    elif isinstance(obj.array, dask_array_type):
-                        assert isinstance(obj, indexing.DaskIndexingAdapter)
-                    elif isinstance(obj.array, pd.Index):
-                        assert isinstance(obj, indexing.PandasIndexingAdapter)
-                    else:
-                        raise TypeError(f"{type(obj.array)} is wrapped by {type(obj)}")
+                    raise TypeError(f"{type(obj.array)} is wrapped by {type(obj)}")

         for v in ds.variables.values():
             find_and_validate_array(v._data)
@@ -1198,7 +1197,7 @@ def test_coordinate_variables_after_iris_roundtrip(self) -> None:

     def test_coordinates_encoding(self) -> None:
         def equals_latlon(obj):
-            return obj == "lat lon" or obj == "lon lat"
+            return obj in {"lat lon", "lon lat"}

         original = Dataset(
             {"temp": ("x", [0, 1]), "precip": ("x", [0, -1])},
@@ -3817,20 +3816,19 @@ def assert_expected_files(expected: list[str], store: str) -> None:
             # that was performed by the roundtrip_dir
             if (write_empty is False) or (write_empty is None and has_zarr_v3):
                 expected.append("1.1.0")
+            elif not has_zarr_v3:
+                # TODO: remove zarr3 if once zarr issue is fixed
+                # https://github.com/zarr-developers/zarr-python/issues/2931
+                expected.extend(
+                    [
+                        "1.1.0",
+                        "1.0.0",
+                        "1.0.1",
+                        "1.1.1",
+                    ]
+                )
             else:
-                if not has_zarr_v3:
-                    # TODO: remove zarr3 if once zarr issue is fixed
-                    # https://github.com/zarr-developers/zarr-python/issues/2931
-                    expected.extend(
-                        [
-                            "1.1.0",
-                            "1.0.0",
-                            "1.0.1",
-                            "1.1.1",
-                        ]
-                    )
-                else:
-                    expected.append("1.1.0")
+                expected.append("1.1.0")

             if zarr_format_3:
                 expected = [e.replace(".", "/") for e in expected]
             assert_expected_files(expected, store)
diff --git a/xarray/tests/test_cftime_offsets.py b/xarray/tests/test_cftime_offsets.py
index abec4c62080..75269286007 100644
--- a/xarray/tests/test_cftime_offsets.py
+++ b/xarray/tests/test_cftime_offsets.py
@@ -313,18 +313,16 @@ def test_to_offset_quarter(month_label, month_int, multiple, offset_str):
     elif multiple:
         if month_int:
             expected = offset_type(n=multiple)
-        else:
-            if offset_type == QuarterBegin:
-                expected = offset_type(n=multiple, month=1)
-            elif offset_type == QuarterEnd:
-                expected = offset_type(n=multiple, month=12)
+        elif offset_type == QuarterBegin:
+            expected = offset_type(n=multiple, month=1)
+        elif offset_type == QuarterEnd:
+            expected = offset_type(n=multiple, month=12)
     elif month_int:
         expected = offset_type(month=month_int)
-    else:
-        if offset_type == QuarterBegin:
-            expected = offset_type(month=1)
-        elif offset_type == QuarterEnd:
-            expected = offset_type(month=12)
+    elif offset_type == QuarterBegin:
+        expected = offset_type(month=1)
+    elif offset_type == QuarterEnd:
+        expected = offset_type(month=12)
     assert result == expected
diff --git a/xarray/tests/test_combine.py b/xarray/tests/test_combine.py
index 80a795c4c52..40d63ed6981 100644
--- a/xarray/tests/test_combine.py
+++ b/xarray/tests/test_combine.py
@@ -556,11 +556,11 @@ def test_auto_combine_2d_combine_attrs_kwarg(self):
                 datasets, concat_dim=["dim1", "dim2"], combine_attrs="identical"
             )

-        for combine_attrs in expected_dict:
+        for combine_attrs, expected in expected_dict.items():
             result = combine_nested(
                 datasets, concat_dim=["dim1", "dim2"], combine_attrs=combine_attrs
             )
-            assert_identical(result, expected_dict[combine_attrs])
+            assert_identical(result, expected)

     def test_combine_nested_missing_data_new_dim(self):
         # Your data includes "time" and "station" dimensions, and each year's
diff --git a/xarray/tests/test_computation.py b/xarray/tests/test_computation.py
index 2bf6205acc1..91a380e840f 100644
--- a/xarray/tests/test_computation.py
+++ b/xarray/tests/test_computation.py
@@ -1852,7 +1852,6 @@ def test_equally_weighted_cov_corr() -> None:
        coords={"time": pd.date_range("2000-01-01", freq="1D", periods=21)},
        dims=("a", "time", "x"),
    )
-    #
    assert_allclose(
        xr.cov(da, db, weights=None), xr.cov(da, db, weights=xr.DataArray(1))
    )
diff --git a/xarray/tests/test_concat.py b/xarray/tests/test_concat.py
index 49c6490d819..97a6090604c 100644
--- a/xarray/tests/test_concat.py
+++ b/xarray/tests/test_concat.py
@@ -705,9 +705,9 @@ def test_concat_join_kwarg(self) -> None:
         with pytest.raises(ValueError, match=r"cannot align.*exact.*dimensions.*'y'"):
             actual = concat([ds1, ds2], join="exact", dim="x")

-        for join in expected:
+        for join, expected_item in expected.items():
             actual = concat([ds1, ds2], join=join, dim="x")
-            assert_equal(actual, expected[join])
+            assert_equal(actual, expected_item)

         # regression test for #3681
         actual = concat(
@@ -1217,9 +1217,9 @@ def test_concat_join_kwarg(self) -> None:
         with pytest.raises(ValueError, match=r"cannot align.*exact.*dimensions.*'y'"):
             actual = concat([ds1, ds2], join="exact", dim="x")

-        for join in expected:
+        for join, expected_item in expected.items():
             actual = concat([ds1, ds2], join=join, dim="x")
-            assert_equal(actual, expected[join].to_dataarray())
+            assert_equal(actual, expected_item.to_dataarray())

     def test_concat_combine_attrs_kwarg(self) -> None:
         da1 = DataArray([0], coords=[("x", [0])], attrs={"b": 42})
@@ -1241,9 +1241,9 @@ def test_concat_combine_attrs_kwarg(self) -> None:
             da3.attrs["b"] = 44
             actual = concat([da1, da3], dim="x", combine_attrs="no_conflicts")

-        for combine_attrs in expected:
+        for combine_attrs, expected_item in expected.items():
             actual = concat([da1, da2], dim="x", combine_attrs=combine_attrs)
-            assert_identical(actual, expected[combine_attrs])
+            assert_identical(actual, expected_item)

     @pytest.mark.parametrize("dtype", [str, bytes])
     @pytest.mark.parametrize("dim", ["x1", "x2"])
diff --git a/xarray/tests/test_dataarray.py b/xarray/tests/test_dataarray.py
index e7acdcdd4f3..fe3f9285b71 100644
--- a/xarray/tests/test_dataarray.py
+++ b/xarray/tests/test_dataarray.py
@@ -3925,7 +3925,7 @@ def test__title_for_slice(self) -> None:
         assert "" == array._title_for_slice()
         assert "c = 0" == array.isel(c=0)._title_for_slice()
         title = array.isel(b=1, c=0)._title_for_slice()
-        assert "b = 1, c = 0" == title or "c = 0, b = 1" == title
+        assert title in {"b = 1, c = 0", "c = 0, b = 1"}

         a2 = DataArray(np.ones((4, 1)), dims=["a", "b"])
         assert "" == a2._title_for_slice()
diff --git a/xarray/tests/test_distributed.py b/xarray/tests/test_distributed.py
index 482141832e4..9ae83bc2664 100644
--- a/xarray/tests/test_distributed.py
+++ b/xarray/tests/test_distributed.py
@@ -21,14 +21,14 @@

 from dask.distributed import Client, Lock
 from distributed.client import futures_of
-from distributed.utils_test import (  # noqa: F401
-    cleanup,
-    client,
+from distributed.utils_test import (
+    cleanup,  # noqa: F401
+    client,  # noqa: F401
     cluster,
-    cluster_fixture,
+    cluster_fixture,  # noqa: F401
     gen_cluster,
-    loop,
-    loop_in_thread,
+    loop,  # noqa: F401
+    loop_in_thread,  # noqa: F401
 )

 import xarray as xr
@@ -49,9 +49,6 @@
 )
 from xarray.tests.test_dataset import create_test_data

-loop = loop  # loop is an imported fixture, which flake8 has issues ack-ing
-client = client  # client is an imported fixture, which flake8 has issues ack-ing
-

 @pytest.fixture
 def tmp_netcdf_filename(tmpdir):
@@ -89,7 +86,10 @@ def tmp_netcdf_filename(tmpdir):

 @pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
 def test_dask_distributed_netcdf_roundtrip(
-    loop, tmp_netcdf_filename, engine, nc_format
+    loop,  # noqa: F811
+    tmp_netcdf_filename,
+    engine,
+    nc_format,
 ):
     if engine not in ENGINES:
         pytest.skip("engine not available")
@@ -119,7 +119,8 @@ def test_dask_distributed_netcdf_roundtrip(

 @requires_netCDF4
 def test_dask_distributed_write_netcdf_with_dimensionless_variables(
-    loop, tmp_netcdf_filename
+    loop,  # noqa: F811
+    tmp_netcdf_filename,
 ):
     with cluster() as (s, [a, b]):
         with Client(s["address"], loop=loop):
@@ -199,7 +200,10 @@ def test_open_mfdataset_multiple_files_parallel(parallel, tmp_path):

 @pytest.mark.parametrize("engine,nc_format", ENGINES_AND_FORMATS)
 def test_dask_distributed_read_netcdf_integration_test(
-    loop, tmp_netcdf_filename, engine, nc_format
+    loop,  # noqa: F811
+    tmp_netcdf_filename,
+    engine,
+    nc_format,
 ):
     if engine not in ENGINES:
         pytest.skip("engine not available")
@@ -223,7 +227,7 @@ def test_dask_distributed_read_netcdf_integration_test(
 # heads-up, this is using quite private zarr API
 # https://github.com/dask/dask/blob/e04734b4d8959ba259801f2e2a490cb4ee8d891f/dask/tests/test_distributed.py#L338-L358
 @pytest.fixture
-def zarr(client):
+def zarr(client):  # noqa: F811
     zarr_lib = pytest.importorskip("zarr")
     # Zarr-Python 3 lazily allocates a dedicated thread/IO loop
     # for to execute async tasks. To avoid having this thread
@@ -248,7 +252,7 @@
 @pytest.mark.parametrize("consolidated", [True, False])
 @pytest.mark.parametrize("compute", [True, False])
 def test_dask_distributed_zarr_integration_test(
-    client,
+    client,  # noqa: F811
     zarr,
     consolidated: bool,
     compute: bool,
diff --git a/xarray/tests/test_plot.py b/xarray/tests/test_plot.py
index 8cb8a435a95..3c7d83d2825 100644
--- a/xarray/tests/test_plot.py
+++ b/xarray/tests/test_plot.py
@@ -1527,7 +1527,7 @@ def test_default_title(self) -> None:
         a.coords["d"] = "foo"
         self.plotfunc(a.isel(c=1))
         title = plt.gca().get_title()
-        assert "c = 1, d = foo" == title or "d = foo, c = 1" == title
+        assert title in {"c = 1, d = foo", "d = foo, c = 1"}

     def test_colorbar_default_label(self) -> None:
         self.plotmethod(add_colorbar=True)
diff --git a/xarray/tests/test_sparse.py b/xarray/tests/test_sparse.py
index aebca9b24bc..f33a906947e 100644
--- a/xarray/tests/test_sparse.py
+++ b/xarray/tests/test_sparse.py
@@ -236,15 +236,14 @@ def test_variable_method(func, sparse_output):
     if sparse_output:
         assert isinstance(ret_s.data, sparse.SparseArray)
         assert np.allclose(ret_s.data.todense(), ret_d.data, equal_nan=True)
+    elif func.meth != "to_dict":
+        assert np.allclose(ret_s, ret_d)
     else:
-        if func.meth != "to_dict":
-            assert np.allclose(ret_s, ret_d)
-        else:
-            # pop the arrays from the dict
-            arr_s, arr_d = ret_s.pop("data"), ret_d.pop("data")
+        # pop the arrays from the dict
+        arr_s, arr_d = ret_s.pop("data"), ret_d.pop("data")

-            assert np.allclose(arr_s, arr_d)
-            assert ret_s == ret_d
+        assert np.allclose(arr_s, arr_d)
+        assert ret_s == ret_d


 @pytest.mark.parametrize(