
STY: Use ruff instead of black for formatting #56704

Merged · 6 commits · Jan 4, 2024
8 changes: 3 additions & 5 deletions .pre-commit-config.yaml
@@ -18,11 +18,6 @@ ci:
# manual stage hooks
skip: [pylint, pyright, mypy]
repos:
- repo: https://github.com/hauntsaninja/black-pre-commit-mirror
# black compiled with mypyc
rev: 23.11.0
hooks:
- id: black
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.1.6
hooks:
@@ -35,6 +30,9 @@ repos:
files: ^pandas
exclude: ^pandas/tests
args: [--select, "ANN001,ANN2", --fix-only, --exit-non-zero-on-fix]
- id: ruff-format
# TODO: "." not needed in ruff 0.1.8
args: ["."]
- repo: https://github.com/jendrikseipp/vulture
rev: 'v2.10'
hooks:
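The net effect of this hook swap is that formatting now runs through ruff instead of black. A minimal sketch of the equivalent manual invocation, assuming ruff is installed locally (the explicit "." mirrors the TODO workaround noted above):

```python
import subprocess

# Format the repository the way the ruff-format hook does; the explicit "."
# argument reflects the pre-0.1.8 workaround from the TODO comment above.
subprocess.run(["ruff", "format", "."], check=True)
```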
4 changes: 1 addition & 3 deletions asv_bench/benchmarks/indexing.py
@@ -84,9 +84,7 @@ def time_loc_slice(self, index, index_structure):

class NumericMaskedIndexing:
monotonic_list = list(range(10**6))
non_monotonic_list = (
list(range(50)) + [54, 53, 52, 51] + list(range(55, 10**6 - 1))
)
non_monotonic_list = list(range(50)) + [54, 53, 52, 51] + list(range(55, 10**6 - 1))

params = [
("Int64", "UInt64", "Float64"),
3 changes: 2 additions & 1 deletion asv_bench/benchmarks/io/style.py
@@ -76,7 +76,8 @@ def _style_format(self):
# apply a formatting function
# subset is flexible but hinders vectorised solutions
self.st = self.df.style.format(
"{:,.3f}", subset=IndexSlice["row_1":f"row_{ir}", "float_1":f"float_{ic}"]
"{:,.3f}",
subset=IndexSlice["row_1" : f"row_{ir}", "float_1" : f"float_{ic}"],
)

def _style_apply_format_hide(self):
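The reflowed call above also illustrates ruff's slice formatting: PEP 8 treats the colon in a slice as a binary operator and gives it equal spacing on both sides once the bounds are expressions rather than simple names or literals. A small self-contained illustration:

```python
items = list(range(20))
x = 3

simple = items[1:9]            # simple bounds: no spaces around ":"
spaced = items[x + 1 : x + 9]  # expression bounds: spaced ":" per PEP 8
```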
2 changes: 1 addition & 1 deletion doc/source/development/contributing_codebase.rst
@@ -38,7 +38,7 @@ Pre-commit
----------

Additionally, :ref:`Continuous Integration <contributing.ci>` will run code formatting checks
like ``black``, ``ruff``,
like ``ruff``,
``isort``, and ``clang-format`` and more using `pre-commit hooks <https://pre-commit.com/>`_.
Any warnings from these checks will cause the :ref:`Continuous Integration <contributing.ci>` to fail; therefore,
it is helpful to run the check yourself before submitting code. This
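As the paragraph above advises, these checks can be run locally before submitting code. A hedged sketch of driving that from Python, equivalent to invoking the pre-commit CLI directly:

```python
import subprocess

# Same as running "pre-commit run --all-files" from the shell; check=True
# raises if any hook fails or rewrites files.
subprocess.run(["pre-commit", "run", "--all-files"], check=True)
```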
7 changes: 5 additions & 2 deletions pandas/_libs/hashtable.pyi
@@ -196,15 +196,18 @@ class HashTable:
*,
return_inverse: Literal[True],
mask: None = ...,
) -> tuple[np.ndarray, npt.NDArray[np.intp],]: ... # np.ndarray[subclass-specific]
) -> tuple[np.ndarray, npt.NDArray[np.intp]]: ... # np.ndarray[subclass-specific]
@overload
def unique(
self,
values: np.ndarray, # np.ndarray[subclass-specific]
*,
return_inverse: Literal[False] = ...,
mask: npt.NDArray[np.bool_],
) -> tuple[np.ndarray, npt.NDArray[np.bool_],]: ... # np.ndarray[subclass-specific]
) -> tuple[
np.ndarray,
npt.NDArray[np.bool_],
]: ... # np.ndarray[subclass-specific]
def factorize(
self,
values: np.ndarray, # np.ndarray[subclass-specific]
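The stub above combines typing.overload with Literal flag values so the declared return type follows the return_inverse argument. A self-contained sketch of that pattern; the names and runtime body here are hypothetical, not pandas' actual implementation:

```python
from typing import Literal, overload

@overload
def unique(values: list[int], *, return_inverse: Literal[True]) -> tuple[list[int], list[int]]: ...
@overload
def unique(values: list[int], *, return_inverse: Literal[False] = ...) -> list[int]: ...
def unique(values: list[int], *, return_inverse: bool = False):
    # Map each value to the index of its first occurrence.
    seen: dict[int, int] = {}
    inverse = [seen.setdefault(v, len(seen)) for v in values]
    uniq = list(seen)  # dict preserves first-seen order
    return (uniq, inverse) if return_inverse else uniq

unique([3, 1, 3])                       # checker infers list[int]
unique([3, 1, 3], return_inverse=True)  # checker infers the tuple form
```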
6 changes: 4 additions & 2 deletions pandas/_libs/lib.pyi
@@ -179,7 +179,8 @@ def indices_fast(
sorted_labels: list[npt.NDArray[np.int64]],
) -> dict[Hashable, npt.NDArray[np.intp]]: ...
def generate_slices(
labels: np.ndarray, ngroups: int # const intp_t[:]
labels: np.ndarray,
ngroups: int, # const intp_t[:]
) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ...
def count_level_2d(
mask: np.ndarray, # ndarray[uint8_t, ndim=2, cast=True],
@@ -209,5 +210,6 @@ def get_reverse_indexer(
def is_bool_list(obj: list) -> bool: ...
def dtypes_all_equal(types: list[DtypeObj]) -> bool: ...
def is_range_indexer(
left: np.ndarray, n: int # np.ndarray[np.int64, ndim=1]
left: np.ndarray,
n: int, # np.ndarray[np.int64, ndim=1]
) -> bool: ...
8 changes: 2 additions & 6 deletions pandas/_testing/_hypothesis.py
@@ -54,12 +54,8 @@
DATETIME_NO_TZ = st.datetimes()

DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
min_value=pd.Timestamp(
1900, 1, 1
).to_pydatetime(), # pyright: ignore[reportGeneralTypeIssues]
max_value=pd.Timestamp(
1900, 1, 1
).to_pydatetime(), # pyright: ignore[reportGeneralTypeIssues]
min_value=pd.Timestamp(1900, 1, 1).to_pydatetime(), # pyright: ignore[reportGeneralTypeIssues]
max_value=pd.Timestamp(1900, 1, 1).to_pydatetime(), # pyright: ignore[reportGeneralTypeIssues]
timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
)

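Strategy constants like these are consumed with hypothesis's @given decorator; a hedged usage sketch (the test itself is hypothetical):

```python
import hypothesis.strategies as st
from hypothesis import given

@given(st.datetimes())  # like DATETIME_NO_TZ above
def test_datetimes_are_naive(dt):
    # st.datetimes() generates timezone-naive values unless timezones= is passed
    assert dt.tzinfo is None
```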
3 changes: 2 additions & 1 deletion pandas/core/apply.py
@@ -1010,7 +1010,8 @@ def wrapper(*args, **kwargs):
# [..., Any] | str] | dict[Hashable,Callable[..., Any] | str |
# list[Callable[..., Any] | str]]"; expected "Hashable"
nb_looper = generate_apply_looper(
self.func, **engine_kwargs # type: ignore[arg-type]
self.func, # type: ignore[arg-type]
**engine_kwargs,
)
result = nb_looper(self.values, self.axis)
# If we made the result 2-D, squeeze it back to 1-D
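This hunk shows a pattern that recurs throughout the diff: when ruff splits a call one argument per line, any `# type: ignore[...]` comment has to move with the offending argument, because mypy applies suppressions to the physical line they sit on. A minimal sketch:

```python
def takes_int(x: int) -> int:
    return x

# The suppression must sit on the line that produces the error; once the
# call is exploded, that is the argument line rather than the call itself.
value = takes_int(
    "not an int",  # type: ignore[arg-type]
)
```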
3 changes: 2 additions & 1 deletion pandas/core/arrays/_arrow_string_mixins.py
@@ -58,7 +58,8 @@ def _str_get(self, i: int) -> Self:
self._pa_array, start=start, stop=stop, step=step
)
null_value = pa.scalar(
None, type=self._pa_array.type # type: ignore[attr-defined]
None,
type=self._pa_array.type, # type: ignore[attr-defined]
)
result = pc.if_else(not_out_of_bounds, selected, null_value)
return type(self)(result)
4 changes: 3 additions & 1 deletion pandas/core/arrays/_mixins.py
@@ -347,7 +347,9 @@ def fillna(
# error: Argument 2 to "check_value_size" has incompatible type
# "ExtensionArray"; expected "ndarray"
value = missing.check_value_size(
value, mask, len(self) # type: ignore[arg-type]
value,
mask, # type: ignore[arg-type]
len(self),
)

if mask.any():
3 changes: 2 additions & 1 deletion pandas/core/arrays/arrow/array.py
@@ -2855,7 +2855,8 @@ def _dt_tz_localize(
"shift_backward": "earliest",
"shift_forward": "latest",
}.get(
nonexistent, None # type: ignore[arg-type]
nonexistent, # type: ignore[arg-type]
None,
)
if nonexistent_pa is None:
raise NotImplementedError(f"{nonexistent=} is not supported")
8 changes: 4 additions & 4 deletions pandas/core/arrays/base.py
@@ -1121,7 +1121,9 @@ def fillna(
# error: Argument 2 to "check_value_size" has incompatible type
# "ExtensionArray"; expected "ndarray"
value = missing.check_value_size(
value, mask, len(self) # type: ignore[arg-type]
value,
mask, # type: ignore[arg-type]
len(self),
)

if mask.any():
@@ -1490,9 +1492,7 @@ def factorize(
uniques_ea = self._from_factorized(uniques, self)
return codes, uniques_ea

_extension_array_shared_docs[
"repeat"
] = """
_extension_array_shared_docs["repeat"] = """
Repeat elements of a %(klass)s.

Returns a new %(klass)s where each element of the current %(klass)s
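The `_extension_array_shared_docs[...] = """..."""` assignments that ruff now keeps on one line are %-style docstring templates, filled in per class elsewhere. A minimal sketch of the pattern; the substitution step shown is hypothetical:

```python
_shared_docs: dict[str, str] = {}
_shared_docs["repeat"] = """
Repeat elements of a %(klass)s.
"""

def repeat(self, repeats):
    ...

# Hypothetical substitution step: each array class fills in its own name.
repeat.__doc__ = _shared_docs["repeat"] % {"klass": "IntervalArray"}
```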
12 changes: 9 additions & 3 deletions pandas/core/arrays/datetimelike.py
@@ -1236,7 +1236,9 @@ def _add_timedeltalike(self, other: Timedelta | TimedeltaArray) -> Self:

# error: Unexpected keyword argument "freq" for "_simple_new" of "NDArrayBacked"
return type(self)._simple_new(
res_values, dtype=self.dtype, freq=new_freq # type: ignore[call-arg]
res_values,
dtype=self.dtype,
freq=new_freq, # type: ignore[call-arg]
)

@final
@@ -1256,7 +1258,9 @@ def _add_nat(self) -> Self:
result = result.view(self._ndarray.dtype) # preserve reso
# error: Unexpected keyword argument "freq" for "_simple_new" of "NDArrayBacked"
return type(self)._simple_new(
result, dtype=self.dtype, freq=None # type: ignore[call-arg]
result,
dtype=self.dtype,
freq=None, # type: ignore[call-arg]
)

@final
@@ -2162,7 +2166,9 @@ def as_unit(self, unit: str, round_ok: bool = True) -> Self:
# error: Unexpected keyword argument "freq" for "_simple_new" of
# "NDArrayBacked" [call-arg]
return type(self)._simple_new(
new_values, dtype=new_dtype, freq=self.freq # type: ignore[call-arg]
new_values,
dtype=new_dtype,
freq=self.freq, # type: ignore[call-arg]
)

# TODO: annotate other as DatetimeArray | TimedeltaArray | Timestamp | Timedelta
8 changes: 2 additions & 6 deletions pandas/core/arrays/interval.py
@@ -122,9 +122,7 @@
}


_interval_shared_docs[
"class"
] = """
_interval_shared_docs["class"] = """
%(summary)s

Parameters
@@ -1489,9 +1487,7 @@ def set_closed(self, closed: IntervalClosedType) -> Self:
dtype = IntervalDtype(left.dtype, closed=closed)
return self._simple_new(left, right, dtype=dtype)

_interval_shared_docs[
"is_non_overlapping_monotonic"
] = """
_interval_shared_docs["is_non_overlapping_monotonic"] = """
Return a boolean whether the %(klass)s is non-overlapping and monotonic.

Non-overlapping means (no Intervals share points), and monotonic means
3 changes: 2 additions & 1 deletion pandas/core/arrays/masked.py
@@ -1089,7 +1089,8 @@ def value_counts(self, dropna: bool = True) -> Series:
arr = IntegerArray(value_counts, mask)
index = Index(
self.dtype.construct_array_type()(
keys, mask_index # type: ignore[arg-type]
keys, # type: ignore[arg-type]
mask_index,
)
)
return Series(arr, index=index, name="count", copy=False)
3 changes: 2 additions & 1 deletion pandas/core/arrays/sparse/array.py
@@ -454,7 +454,8 @@ def __init__(
# error: Argument "dtype" to "asarray" has incompatible type
# "Union[ExtensionDtype, dtype[Any], None]"; expected "None"
sparse_values = np.asarray(
data.sp_values, dtype=dtype # type: ignore[arg-type]
data.sp_values,
dtype=dtype, # type: ignore[arg-type]
)
elif sparse_index is None:
data = extract_array(data, extract_numpy=True)
4 changes: 1 addition & 3 deletions pandas/core/base.py
@@ -1207,9 +1207,7 @@ def factorize(
uniques = Index(uniques)
return codes, uniques

_shared_docs[
"searchsorted"
] = """
_shared_docs["searchsorted"] = """
Find indices where elements should be inserted to maintain order.

Find the indices into a sorted {klass} `self` such that, if the
3 changes: 1 addition & 2 deletions pandas/core/computation/expr.py
@@ -695,8 +695,7 @@ def visit_Call(self, node, side=None, **kwargs):
if not isinstance(key, ast.keyword):
# error: "expr" has no attribute "id"
raise ValueError(
"keyword error in function call "
f"'{node.func.id}'" # type: ignore[attr-defined]
"keyword error in function call " f"'{node.func.id}'" # type: ignore[attr-defined]
)

if key.arg:
4 changes: 3 additions & 1 deletion pandas/core/dtypes/cast.py
@@ -589,7 +589,9 @@ def maybe_promote(dtype: np.dtype, fill_value=np.nan):
# error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type
# "Type[Any]"; expected "Hashable" [arg-type]
dtype, fill_value = _maybe_promote_cached(
dtype, fill_value, type(fill_value) # type: ignore[arg-type]
dtype,
fill_value,
type(fill_value), # type: ignore[arg-type]
)
except TypeError:
# if fill_value is not hashable (required for caching)
17 changes: 5 additions & 12 deletions pandas/core/frame.py
@@ -1405,9 +1405,7 @@ def style(self) -> Styler:

return Styler(self)

_shared_docs[
"items"
] = r"""
_shared_docs["items"] = r"""
Iterate over (column name, Series) pairs.

Iterates over the DataFrame columns, returning a tuple with
@@ -2030,8 +2028,7 @@ def to_dict(
orient: Literal[
"dict", "list", "series", "split", "tight", "records", "index"
] = "dict",
into: type[MutableMappingT]
| MutableMappingT = dict, # type: ignore[assignment]
into: type[MutableMappingT] | MutableMappingT = dict, # type: ignore[assignment]
index: bool = True,
) -> MutableMappingT | list[MutableMappingT]:
"""
@@ -9137,9 +9134,7 @@ def groupby(
dropna=dropna,
)

_shared_docs[
"pivot"
] = """
_shared_docs["pivot"] = """
Return reshaped DataFrame organized by given index / column values.

Reshape data (produce a "pivot" table) based on column values. Uses
@@ -9283,9 +9278,7 @@ def pivot(

return pivot(self, index=index, columns=columns, values=values)

_shared_docs[
"pivot_table"
] = """
_shared_docs["pivot_table"] = """
Create a spreadsheet-style pivot table as a DataFrame.

The levels in the pivot table will be stored in MultiIndex objects
@@ -12529,7 +12522,7 @@ def _to_dict_of_blocks(self):
mgr = cast(BlockManager, mgr_to_mgr(mgr, "block"))
return {
k: self._constructor_from_mgr(v, axes=v.axes).__finalize__(self)
for k, v, in mgr.to_dict().items()
for k, v in mgr.to_dict().items()
}

@property
27 changes: 13 additions & 14 deletions pandas/core/generic.py
@@ -8006,8 +8006,6 @@ def replace(
if items:
keys, values = zip(*items)
else:
# error: Incompatible types in assignment (expression has type
# "list[Never]", variable has type "tuple[Any, ...]")
keys, values = ([], []) # type: ignore[assignment]

are_mappings = [is_dict_like(v) for v in values]
@@ -8825,15 +8823,11 @@ def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):

if lower is not None:
cond = mask | (self >= lower)
result = result.where(
cond, lower, inplace=inplace
) # type: ignore[assignment]
result = result.where(cond, lower, inplace=inplace) # type: ignore[assignment]
if upper is not None:
cond = mask | (self <= upper)
result = self if inplace else result
result = result.where(
cond, upper, inplace=inplace
) # type: ignore[assignment]
result = result.where(cond, upper, inplace=inplace) # type: ignore[assignment]

return result

@@ -12242,7 +12236,12 @@ def _accum_func(

if axis == 1:
return self.T._accum_func(
name, func, axis=0, skipna=skipna, *args, **kwargs # noqa: B026
name,
func,
axis=0,
skipna=skipna,
*args, # noqa: B026
**kwargs,
).T

def block_accum_func(blk_values):
@@ -12720,14 +12719,16 @@ def __imul__(self, other) -> Self:
def __itruediv__(self, other) -> Self:
# error: Unsupported left operand type for / ("Type[NDFrame]")
return self._inplace_method(
other, type(self).__truediv__ # type: ignore[operator]
other,
type(self).__truediv__, # type: ignore[operator]
)

@final
def __ifloordiv__(self, other) -> Self:
# error: Unsupported left operand type for // ("Type[NDFrame]")
return self._inplace_method(
other, type(self).__floordiv__ # type: ignore[operator]
other,
type(self).__floordiv__, # type: ignore[operator]
)

@final
@@ -13495,9 +13496,7 @@ def last_valid_index(self) -> Hashable | None:
Series([], dtype: bool)
"""

_shared_docs[
"stat_func_example"
] = """
_shared_docs["stat_func_example"] = """

Examples
--------
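One last formatting consequence, from the `_accum_func` hunk above: ruff's reflow puts the `# noqa: B026` on the `*args` line it applies to. B026 (flake8-bugbear) flags star-arg unpacking after a keyword argument, which is legal but easy to misread; a small demonstration:

```python
def f(*args, **kwargs):
    return args, kwargs

extra = (1, 2)
# Legal syntax, but the unpacked values land *before* the keyword at call
# time, which is what B026 warns about.
print(f(0, key="v", *extra))  # ((0, 1, 2), {'key': 'v'})
```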