
Commit 2522b0a

STY: Use ruff instead of black for formatting (#56704)
* STY: Use ruff instead of black for formatting
* mypy
* move pylint
* Remove trailing comma
1 parent c4b6bed commit 2522b0a


71 files changed: +235, -339 lines
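The most common mechanical change in the diff below is illustrated by the following sketch (illustrative only, not taken from the commit; the names are stand-ins for the pandas internals touched further down). ruff-format keeps subscript assignments such as the shared-docstring registrations on one line where black had split the subscript across three lines, and with one argument per line a trailing "# type: ignore[...]" comment stays attached to the specific argument it silences.

# Illustrative sketch only (not from the commit); names are stand-ins.
_shared_docs: dict[str, str] = {}

# Pattern 1: black wrapped the subscript across three lines when the assigned
# string was long; ruff-format keeps the subscript on a single line.
_shared_docs["searchsorted"] = """
Find indices where elements should be inserted to maintain order.
"""


def check_value_size(value, mask, length):  # hypothetical stand-in helper
    return value


# Pattern 2: with one argument per line, the suppression comment sits on the
# argument it targets instead of trailing a single wrapped argument list.
value = check_value_size(
    "fill",
    None,  # type: ignore[arg-type]
    3,
)

print(_shared_docs["searchsorted"].strip(), value)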

.pre-commit-config.yaml

+3 -5
@@ -18,11 +18,6 @@ ci:
     # manual stage hooks
     skip: [pylint, pyright, mypy]
 repos:
--   repo: https://github.com/hauntsaninja/black-pre-commit-mirror
-    # black compiled with mypyc
-    rev: 23.11.0
-    hooks:
-    -   id: black
 -   repo: https://github.com/astral-sh/ruff-pre-commit
     rev: v0.1.6
     hooks:
@@ -35,6 +30,9 @@ repos:
         files: ^pandas
         exclude: ^pandas/tests
         args: [--select, "ANN001,ANN2", --fix-only, --exit-non-zero-on-fix]
+    -   id: ruff-format
+        # TODO: "." not needed in ruff 0.1.8
+        args: ["."]
 -   repo: https://github.com/jendrikseipp/vulture
     rev: 'v2.10'
     hooks:
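To mirror the new hook locally, a minimal sketch (not part of the commit): it shells out to the ruff executable, which must be installed in the active environment; the hook above pins ruff-pre-commit at v0.1.6, and the explicit "." mirrors the hook's args and, per the TODO, should become unnecessary from ruff 0.1.8.

# Sketch: run the same formatter the ruff-format hook invokes.
# Assumes the `ruff` executable is available on PATH.
import subprocess

# Format the repository in place, mirroring args: ["."] above.
subprocess.run(["ruff", "format", "."], check=True)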

asv_bench/benchmarks/indexing.py

+1 -3
@@ -84,9 +84,7 @@ def time_loc_slice(self, index, index_structure):
 
 class NumericMaskedIndexing:
     monotonic_list = list(range(10**6))
-    non_monotonic_list = (
-        list(range(50)) + [54, 53, 52, 51] + list(range(55, 10**6 - 1))
-    )
+    non_monotonic_list = list(range(50)) + [54, 53, 52, 51] + list(range(55, 10**6 - 1))
 
     params = [
         ("Int64", "UInt64", "Float64"),

asv_bench/benchmarks/io/style.py

+2 -1
@@ -76,7 +76,8 @@ def _style_format(self):
         # apply a formatting function
         # subset is flexible but hinders vectorised solutions
         self.st = self.df.style.format(
-            "{:,.3f}", subset=IndexSlice["row_1":f"row_{ir}", "float_1":f"float_{ic}"]
+            "{:,.3f}",
+            subset=IndexSlice["row_1" : f"row_{ir}", "float_1" : f"float_{ic}"],
         )
 
     def _style_apply_format_hide(self):

doc/source/development/contributing_codebase.rst

+1 -1
@@ -38,7 +38,7 @@ Pre-commit
 ----------
 
 Additionally, :ref:`Continuous Integration <contributing.ci>` will run code formatting checks
-like ``black``, ``ruff``,
+like ``ruff``,
 ``isort``, and ``clang-format`` and more using `pre-commit hooks <https://pre-commit.com/>`_.
 Any warnings from these checks will cause the :ref:`Continuous Integration <contributing.ci>` to fail; therefore,
 it is helpful to run the check yourself before submitting code. This
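Since the paragraph above recommends running the checks yourself before pushing, here is a minimal sketch of doing so from Python (an assumption: pre-commit is installed and the repository's hooks have been set up, e.g. via `pre-commit install`).

# Sketch: run all configured pre-commit hooks (ruff, ruff-format, isort,
# clang-format, ...) against every file, as CI does. Not part of the commit.
import subprocess

subprocess.run(["pre-commit", "run", "--all-files"], check=True)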

pandas/_libs/hashtable.pyi

+5 -2
@@ -196,15 +196,18 @@ class HashTable:
         *,
         return_inverse: Literal[True],
         mask: None = ...,
-    ) -> tuple[np.ndarray, npt.NDArray[np.intp],]: ...  # np.ndarray[subclass-specific]
+    ) -> tuple[np.ndarray, npt.NDArray[np.intp]]: ...  # np.ndarray[subclass-specific]
     @overload
     def unique(
         self,
         values: np.ndarray,  # np.ndarray[subclass-specific]
         *,
         return_inverse: Literal[False] = ...,
         mask: npt.NDArray[np.bool_],
-    ) -> tuple[np.ndarray, npt.NDArray[np.bool_],]: ...  # np.ndarray[subclass-specific]
+    ) -> tuple[
+        np.ndarray,
+        npt.NDArray[np.bool_],
+    ]: ...  # np.ndarray[subclass-specific]
     def factorize(
         self,
         values: np.ndarray,  # np.ndarray[subclass-specific]

pandas/_libs/lib.pyi

+4 -2
@@ -179,7 +179,8 @@ def indices_fast(
     sorted_labels: list[npt.NDArray[np.int64]],
 ) -> dict[Hashable, npt.NDArray[np.intp]]: ...
 def generate_slices(
-    labels: np.ndarray, ngroups: int  # const intp_t[:]
+    labels: np.ndarray,
+    ngroups: int,  # const intp_t[:]
 ) -> tuple[npt.NDArray[np.int64], npt.NDArray[np.int64]]: ...
 def count_level_2d(
     mask: np.ndarray,  # ndarray[uint8_t, ndim=2, cast=True],
@@ -209,5 +210,6 @@ def get_reverse_indexer(
 def is_bool_list(obj: list) -> bool: ...
 def dtypes_all_equal(types: list[DtypeObj]) -> bool: ...
 def is_range_indexer(
-    left: np.ndarray, n: int  # np.ndarray[np.int64, ndim=1]
+    left: np.ndarray,
+    n: int,  # np.ndarray[np.int64, ndim=1]
 ) -> bool: ...

pandas/_testing/_hypothesis.py

+2 -6
@@ -54,12 +54,8 @@
 DATETIME_NO_TZ = st.datetimes()
 
 DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes(
-    min_value=pd.Timestamp(
-        1900, 1, 1
-    ).to_pydatetime(),  # pyright: ignore[reportGeneralTypeIssues]
-    max_value=pd.Timestamp(
-        1900, 1, 1
-    ).to_pydatetime(),  # pyright: ignore[reportGeneralTypeIssues]
+    min_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),  # pyright: ignore[reportGeneralTypeIssues]
+    max_value=pd.Timestamp(1900, 1, 1).to_pydatetime(),  # pyright: ignore[reportGeneralTypeIssues]
     timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()),
 )

pandas/core/apply.py

+2 -1
@@ -1010,7 +1010,8 @@ def wrapper(*args, **kwargs):
            # [..., Any] | str] | dict[Hashable,Callable[..., Any] | str |
            # list[Callable[..., Any] | str]]"; expected "Hashable"
            nb_looper = generate_apply_looper(
-               self.func, **engine_kwargs  # type: ignore[arg-type]
+               self.func,  # type: ignore[arg-type]
+               **engine_kwargs,
            )
            result = nb_looper(self.values, self.axis)
            # If we made the result 2-D, squeeze it back to 1-D

pandas/core/arrays/_arrow_string_mixins.py

+2 -1
@@ -58,7 +58,8 @@ def _str_get(self, i: int) -> Self:
             self._pa_array, start=start, stop=stop, step=step
         )
         null_value = pa.scalar(
-            None, type=self._pa_array.type  # type: ignore[attr-defined]
+            None,
+            type=self._pa_array.type,  # type: ignore[attr-defined]
         )
         result = pc.if_else(not_out_of_bounds, selected, null_value)
         return type(self)(result)

pandas/core/arrays/_mixins.py

+3 -1
@@ -347,7 +347,9 @@ def fillna(
         # error: Argument 2 to "check_value_size" has incompatible type
         # "ExtensionArray"; expected "ndarray"
         value = missing.check_value_size(
-            value, mask, len(self)  # type: ignore[arg-type]
+            value,
+            mask,  # type: ignore[arg-type]
+            len(self),
         )
 
         if mask.any():

pandas/core/arrays/arrow/array.py

+2 -1
@@ -2855,7 +2855,8 @@ def _dt_tz_localize(
             "shift_backward": "earliest",
             "shift_forward": "latest",
         }.get(
-            nonexistent, None  # type: ignore[arg-type]
+            nonexistent,  # type: ignore[arg-type]
+            None,
         )
         if nonexistent_pa is None:
             raise NotImplementedError(f"{nonexistent=} is not supported")

pandas/core/arrays/base.py

+4 -4
@@ -1121,7 +1121,9 @@ def fillna(
         # error: Argument 2 to "check_value_size" has incompatible type
         # "ExtensionArray"; expected "ndarray"
         value = missing.check_value_size(
-            value, mask, len(self)  # type: ignore[arg-type]
+            value,
+            mask,  # type: ignore[arg-type]
+            len(self),
         )
 
         if mask.any():
@@ -1490,9 +1492,7 @@ def factorize(
         uniques_ea = self._from_factorized(uniques, self)
         return codes, uniques_ea
 
-    _extension_array_shared_docs[
-        "repeat"
-    ] = """
+    _extension_array_shared_docs["repeat"] = """
 Repeat elements of a %(klass)s.
 
 Returns a new %(klass)s where each element of the current %(klass)s

pandas/core/arrays/datetimelike.py

+9 -3
@@ -1236,7 +1236,9 @@ def _add_timedeltalike(self, other: Timedelta | TimedeltaArray) -> Self:
 
         # error: Unexpected keyword argument "freq" for "_simple_new" of "NDArrayBacked"
         return type(self)._simple_new(
-            res_values, dtype=self.dtype, freq=new_freq  # type: ignore[call-arg]
+            res_values,
+            dtype=self.dtype,
+            freq=new_freq,  # type: ignore[call-arg]
         )
 
     @final
@@ -1256,7 +1258,9 @@ def _add_nat(self) -> Self:
         result = result.view(self._ndarray.dtype)  # preserve reso
         # error: Unexpected keyword argument "freq" for "_simple_new" of "NDArrayBacked"
         return type(self)._simple_new(
-            result, dtype=self.dtype, freq=None  # type: ignore[call-arg]
+            result,
+            dtype=self.dtype,
+            freq=None,  # type: ignore[call-arg]
         )
 
     @final
@@ -2162,7 +2166,9 @@ def as_unit(self, unit: str, round_ok: bool = True) -> Self:
         # error: Unexpected keyword argument "freq" for "_simple_new" of
         # "NDArrayBacked" [call-arg]
         return type(self)._simple_new(
-            new_values, dtype=new_dtype, freq=self.freq  # type: ignore[call-arg]
+            new_values,
+            dtype=new_dtype,
+            freq=self.freq,  # type: ignore[call-arg]
         )
 
     # TODO: annotate other as DatetimeArray | TimedeltaArray | Timestamp | Timedelta

pandas/core/arrays/interval.py

+2 -6
@@ -122,9 +122,7 @@
 }
 
 
-_interval_shared_docs[
-    "class"
-] = """
+_interval_shared_docs["class"] = """
 %(summary)s
 
 Parameters
@@ -1489,9 +1487,7 @@ def set_closed(self, closed: IntervalClosedType) -> Self:
         dtype = IntervalDtype(left.dtype, closed=closed)
         return self._simple_new(left, right, dtype=dtype)
 
-    _interval_shared_docs[
-        "is_non_overlapping_monotonic"
-    ] = """
+    _interval_shared_docs["is_non_overlapping_monotonic"] = """
 Return a boolean whether the %(klass)s is non-overlapping and monotonic.
 
 Non-overlapping means (no Intervals share points), and monotonic means

pandas/core/arrays/masked.py

+2 -1
@@ -1089,7 +1089,8 @@ def value_counts(self, dropna: bool = True) -> Series:
         arr = IntegerArray(value_counts, mask)
         index = Index(
             self.dtype.construct_array_type()(
-                keys, mask_index  # type: ignore[arg-type]
+                keys,  # type: ignore[arg-type]
+                mask_index,
             )
         )
         return Series(arr, index=index, name="count", copy=False)

pandas/core/arrays/sparse/array.py

+2 -1
@@ -454,7 +454,8 @@ def __init__(
             # error: Argument "dtype" to "asarray" has incompatible type
             # "Union[ExtensionDtype, dtype[Any], None]"; expected "None"
             sparse_values = np.asarray(
-                data.sp_values, dtype=dtype  # type: ignore[arg-type]
+                data.sp_values,
+                dtype=dtype,  # type: ignore[arg-type]
             )
         elif sparse_index is None:
             data = extract_array(data, extract_numpy=True)

pandas/core/base.py

+1 -3
@@ -1207,9 +1207,7 @@ def factorize(
         uniques = Index(uniques)
         return codes, uniques
 
-    _shared_docs[
-        "searchsorted"
-    ] = """
+    _shared_docs["searchsorted"] = """
 Find indices where elements should be inserted to maintain order.
 
 Find the indices into a sorted {klass} `self` such that, if the

pandas/core/computation/expr.py

+1 -2
@@ -695,8 +695,7 @@ def visit_Call(self, node, side=None, **kwargs):
             if not isinstance(key, ast.keyword):
                 # error: "expr" has no attribute "id"
                 raise ValueError(
-                    "keyword error in function call "
-                    f"'{node.func.id}'"  # type: ignore[attr-defined]
+                    "keyword error in function call " f"'{node.func.id}'"  # type: ignore[attr-defined]
                 )
 
             if key.arg:

pandas/core/dtypes/cast.py

+3 -1
@@ -589,7 +589,9 @@ def maybe_promote(dtype: np.dtype, fill_value=np.nan):
         # error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type
         # "Type[Any]"; expected "Hashable" [arg-type]
         dtype, fill_value = _maybe_promote_cached(
-            dtype, fill_value, type(fill_value)  # type: ignore[arg-type]
+            dtype,
+            fill_value,
+            type(fill_value),  # type: ignore[arg-type]
         )
     except TypeError:
         # if fill_value is not hashable (required for caching)

pandas/core/frame.py

+5 -12
@@ -1405,9 +1405,7 @@ def style(self) -> Styler:
 
         return Styler(self)
 
-    _shared_docs[
-        "items"
-    ] = r"""
+    _shared_docs["items"] = r"""
 Iterate over (column name, Series) pairs.
 
 Iterates over the DataFrame columns, returning a tuple with
@@ -2030,8 +2028,7 @@ def to_dict(
         orient: Literal[
             "dict", "list", "series", "split", "tight", "records", "index"
         ] = "dict",
-        into: type[MutableMappingT]
-        | MutableMappingT = dict,  # type: ignore[assignment]
+        into: type[MutableMappingT] | MutableMappingT = dict,  # type: ignore[assignment]
         index: bool = True,
     ) -> MutableMappingT | list[MutableMappingT]:
         """
@@ -9137,9 +9134,7 @@ def groupby(
             dropna=dropna,
         )
 
-    _shared_docs[
-        "pivot"
-    ] = """
+    _shared_docs["pivot"] = """
 Return reshaped DataFrame organized by given index / column values.
 
 Reshape data (produce a "pivot" table) based on column values. Uses
@@ -9283,9 +9278,7 @@ def pivot(
 
         return pivot(self, index=index, columns=columns, values=values)
 
-    _shared_docs[
-        "pivot_table"
-    ] = """
+    _shared_docs["pivot_table"] = """
 Create a spreadsheet-style pivot table as a DataFrame.
 
 The levels in the pivot table will be stored in MultiIndex objects
@@ -12529,7 +12522,7 @@ def _to_dict_of_blocks(self):
         mgr = cast(BlockManager, mgr_to_mgr(mgr, "block"))
         return {
             k: self._constructor_from_mgr(v, axes=v.axes).__finalize__(self)
-            for k, v, in mgr.to_dict().items()
+            for k, v in mgr.to_dict().items()
         }
 
     @property

pandas/core/generic.py

+13 -14
@@ -8006,8 +8006,6 @@ def replace(
                 if items:
                     keys, values = zip(*items)
                 else:
-                    # error: Incompatible types in assignment (expression has type
-                    # "list[Never]", variable has type "tuple[Any, ...]")
                     keys, values = ([], [])  # type: ignore[assignment]
 
                 are_mappings = [is_dict_like(v) for v in values]
@@ -8825,15 +8823,11 @@ def _clip_with_scalar(self, lower, upper, inplace: bool_t = False):
 
         if lower is not None:
             cond = mask | (self >= lower)
-            result = result.where(
-                cond, lower, inplace=inplace
-            )  # type: ignore[assignment]
+            result = result.where(cond, lower, inplace=inplace)  # type: ignore[assignment]
         if upper is not None:
             cond = mask | (self <= upper)
             result = self if inplace else result
-            result = result.where(
-                cond, upper, inplace=inplace
-            )  # type: ignore[assignment]
+            result = result.where(cond, upper, inplace=inplace)  # type: ignore[assignment]
 
         return result
 
@@ -12242,7 +12236,12 @@ def _accum_func(
 
         if axis == 1:
             return self.T._accum_func(
-                name, func, axis=0, skipna=skipna, *args, **kwargs  # noqa: B026
+                name,
+                func,
+                axis=0,
+                skipna=skipna,
+                *args,  # noqa: B026
+                **kwargs,
             ).T
 
         def block_accum_func(blk_values):
@@ -12720,14 +12719,16 @@ def __imul__(self, other) -> Self:
     def __itruediv__(self, other) -> Self:
         # error: Unsupported left operand type for / ("Type[NDFrame]")
         return self._inplace_method(
-            other, type(self).__truediv__  # type: ignore[operator]
+            other,
+            type(self).__truediv__,  # type: ignore[operator]
        )
 
     @final
     def __ifloordiv__(self, other) -> Self:
         # error: Unsupported left operand type for // ("Type[NDFrame]")
         return self._inplace_method(
-            other, type(self).__floordiv__  # type: ignore[operator]
+            other,
+            type(self).__floordiv__,  # type: ignore[operator]
         )
 
     @final
@@ -13495,9 +13496,7 @@ def last_valid_index(self) -> Hashable | None:
         Series([], dtype: bool)
         """
 
-    _shared_docs[
-        "stat_func_example"
-    ] = """
+    _shared_docs["stat_func_example"] = """
 
 Examples
 --------
