Commit 6fac28d

STYLE: fix pylint unnecessary-comprehension warnings (#49674)
* STYLE: fix pylint unnecessary-comprehension warnings
* fixup! STYLE: fix pylint unnecessary-comprehension warnings
* fixup! fixup! STYLE: fix pylint unnecessary-comprehension warnings
1 parent f569301 commit 6fac28d
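
For context, pylint's unnecessary-comprehension check flags comprehensions that only repackage an existing iterable of pairs or items; the dict() and list() constructors express the same thing directly. A minimal sketch of the equivalences this commit relies on (the variable names below are illustrative, not taken from the pandas code):

# Illustrative only: names below are not from the pandas code base.
keys = ["a", "b", "c"]
values = [1, 2, 3]
pairs = [("a", 1), ("b", 2), ("c", 3)]

# dict comprehension over an iterable of pairs -> dict(...)
assert {k: v for k, v in pairs} == dict(pairs)

# dict comprehension over zip(...) -> dict(zip(...))
assert {k: v for k, v in zip(keys, values)} == dict(zip(keys, values))

# dict comprehension over enumerate(...) -> dict(enumerate(...))
assert {i: v for i, v in enumerate(values)} == dict(enumerate(values))

# list comprehension that only copies an iterable -> list(...)
items = {"x", "y"}
assert [item for item in items] == list(items)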

File tree: 13 files changed, +15 / -15 lines changed


asv_bench/benchmarks/reshape.py

Lines changed: 1 addition & 3 deletions
@@ -112,9 +112,7 @@ def setup(self, dtype):
         values = np.take(list(string.ascii_letters), indices)
         values = [pd.Categorical(v) for v in values.T]

-        self.df = DataFrame(
-            {i: cat for i, cat in enumerate(values)}, index, columns
-        )
+        self.df = DataFrame(dict(enumerate(values)), index, columns)

         self.df2 = self.df.iloc[:-1]

doc/source/conf.py

Lines changed: 1 addition & 1 deletion
@@ -374,7 +374,7 @@


 html_context = {
-    "redirects": {old: new for old, new in moved_api_pages},
+    "redirects": dict(moved_api_pages),
     "header": header,
 }

doc/sphinxext/announce.py

Lines changed: 1 addition & 1 deletion
@@ -88,7 +88,7 @@ def get_authors(revision_range):
     pre.discard("Homu")

     # Append '+' to new authors.
-    authors = [s + " +" for s in cur - pre] + [s for s in cur & pre]
+    authors = [s + " +" for s in cur - pre] + list(cur & pre)
     authors.sort()
     return authors

pandas/core/computation/expr.py

Lines changed: 1 addition & 1 deletion
@@ -382,7 +382,7 @@ class BaseExprVisitor(ast.NodeVisitor):

     unary_ops = UNARY_OPS_SYMS
     unary_op_nodes = "UAdd", "USub", "Invert", "Not"
-    unary_op_nodes_map = {k: v for k, v in zip(unary_ops, unary_op_nodes)}
+    unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))

     rewrite_map = {
         ast.Eq: ast.In,

pandas/io/xml.py

Lines changed: 2 additions & 2 deletions
@@ -283,7 +283,7 @@ def _parse_nodes(self, elems: list[Any]) -> list[dict[str, str | None]]:
         dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts]

         if self.names:
-            dicts = [{nm: v for nm, v in zip(self.names, d.values())} for d in dicts]
+            dicts = [dict(zip(self.names, d.values())) for d in dicts]

         return dicts

@@ -380,7 +380,7 @@ def _iterparse_nodes(self, iterparse: Callable) -> list[dict[str, str | None]]:
         dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts]

         if self.names:
-            dicts = [{nm: v for nm, v in zip(self.names, d.values())} for d in dicts]
+            dicts = [dict(zip(self.names, d.values())) for d in dicts]

         return dicts

pandas/tests/frame/constructors/test_from_records.py

Lines changed: 1 addition & 1 deletion
@@ -151,7 +151,7 @@ def test_from_records_dictlike(self):
         for b in blocks.values():
             columns.extend(b.columns)

-        asdict = {x: y for x, y in df.items()}
+        asdict = dict(df.items())
         asdict2 = {x: y.values for x, y in df.items()}

         # dict of series & dict of ndarrays (have dtype info)

pandas/tests/frame/test_constructors.py

Lines changed: 1 addition & 1 deletion
@@ -344,7 +344,7 @@ def test_constructor_mixed_dtypes(self, typ, ad):

         for d, a in zip(dtypes, arrays):
             assert a.dtype == d
-        ad.update({d: a for d, a in zip(dtypes, arrays)})
+        ad.update(dict(zip(dtypes, arrays)))
         df = DataFrame(ad)

         dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES

pandas/tests/groupby/test_apply.py

Lines changed: 1 addition & 1 deletion
@@ -930,7 +930,7 @@ def test_apply_index_has_complex_internals(index):
         (lambda x: set(x.index.to_list()), [{0, 1}, {2, 3}]),
         (lambda x: tuple(x.index.to_list()), [(0, 1), (2, 3)]),
         (
-            lambda x: {n: i for (n, i) in enumerate(x.index.to_list())},
+            lambda x: dict(enumerate(x.index.to_list())),
             [{0: 0, 1: 1}, {0: 2, 1: 3}],
         ),
         (

pandas/tests/groupby/test_grouping.py

Lines changed: 3 additions & 0 deletions
@@ -946,6 +946,9 @@ def test_multi_iter_frame(self, three_group):
         df["k1"] = np.array(["b", "b", "b", "a", "a", "a"])
         df["k2"] = np.array(["1", "1", "1", "2", "2", "2"])
         grouped = df.groupby(["k1", "k2"])
+        # calling `dict` on a DataFrameGroupBy leads to a TypeError,
+        # we need to use a dictionary comprehension here
+        # pylint: disable-next=unnecessary-comprehension
         groups = {key: gp for key, gp in grouped}
         assert len(groups) == 2
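
The suppression above is the one place the comprehension is kept on purpose: iterating a DataFrameGroupBy yields (key, group) pairs, but, as the new comment notes, passing the groupby object straight to dict() raises a TypeError (likely because dict() expects a callable .keys() method, while a GroupBy exposes .keys as the grouping keys). A small sketch of the difference; the frame and key names are made up for illustration, not taken from the test:

# Illustrative sketch of why dict(grouped) is not a drop-in replacement here.
import pandas as pd

df = pd.DataFrame({"k1": ["a", "a", "b"], "k2": ["1", "2", "2"], "v": [1, 2, 3]})
grouped = df.groupby(["k1", "k2"])

# Iteration yields (key, group) pairs, so the comprehension works.
groups = {key: gp for key, gp in grouped}
assert len(groups) == 3

# dict(grouped) raises a TypeError instead (the reason the check is suppressed).
try:
    dict(grouped)
except TypeError:
    pass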

pandas/tests/indexes/categorical/test_map.py

Lines changed: 1 addition & 1 deletion
@@ -109,7 +109,7 @@ def test_map_with_dict_or_series(self):
         # Order of categories in result can be different
         tm.assert_index_equal(result, expected)

-        mapper = {o: n for o, n in zip(orig_values[:-1], new_values[:-1])}
+        mapper = dict(zip(orig_values[:-1], new_values[:-1]))
         result = cur_index.map(mapper)
         # Order of categories in result can be different
         tm.assert_index_equal(result, expected)

pandas/tests/io/test_sql.py

Lines changed: 1 addition & 1 deletion
@@ -152,7 +152,7 @@ def create_and_load_iris(conn, iris_file: Path, dialect: str):
     with iris_file.open(newline=None) as csvfile:
         reader = csv.reader(csvfile)
         header = next(reader)
-        params = [{key: value for key, value in zip(header, row)} for row in reader]
+        params = [dict(zip(header, row)) for row in reader]
         stmt = insert(iris).values(params)
         if isinstance(conn, Engine):
             with conn.connect() as conn:

pandas/tests/series/methods/test_replace.py

Lines changed: 1 addition & 1 deletion
@@ -661,7 +661,7 @@ def test_replace_different_int_types(self, any_int_numpy_dtype):
         labs = pd.Series([1, 1, 1, 0, 0, 2, 2, 2], dtype=any_int_numpy_dtype)

         maps = pd.Series([0, 2, 1], dtype=any_int_numpy_dtype)
-        map_dict = {old: new for (old, new) in zip(maps.values, maps.index)}
+        map_dict = dict(zip(maps.values, maps.index))

         result = labs.replace(map_dict)
         expected = labs.replace({0: 0, 2: 1, 1: 2})

pyproject.toml

Lines changed: 0 additions & 1 deletion
@@ -120,7 +120,6 @@ disable = [
   "too-many-public-methods",
   "too-many-return-statements",
   "too-many-statements",
-  "unnecessary-comprehension",
   "unnecessary-list-index-lookup",

   # pylint type "W": warning, for python specific problems
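
Dropping "unnecessary-comprehension" from the disable list above is what turns the check on for the whole codebase; the inline pylint: disable-next comment in test_grouping.py is then the single remaining opt-out. One way to spot-check a file locally, assuming pylint is installed (the target path is only an example, not part of the commit):

# Run just the unnecessary-comprehension check against a single file.
import subprocess

subprocess.run(
    ["pylint", "--disable=all", "--enable=unnecessary-comprehension", "pandas/io/xml.py"],
    check=False,  # pylint exits non-zero when it reports messages
)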

0 commit comments
