Skip to content

Commit

Permalink
CLN: Update .format(...) strings to f-expressions (pandas-dev#29571)
Browse files — browse the repository at this point in the history
  • Loading branch information
srinivasreddy authored and proost committed Dec 19, 2019
1 parent a39f720 commit 60a7715
Show / hide file tree
Showing 14 changed files with 32 additions and 32 deletions.
10 changes: 5 additions & 5 deletions asv_bench/benchmarks/categoricals.py
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ class ValueCounts:

def setup(self, dropna):
n = 5 * 10 ** 5
arr = ["s{:04d}".format(i) for i in np.random.randint(0, n // 10, size=n)]
arr = [f"s{i:04d}" for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype("category")

def time_value_counts(self, dropna):
Expand All @@ -102,7 +102,7 @@ def time_rendering(self):
class SetCategories:
def setup(self):
n = 5 * 10 ** 5
arr = ["s{:04d}".format(i) for i in np.random.randint(0, n // 10, size=n)]
arr = [f"s{i:04d}" for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype("category")

def time_set_categories(self):
Expand All @@ -112,7 +112,7 @@ def time_set_categories(self):
class RemoveCategories:
def setup(self):
n = 5 * 10 ** 5
arr = ["s{:04d}".format(i) for i in np.random.randint(0, n // 10, size=n)]
arr = [f"s{i:04d}" for i in np.random.randint(0, n // 10, size=n)]
self.ts = pd.Series(arr).astype("category")

def time_remove_categories(self):
Expand Down Expand Up @@ -166,7 +166,7 @@ def setup(self, dtype):
sample_size = 100
arr = [i for i in np.random.randint(0, n // 10, size=n)]
if dtype == "object":
arr = ["s{:04d}".format(i) for i in arr]
arr = [f"s{i:04d}" for i in arr]
self.sample = np.random.choice(arr, sample_size)
self.series = pd.Series(arr).astype("category")

Expand Down Expand Up @@ -225,7 +225,7 @@ def setup(self, index):
elif index == "non_monotonic":
self.data = pd.Categorical.from_codes([0, 1, 2] * N, categories=categories)
else:
raise ValueError("Invalid index param: {}".format(index))
raise ValueError(f"Invalid index param: {index}")

self.scalar = 10000
self.list = list(range(10000))
Expand Down
6 changes: 2 additions & 4 deletions asv_bench/benchmarks/gil.py
Original file line number Diff line number Diff line change
Expand Up @@ -250,13 +250,11 @@ def setup(self, dtype):
np.random.randn(rows, cols), index=date_range("1/1/2000", periods=rows)
),
"object": DataFrame(
"foo",
index=range(rows),
columns=["object%03d".format(i) for i in range(5)],
"foo", index=range(rows), columns=["object%03d" for _ in range(5)]
),
}

self.fname = "__test_{}__.csv".format(dtype)
self.fname = f"__test_{dtype}__.csv"
df = data[dtype]
df.to_csv(self.fname)

Expand Down
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/index_object.py
Original file line number Diff line number Diff line change
Expand Up @@ -146,7 +146,7 @@ class Indexing:

def setup(self, dtype):
N = 10 ** 6
self.idx = getattr(tm, "make{}Index".format(dtype))(N)
self.idx = getattr(tm, f"make{dtype}Index")(N)
self.array_mask = (np.arange(N) % 3) == 0
self.series_mask = Series(self.array_mask)
self.sorted = self.idx.sort_values()
Expand Down
10 changes: 4 additions & 6 deletions asv_bench/benchmarks/io/csv.py
Original file line number Diff line number Diff line change
Expand Up @@ -202,7 +202,7 @@ def setup(self, sep, thousands):
data = np.random.randn(N, K) * np.random.randint(100, 10000, (N, K))
df = DataFrame(data)
if thousands is not None:
fmt = ":{}".format(thousands)
fmt = f":{thousands}"
fmt = "{" + fmt + "}"
df = df.applymap(lambda x: fmt.format(x))
df.to_csv(self.fname, sep=sep)
Expand Down Expand Up @@ -231,7 +231,7 @@ def setup(self, sep, decimal, float_precision):
floats = [
"".join(random.choice(string.digits) for _ in range(28)) for _ in range(15)
]
rows = sep.join(["0{}".format(decimal) + "{}"] * 3) + "\n"
rows = sep.join([f"0{decimal}" + "{}"] * 3) + "\n"
data = rows * 5
data = data.format(*floats) * 200 # 1000 x 3 strings csv
self.StringIO_input = StringIO(data)
Expand Down Expand Up @@ -309,9 +309,7 @@ class ReadCSVCachedParseDates(StringIORewind):
param_names = ["do_cache"]

def setup(self, do_cache):
data = (
"\n".join("10/{}".format(year) for year in range(2000, 2100)) + "\n"
) * 10
data = ("\n".join(f"10/{year}" for year in range(2000, 2100)) + "\n") * 10
self.StringIO_input = StringIO(data)

def time_read_csv_cached(self, do_cache):
Expand All @@ -336,7 +334,7 @@ class ReadCSVMemoryGrowth(BaseIO):
def setup(self):
with open(self.fname, "w") as f:
for i in range(self.num_rows):
f.write("{i}\n".format(i=i))
f.write(f"{i}\n")

def mem_parser_chunks(self):
# see gh-24805.
Expand Down
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/io/excel.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ def _generate_dataframe():
C = 5
df = DataFrame(
np.random.randn(N, C),
columns=["float{}".format(i) for i in range(C)],
columns=[f"float{i}" for i in range(C)],
index=date_range("20000101", periods=N, freq="H"),
)
df["object"] = tm.makeStringIndex(N)
Expand Down
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/io/hdf.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ def setup(self, format):
C = 5
self.df = DataFrame(
np.random.randn(N, C),
columns=["float{}".format(i) for i in range(C)],
columns=[f"float{i}" for i in range(C)],
index=date_range("20000101", periods=N, freq="H"),
)
self.df["object"] = tm.makeStringIndex(N)
Expand Down
4 changes: 2 additions & 2 deletions asv_bench/benchmarks/io/json.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ def setup(self, orient, index):
}
df = DataFrame(
np.random.randn(N, 5),
columns=["float_{}".format(i) for i in range(5)],
columns=[f"float_{i}" for i in range(5)],
index=indexes[index],
)
df.to_json(self.fname, orient=orient)
Expand All @@ -43,7 +43,7 @@ def setup(self, index):
}
df = DataFrame(
np.random.randn(N, 5),
columns=["float_{}".format(i) for i in range(5)],
columns=[f"float_{i}" for i in range(5)],
index=indexes[index],
)
df.to_json(self.fname, orient="records", lines=True)
Expand Down
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/io/msgpack.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ def setup(self):
C = 5
self.df = DataFrame(
np.random.randn(N, C),
columns=["float{}".format(i) for i in range(C)],
columns=[f"float{i}" for i in range(C)],
index=date_range("20000101", periods=N, freq="H"),
)
self.df["object"] = tm.makeStringIndex(N)
Expand Down
2 changes: 1 addition & 1 deletion asv_bench/benchmarks/io/pickle.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ def setup(self):
C = 5
self.df = DataFrame(
np.random.randn(N, C),
columns=["float{}".format(i) for i in range(C)],
columns=[f"float{i}" for i in range(C)],
index=date_range("20000101", periods=N, freq="H"),
)
self.df["object"] = tm.makeStringIndex(N)
Expand Down
4 changes: 2 additions & 2 deletions asv_bench/benchmarks/io/sql.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ def setup(self, connection):
"sqlite": sqlite3.connect(":memory:"),
}
self.table_name = "test_type"
self.query_all = "SELECT * FROM {}".format(self.table_name)
self.query_all = f"SELECT * FROM {self.table_name}"
self.con = con[connection]
self.df = DataFrame(
{
Expand Down Expand Up @@ -58,7 +58,7 @@ def setup(self, connection, dtype):
"sqlite": sqlite3.connect(":memory:"),
}
self.table_name = "test_type"
self.query_col = "SELECT {} FROM {}".format(dtype, self.table_name)
self.query_col = f"SELECT {dtype} FROM {self.table_name}"
self.con = con[connection]
self.df = DataFrame(
{
Expand Down
4 changes: 2 additions & 2 deletions asv_bench/benchmarks/io/stata.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ def setup(self, convert_dates):
C = self.C = 5
self.df = DataFrame(
np.random.randn(N, C),
columns=["float{}".format(i) for i in range(C)],
columns=[f"float{i}" for i in range(C)],
index=date_range("20000101", periods=N, freq="H"),
)
self.df["object"] = tm.makeStringIndex(self.N)
Expand Down Expand Up @@ -47,7 +47,7 @@ def setup(self, convert_dates):
for i in range(10):
missing_data = np.random.randn(self.N)
missing_data[missing_data < 0] = np.nan
self.df["missing_{0}".format(i)] = missing_data
self.df[f"missing_{i}"] = missing_data
self.df.to_stata(self.fname, self.convert_dates)


Expand Down
6 changes: 3 additions & 3 deletions asv_bench/benchmarks/timedelta.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@ def setup(self):
self.str_days = []
self.str_seconds = []
for i in self.ints:
self.str_days.append("{0} days".format(i))
self.str_seconds.append("00:00:{0:02d}".format(i))
self.str_days.append(f"{i} days")
self.str_seconds.append(f"00:00:{i:02d}")

def time_convert_int(self):
to_timedelta(self.ints, unit="s")
Expand All @@ -34,7 +34,7 @@ class ToTimedeltaErrors:

def setup(self, errors):
ints = np.random.randint(0, 60, size=10000)
self.arr = ["{0} days".format(i) for i in ints]
self.arr = [f"{i} days" for i in ints]
self.arr[-1] = "apple"

def time_convert(self, errors):
Expand Down
6 changes: 5 additions & 1 deletion pandas/core/groupby/grouper.py
Original file line number Diff line number Diff line change
Expand Up @@ -288,7 +288,11 @@ def __init__(
if self.name is None:
self.name = index.names[level]

self.grouper, self._codes, self._group_index = index._get_grouper_for_level( # noqa: E501
(
self.grouper,
self._codes,
self._group_index,
) = index._get_grouper_for_level( # noqa: E501
self.grouper, level
)

Expand Down
4 changes: 2 additions & 2 deletions pandas/io/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ def _is_url(url) -> bool:


def _expand_user(
filepath_or_buffer: FilePathOrBuffer[AnyStr]
filepath_or_buffer: FilePathOrBuffer[AnyStr],
) -> FilePathOrBuffer[AnyStr]:
"""Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Expand Down Expand Up @@ -139,7 +139,7 @@ def _validate_header_arg(header) -> None:


def _stringify_path(
filepath_or_buffer: FilePathOrBuffer[AnyStr]
filepath_or_buffer: FilePathOrBuffer[AnyStr],
) -> FilePathOrBuffer[AnyStr]:
"""Attempt to convert a path-like object to a string.
Expand Down

0 comments on commit 60a7715

Please sign in to comment.