Skip to content

Commit

Permalink
style: format code in docstrings
Browse files Browse the repository at this point in the history
  • Loading branch information
cpcloud committed Dec 18, 2023
1 parent ccdecef commit f20e34e
Show file tree
Hide file tree
Showing 16 changed files with 17 additions and 72 deletions.
4 changes: 1 addition & 3 deletions ibis/backends/base/df/timecontext.py
Original file line number Diff line number Diff line change
Expand Up @@ -183,9 +183,7 @@ def construct_time_context_aware_series(
1 2.2
2 3.3
Name: value, dtype: float64
>>> construct_time_context_aware_series(
... series, df
... ) # quartodoc: +SKIP # doctest: +SKIP
>>> construct_time_context_aware_series(series, df) # quartodoc: +SKIP # doctest: +SKIP
time
0 2017-01-02 1.1
1 2017-01-03 2.2
Expand Down
1 change: 0 additions & 1 deletion ibis/backends/base/sql/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -177,7 +177,6 @@ def raw_sql(self, query: str):
>>> con = ibis.connect("duckdb://")
>>> with con.raw_sql("SELECT 1") as cursor:
... result = cursor.fetchall()
...
>>> result
[(1,)]
>>> cursor.closed
Expand Down
1 change: 0 additions & 1 deletion ibis/backends/base/sql/alchemy/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -636,7 +636,6 @@ def raw_sql(self, query: str | sa.sql.ClauseElement):
>>> con = ibis.connect("duckdb://")
>>> with con.raw_sql("SELECT 1") as cursor:
... result = cursor.fetchall()
...
>>> result
[(1,)]
>>> cursor.closed
Expand Down
9 changes: 2 additions & 7 deletions ibis/backends/bigquery/udf/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,6 @@ def python(
>>> @udf.python(input_type=[dt.double], output_type=dt.double)
... def add_one(x):
... return x + 1
...
>>> print(add_one.sql)
CREATE TEMPORARY FUNCTION add_one_0(x FLOAT64)
RETURNS FLOAT64
Expand All @@ -84,9 +83,7 @@ def python(
}
return add_one(x);
""";
>>> @udf.python(
... input_type=[dt.double, dt.double], output_type=dt.Array(dt.double)
... )
>>> @udf.python(input_type=[dt.double, dt.double], output_type=dt.Array(dt.double))
... def my_range(start, stop):
... def gen(start, stop):
... curr = start
Expand Down Expand Up @@ -121,9 +118,7 @@ def python(
""";
>>> @udf.python(
... input_type=[dt.double, dt.double],
... output_type=dt.Struct.from_tuples(
... [("width", "double"), ("height", "double")]
... ),
... output_type=dt.Struct.from_tuples([("width", "double"), ("height", "double")]),
... )
... def my_rectangle(width, height):
... class Rectangle:
Expand Down
11 changes: 2 additions & 9 deletions ibis/backends/duckdb/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -725,10 +725,7 @@ def list_tables(
>>> con.list_tables(schema="my_schema")
[]
>>> with con.begin() as c:
... c.exec_driver_sql(
... "CREATE TABLE my_schema.baz (a INTEGER)"
... ) # doctest: +ELLIPSIS
...
... c.exec_driver_sql("CREATE TABLE my_schema.baz (a INTEGER)") # doctest: +ELLIPSIS
<...>
>>> con.list_tables(schema="my_schema")
['baz']
Expand Down Expand Up @@ -815,7 +812,6 @@ def read_sqlite(self, path: str | Path, table_name: str | None = None) -> ir.Tab
... con.execute(
... "INSERT INTO t VALUES (1, 'a'), (2, 'b'), (3, 'c')"
... ) # doctest: +ELLIPSIS
...
<...>
>>> con = ibis.connect("duckdb://")
>>> t = con.read_sqlite("/tmp/sqlite.db", table_name="t")
Expand Down Expand Up @@ -907,7 +903,6 @@ def attach_sqlite(
... con.execute(
... "INSERT INTO t VALUES (1, 'a'), (2, 'b'), (3, 'c')"
... ) # doctest: +ELLIPSIS
...
<...>
>>> con = ibis.connect("duckdb://")
>>> con.list_tables()
Expand Down Expand Up @@ -1126,9 +1121,7 @@ def to_parquet(
Partition on multiple columns.
>>> con.to_parquet(
... penguins, tempfile.mkdtemp(), partition_by=("year", "island")
... )
>>> con.to_parquet(penguins, tempfile.mkdtemp(), partition_by=("year", "island"))
"""
self._run_pre_execute_hooks(expr)
query = self._to_sql(expr, params=params)
Expand Down
12 changes: 3 additions & 9 deletions ibis/backends/impala/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -933,9 +933,7 @@ def insert(
>>> con.insert(table, table_expr) # quartodoc: +SKIP # doctest: +SKIP
Completely overwrite contents
>>> con.insert(
... table, table_expr, overwrite=True
... ) # quartodoc: +SKIP # doctest: +SKIP
>>> con.insert(table, table_expr, overwrite=True) # quartodoc: +SKIP # doctest: +SKIP
"""
table = self.table(table_name, database=database)
return table.insert(
Expand Down Expand Up @@ -964,9 +962,7 @@ def drop_table(
--------
>>> table = "my_table"
>>> db = "operations"
>>> con.drop_table(
... table, database=db, force=True
... ) # quartodoc: +SKIP # doctest: +SKIP
>>> con.drop_table(table, database=db, force=True) # quartodoc: +SKIP # doctest: +SKIP
"""
statement = DropTable(name, database=database, must_exist=not force)
self._safe_exec_sql(statement)
Expand Down Expand Up @@ -1024,9 +1020,7 @@ def cache_table(self, table_name, *, database=None, pool="default"):
>>> table = "my_table"
>>> db = "operations"
>>> pool = "op_4GB_pool"
>>> con.cache_table(
... "my_table", database=db, pool=pool
... ) # quartodoc: +SKIP # doctest: +SKIP
>>> con.cache_table("my_table", database=db, pool=pool) # quartodoc: +SKIP # doctest: +SKIP
"""
statement = ddl.CacheTable(table_name, database=database, pool=pool)
self._safe_exec_sql(statement)
Expand Down
12 changes: 3 additions & 9 deletions ibis/backends/pyspark/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -381,9 +381,7 @@ def create_table(
Examples
--------
>>> con.create_table(
... "new_table_name", table_expr
... ) # quartodoc: +SKIP # doctest: +SKIP
>>> con.create_table("new_table_name", table_expr) # quartodoc: +SKIP # doctest: +SKIP
"""
import pandas as pd
import pyarrow as pa
Expand Down Expand Up @@ -507,9 +505,7 @@ def drop_table_or_view(
--------
>>> table = "my_table"
>>> db = "operations"
>>> con.drop_table_or_view(
... table, db, force=True
... ) # quartodoc: +SKIP # doctest: +SKIP
>>> con.drop_table_or_view(table, db, force=True) # quartodoc: +SKIP # doctest: +SKIP
"""
statement = DropTable(name, database=database, must_exist=not force)
self.raw_sql(statement.compile())
Expand Down Expand Up @@ -557,9 +553,7 @@ def insert(
>>> con.insert(table, table_expr) # quartodoc: +SKIP # doctest: +SKIP
# Completely overwrite contents
>>> con.insert(
... table, table_expr, overwrite=True
... ) # quartodoc: +SKIP # doctest: +SKIP
>>> con.insert(table, table_expr, overwrite=True) # quartodoc: +SKIP # doctest: +SKIP
"""
table = self.table(table_name, database=database)
return table.insert(
Expand Down
2 changes: 0 additions & 2 deletions ibis/common/annotations.py
Original file line number Diff line number Diff line change
Expand Up @@ -569,7 +569,6 @@ def annotated(_1=None, _2=None, _3=None, **kwargs):
>>> @annotated(x=instance_of(int), y=instance_of(str))
... def foo(x, y):
... return float(x) + float(y)
...
3. With mixing type annotations and patterns where the latter takes precedence
Expand All @@ -582,7 +581,6 @@ def annotated(_1=None, _2=None, _3=None, **kwargs):
>>> @annotated([instance_of(int), instance_of(str)], instance_of(float))
... def foo(x, y):
... return float(x) + float(y)
...
Parameters
----------
Expand Down
1 change: 0 additions & 1 deletion ibis/common/collections.py
Original file line number Diff line number Diff line change
Expand Up @@ -190,7 +190,6 @@ class MapSet(Mapping[K, V]):
...
... def __repr__(self):
... return f"MyMap({repr(self._data)})"
...
>>> m = MyMap(a=1, b=2)
>>> n = dict(a=1, b=2, c=3)
>>> m <= n
Expand Down
2 changes: 0 additions & 2 deletions ibis/common/graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,6 @@ def _flatten_collections(node: Any) -> Iterator[N]:
... number: int
... string: str
... children: tuple[Node, ...]
...
>>> a = MyNode(4, "a", ())
>>>
>>> b = MyNode(3, "b", ())
Expand Down Expand Up @@ -96,7 +95,6 @@ def _recursive_lookup(obj: Any, dct: dict) -> Any:
... number: int
... string: str
... children: tuple[Node, ...]
...
>>> a = MyNode(4, "a", ())
>>>
>>> b = MyNode(3, "b", ())
Expand Down
3 changes: 0 additions & 3 deletions ibis/common/patterns.py
Original file line number Diff line number Diff line change
Expand Up @@ -658,7 +658,6 @@ class GenericInstanceOf(Slotted, Pattern):
...
... def __eq__(self, other):
... return type(self) is type(other) and self.value == other.value
...
>>> p = GenericInstanceOf(MyNumber[int])
>>> assert p.match(MyNumber(1), {}) == MyNumber(1)
>>> assert p.match(MyNumber(1.0), {}) is NoMatch
Expand Down Expand Up @@ -802,7 +801,6 @@ class GenericCoercedTo(Slotted, Pattern):
... return cls(float(value))
... else:
... raise CoercionError(f"Cannot coerce to {T}")
...
>>> p = GenericCoercedTo(MyNumber[int])
>>> assert p.match(3.14, {}) == MyNumber(3)
>>> assert p.match("15", {}) == MyNumber(15)
Expand Down Expand Up @@ -1571,7 +1569,6 @@ def pattern(obj: AnyType) -> Pattern:
>>> @pattern
... def as_int(x, context):
... return int(x)
...
>>>
>>> assert as_int.match(1, {}) == 1
Expand Down
4 changes: 0 additions & 4 deletions ibis/common/typing.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,14 +102,12 @@ def get_type_params(obj: Any) -> dict[str, type]:
>>>
>>> class MyList(List[T]):
... ...
...
>>>
>>> get_type_params(MyList[int])
{'T': <class 'int'>}
>>>
>>> class MyDict(Dict[T, U]):
... ...
...
>>>
>>> get_type_params(MyDict[int, str])
{'T': <class 'int'>, 'U': <class 'str'>}
Expand Down Expand Up @@ -147,7 +145,6 @@ def get_bound_typevars(obj: Any) -> dict[TypeVar, tuple[str, type]]:
>>> class MyStruct(Generic[T, U]):
... a: T
... b: U
...
>>> get_bound_typevars(MyStruct[int, str])
{~T: ('a', <class 'int'>), ~U: ('b', <class 'str'>)}
>>>
Expand All @@ -157,7 +154,6 @@ def get_bound_typevars(obj: Any) -> dict[TypeVar, tuple[str, type]]:
... @property
... def myprop(self) -> U:
... ...
...
>>> get_bound_typevars(MyStruct[float, bytes])
{~T: ('a', <class 'float'>), ~U: ('myprop', <class 'bytes'>)}
"""
Expand Down
2 changes: 0 additions & 2 deletions ibis/expr/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -1278,7 +1278,6 @@ def read_csv(
... '''
>>> with open("/tmp/lines.csv", mode="w") as f:
... nbytes = f.write(lines) # nbytes is unused
...
>>> t = ibis.read_csv("/tmp/lines.csv")
>>> t
┏━━━━━━━┳━━━━━━━━┓
Expand Down Expand Up @@ -1334,7 +1333,6 @@ def read_json(
... '''
>>> with open("/tmp/lines.json", mode="w") as f:
... nbytes = f.write(lines) # nbytes is unused
...
>>> t = ibis.read_json("/tmp/lines.json")
>>> t
┏━━━━━━━┳━━━━━━━━┓
Expand Down
16 changes: 3 additions & 13 deletions ibis/legacy/udf/vectorized.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,16 +134,12 @@ def _coerce_to_dataframe(
Examples
--------
>>> import pandas as pd
>>> _coerce_to_dataframe(
... pd.DataFrame({"a": [1, 2, 3]}), dt.Struct(dict(b="int32"))
... ) # noqa: E501
>>> _coerce_to_dataframe(pd.DataFrame({"a": [1, 2, 3]}), dt.Struct(dict(b="int32"))) # noqa: E501
b
0 1
1 2
2 3
>>> _coerce_to_dataframe(
... pd.Series([[1, 2, 3]]), dt.Struct(dict.fromkeys("abc", "int32"))
... ) # noqa: E501
>>> _coerce_to_dataframe(pd.Series([[1, 2, 3]]), dt.Struct(dict.fromkeys("abc", "int32"))) # noqa: E501
a b c
0 1 2 3
>>> _coerce_to_dataframe(
Expand All @@ -157,9 +153,7 @@ def _coerce_to_dataframe(
... ) # noqa: E501
a b c
0 1 2 3
>>> _coerce_to_dataframe(
... [1, 2, 3], dt.Struct(dict.fromkeys("abc", "int32"))
... ) # noqa: E501
>>> _coerce_to_dataframe([1, 2, 3], dt.Struct(dict.fromkeys("abc", "int32"))) # noqa: E501
a b c
0 1 2 3
"""
Expand Down Expand Up @@ -287,7 +281,6 @@ def analytic(input_type, output_type):
>>> @analytic(input_type=[dt.double], output_type=dt.double)
... def zscore(series): # note the use of aggregate functions
... return (series - series.mean()) / series.std()
...
Define and use a UDF with multiple return columns:
Expand Down Expand Up @@ -329,14 +322,12 @@ def elementwise(input_type, output_type):
>>> @elementwise(input_type=[dt.string], output_type=dt.int64)
... def my_string_length(series):
... return series.str.len() * 2
...
Define a UDF with non-column parameters:
>>> @elementwise(input_type=[dt.string], output_type=dt.int64)
... def my_string_length(series, *, times):
... return series.str.len() * times
...
Define and use a UDF with multiple return columns:
Expand Down Expand Up @@ -375,7 +366,6 @@ def reduction(input_type, output_type):
>>> @reduction(input_type=[dt.string], output_type=dt.int64)
... def my_string_length_agg(series, **kwargs):
... return (series.str.len() * 2).sum()
...
Define and use a UDF with multiple return columns:
Expand Down
8 changes: 2 additions & 6 deletions ibis/selectors.py
Original file line number Diff line number Diff line change
Expand Up @@ -212,9 +212,7 @@ def of_type(dtype: dt.DataType | str | type[dt.DataType]) -> Predicate:
>>> import ibis
>>> import ibis.expr.datatypes as dt
>>> import ibis.selectors as s
>>> t = ibis.table(
... dict(name="string", siblings="array<string>", parents="array<int64>")
... )
>>> t = ibis.table(dict(name="string", siblings="array<string>", parents="array<int64>"))
>>> expr = t.select(s.of_type(dt.Array(dt.string)))
>>> expr.columns
['siblings']
Expand Down Expand Up @@ -329,9 +327,7 @@ def contains(
>>> import ibis
>>> import ibis.selectors as s
>>> t = ibis.table(
... dict(
... a="int64", b="string", c="float", d="array<int16>", ab="struct<x: int>"
... )
... dict(a="int64", b="string", c="float", d="array<int16>", ab="struct<x: int>")
... )
>>> expr = t.select(s.contains(("a", "b")))
>>> expr.columns
Expand Down
1 change: 1 addition & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -500,6 +500,7 @@ required-imports = ["from __future__ import annotations"]
[tool.ruff.format]
exclude = [".direnv", "result-*", "_py310.py", "decompiled.py"]
docstring-code-format = true
docstring-code-line-length = 88

[tool.conda-lock]
channels = ["conda-forge"]
Expand Down

0 comments on commit f20e34e

Please sign in to comment.