Type ignore SkipIf with Optional[str] message
These are, technically speaking, incorrect. However, the messages are
set under the same condition as skipIf, so there is no practical issue
here.
zero323 committed Aug 31, 2020
1 parent ef138be commit e59b562
Showing 11 changed files with 24 additions and 24 deletions.
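
For context, the requirement-message helpers imported by these tests follow roughly the pattern sketched below. This is a simplified reconstruction, not the actual pyspark.testing source: each message is None exactly when the corresponding dependency imports cleanly, so its type is Optional[str]. Since unittest.skipIf declares its reason parameter as str, mypy reports an [arg-type] error, even though the message can only be None in the case where it is never used.

import unittest
from typing import Optional

# Sketch of the pattern used by the PySpark test helpers (simplified;
# the real definitions live in pyspark.testing and differ in detail).
pandas_requirement_message: Optional[str] = None
try:
    import pandas  # noqa: F401
except ImportError as e:
    # Set only when the import fails, so the message is a str exactly
    # when the skip condition below evaluates to True.
    pandas_requirement_message = str(e)

have_pandas = pandas_requirement_message is None


# mypy: Argument 2 to "skipIf" has incompatible type "Optional[str]";
# expected "str"  [arg-type]. The reason can only be None when the
# test is not skipped, so silencing the error is safe in practice.
@unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore[arg-type]
class PandasDependentTests(unittest.TestCase):
    def test_runs_only_with_pandas(self) -> None:
        self.assertTrue(have_pandas)
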
4 changes: 2 additions & 2 deletions python/pyspark/sql/tests/test_arrow.py
@@ -42,7 +42,7 @@

 @unittest.skipIf(
     not have_pandas or not have_pyarrow,
-    pandas_requirement_message or pyarrow_requirement_message)
+    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
 class ArrowTests(ReusedSQLTestCase):

     @classmethod
@@ -465,7 +465,7 @@ def test_createDataFrame_empty_partition(self):

 @unittest.skipIf(
     not have_pandas or not have_pyarrow,
-    pandas_requirement_message or pyarrow_requirement_message)
+    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
 class MaxResultArrowTests(unittest.TestCase):
     # These tests are separate as 'spark.driver.maxResultSize' configuration
     # is a static configuration to Spark context.
20 changes: 10 additions & 10 deletions python/pyspark/sql/tests/test_dataframe.py
@@ -518,7 +518,7 @@ def _to_pandas(self):
         df = self.spark.createDataFrame(data, schema)
         return df.toPandas()

-    @unittest.skipIf(not have_pandas, pandas_requirement_message)
+    @unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore[arg-type]
     def test_to_pandas(self):
         import numpy as np
         pdf = self._to_pandas()
@@ -530,7 +530,7 @@ def test_to_pandas(self):
         self.assertEquals(types[4], np.object)  # datetime.date
         self.assertEquals(types[5], 'datetime64[ns]')

-    @unittest.skipIf(not have_pandas, pandas_requirement_message)
+    @unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore[arg-type]
     def test_to_pandas_with_duplicated_column_names(self):
         import numpy as np

@@ -543,7 +543,7 @@ def test_to_pandas_with_duplicated_column_names(self):
         self.assertEquals(types.iloc[0], np.int32)
         self.assertEquals(types.iloc[1], np.int32)

-    @unittest.skipIf(not have_pandas, pandas_requirement_message)
+    @unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore[arg-type]
     def test_to_pandas_on_cross_join(self):
         import numpy as np

@@ -569,7 +569,7 @@ def test_to_pandas_required_pandas_not_found(self):
         with self.assertRaisesRegexp(ImportError, 'Pandas >= .* must be installed'):
             self._to_pandas()

-    @unittest.skipIf(not have_pandas, pandas_requirement_message)
+    @unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore[arg-type]
     def test_to_pandas_avoid_astype(self):
         import numpy as np
         schema = StructType().add("a", IntegerType()).add("b", StringType())\
@@ -581,7 +581,7 @@ def test_to_pandas_avoid_astype(self):
         self.assertEquals(types[1], np.object)
         self.assertEquals(types[2], np.float64)

-    @unittest.skipIf(not have_pandas, pandas_requirement_message)
+    @unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore[arg-type]
     def test_to_pandas_from_empty_dataframe(self):
         with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
             # SPARK-29188 test that toPandas() on an empty dataframe has the correct dtypes
@@ -601,7 +601,7 @@ def test_to_pandas_from_empty_dataframe(self):
             dtypes_when_empty_df = self.spark.sql(sql).filter("False").toPandas().dtypes
             self.assertTrue(np.all(dtypes_when_empty_df == dtypes_when_nonempty_df))

-    @unittest.skipIf(not have_pandas, pandas_requirement_message)
+    @unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore[arg-type]
     def test_to_pandas_from_null_dataframe(self):
         with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
             # SPARK-29188 test that toPandas() on a dataframe with only nulls has correct dtypes
@@ -629,7 +629,7 @@ def test_to_pandas_from_null_dataframe(self):
             self.assertEqual(types[7], np.object)
             self.assertTrue(np.can_cast(np.datetime64, types[8]))

-    @unittest.skipIf(not have_pandas, pandas_requirement_message)
+    @unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore[arg-type]
     def test_to_pandas_from_mixed_dataframe(self):
         with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": False}):
             # SPARK-29188 test that toPandas() on a dataframe with some nulls has correct dtypes
@@ -657,7 +657,7 @@ def test_create_dataframe_from_array_of_long(self):
         df = self.spark.createDataFrame(data)
         self.assertEqual(df.first(), Row(longarray=[-9223372036854775808, 0, 9223372036854775807]))

-    @unittest.skipIf(not have_pandas, pandas_requirement_message)
+    @unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore[arg-type]
     def test_create_dataframe_from_pandas_with_timestamp(self):
         import pandas as pd
         from datetime import datetime
@@ -685,7 +685,7 @@ def test_create_dataframe_required_pandas_not_found(self):
             self.spark.createDataFrame(pdf)

     # Regression test for SPARK-23360
-    @unittest.skipIf(not have_pandas, pandas_requirement_message)
+    @unittest.skipIf(not have_pandas, pandas_requirement_message)  # type: ignore[arg-type]
     def test_create_dataframe_from_pandas_with_dst(self):
         import pandas as pd
         from pandas.util.testing import assert_frame_equal
@@ -889,7 +889,7 @@ def test_query_execution_listener_on_collect(self):

     @unittest.skipIf(
         not have_pandas or not have_pyarrow,
-        pandas_requirement_message or pyarrow_requirement_message)
+        pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
     def test_query_execution_listener_on_collect_with_arrow(self):
         with self.sql_conf({"spark.sql.execution.arrow.pyspark.enabled": True}):
             self.assertFalse(
2 changes: 1 addition & 1 deletion python/pyspark/sql/tests/test_pandas_cogrouped_map.py
@@ -33,7 +33,7 @@

 @unittest.skipIf(
     not have_pandas or not have_pyarrow,
-    pandas_requirement_message or pyarrow_requirement_message)
+    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
 class CogroupedMapInPandasTests(ReusedSQLTestCase):

     @property
2 changes: 1 addition & 1 deletion python/pyspark/sql/tests/test_pandas_grouped_map.py
@@ -41,7 +41,7 @@

 @unittest.skipIf(
     not have_pandas or not have_pyarrow,
-    pandas_requirement_message or pyarrow_requirement_message)
+    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
 class GroupedMapInPandasTests(ReusedSQLTestCase):

     @property
2 changes: 1 addition & 1 deletion python/pyspark/sql/tests/test_pandas_map.py
@@ -27,7 +27,7 @@

 @unittest.skipIf(
     not have_pandas or not have_pyarrow,
-    pandas_requirement_message or pyarrow_requirement_message)
+    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
 class MapInPandasTests(ReusedSQLTestCase):

     @classmethod
2 changes: 1 addition & 1 deletion python/pyspark/sql/tests/test_pandas_udf.py
@@ -28,7 +28,7 @@

 @unittest.skipIf(
     not have_pandas or not have_pyarrow,
-    pandas_requirement_message or pyarrow_requirement_message)
+    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
 class PandasUDFTests(ReusedSQLTestCase):

     def test_pandas_udf_basic(self):
2 changes: 1 addition & 1 deletion python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py
@@ -35,7 +35,7 @@

 @unittest.skipIf(
     not have_pandas or not have_pyarrow,
-    pandas_requirement_message or pyarrow_requirement_message)
+    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
 class GroupedAggPandasUDFTests(ReusedSQLTestCase):

     @property
4 changes: 2 additions & 2 deletions python/pyspark/sql/tests/test_pandas_udf_scalar.py
@@ -46,7 +46,7 @@

 @unittest.skipIf(
     not have_pandas or not have_pyarrow,
-    pandas_requirement_message or pyarrow_requirement_message)
+    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
 class ScalarPandasUDFTests(ReusedSQLTestCase):

     @classmethod
@@ -1095,7 +1095,7 @@ def f3i(it):
         self.assertEquals(expected, df1.collect())

     # SPARK-24721
-    @unittest.skipIf(not test_compiled, test_not_compiled_message)
+    @unittest.skipIf(not test_compiled, test_not_compiled_message)  # type: ignore[arg-type]
     def test_datasource_with_udf(self):
         # Same as SQLTests.test_datasource_with_udf, but with Pandas UDF
         # This needs to be a separate test because Arrow dependency is optional
2 changes: 1 addition & 1 deletion python/pyspark/sql/tests/test_pandas_udf_typehints.py
@@ -34,7 +34,7 @@

 @unittest.skipIf(
     not have_pandas or not have_pyarrow,
-    pandas_requirement_message or pyarrow_requirement_message)
+    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
 class PandasUDFTypeHintsTests(ReusedSQLTestCase):
     def test_type_annotation_scalar(self):
         def func(col: pd.Series) -> pd.Series:
2 changes: 1 addition & 1 deletion python/pyspark/sql/tests/test_pandas_udf_window.py
@@ -31,7 +31,7 @@

 @unittest.skipIf(
     not have_pandas or not have_pyarrow,
-    pandas_requirement_message or pyarrow_requirement_message)
+    pandas_requirement_message or pyarrow_requirement_message)  # type: ignore[arg-type]
 class WindowPandasUDFTests(ReusedSQLTestCase):
     @property
     def data(self):
6 changes: 3 additions & 3 deletions python/pyspark/sql/tests/test_udf.py
@@ -356,7 +356,7 @@ def test_udf_registration_returns_udf(self):
             df.select(add_four("id").alias("plus_four")).collect()
         )

-    @unittest.skipIf(not test_compiled, test_not_compiled_message)
+    @unittest.skipIf(not test_compiled, test_not_compiled_message)  # type: ignore[arg-type]
     def test_register_java_function(self):
         self.spark.udf.registerJavaFunction(
             "javaStringLength", "test.org.apache.spark.sql.JavaStringLength", IntegerType())
@@ -373,7 +373,7 @@ def test_register_java_function(self):
         [value] = self.spark.sql("SELECT javaStringLength3('test')").first()
         self.assertEqual(value, 4)

-    @unittest.skipIf(not test_compiled, test_not_compiled_message)
+    @unittest.skipIf(not test_compiled, test_not_compiled_message)  # type: ignore[arg-type]
     def test_register_java_udaf(self):
         self.spark.udf.registerJavaUDAF("javaUDAF", "test.org.apache.spark.sql.MyDoubleAvg")
         df = self.spark.createDataFrame([(1, "a"), (2, "b"), (3, "a")], ["id", "name"])
@@ -560,7 +560,7 @@ def test_nonparam_udf_with_aggregate(self):
         self.assertEqual(rows, [Row(_1=1, _2=2, a=u'const_str')])

     # SPARK-24721
-    @unittest.skipIf(not test_compiled, test_not_compiled_message)
+    @unittest.skipIf(not test_compiled, test_not_compiled_message)  # type: ignore[arg-type]
     def test_datasource_with_udf(self):
         from pyspark.sql.functions import lit, col

