From f674f75227267fcc26b7b7f1108e3cf330a605a8 Mon Sep 17 00:00:00 2001
From: Gaetan Semet
Date: Thu, 1 Sep 2016 14:41:33 +0200
Subject: [PATCH] use list comprehension

Signed-off-by: Gaetan Semet
---
 examples/src/main/python/ml/quantile_discretizer_example.py | 2 +-
 examples/src/main/python/ml/vector_slicer_example.py        | 4 ++--
 examples/src/main/python/sql/hive.py                        | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/examples/src/main/python/ml/quantile_discretizer_example.py b/examples/src/main/python/ml/quantile_discretizer_example.py
index 788a0baffebb..0fc1d1949a77 100644
--- a/examples/src/main/python/ml/quantile_discretizer_example.py
+++ b/examples/src/main/python/ml/quantile_discretizer_example.py
@@ -29,7 +29,7 @@
         .getOrCreate()
 
     # $example on$
-    data = [(0, 18.0,), (1, 19.0,), (2, 8.0,), (3, 5.0,), (4, 2.2,)]
+    data = [(0, 18.0), (1, 19.0), (2, 8.0), (3, 5.0), (4, 2.2)]
     df = spark.createDataFrame(data, ["id", "hour"])
     # $example off$
 
diff --git a/examples/src/main/python/ml/vector_slicer_example.py b/examples/src/main/python/ml/vector_slicer_example.py
index d2f46b190f9a..68c8cfe27e37 100644
--- a/examples/src/main/python/ml/vector_slicer_example.py
+++ b/examples/src/main/python/ml/vector_slicer_example.py
@@ -32,8 +32,8 @@
 
     # $example on$
     df = spark.createDataFrame([
-        Row(userFeatures=Vectors.sparse(3, {0: -2.0, 1: 2.3}),),
-        Row(userFeatures=Vectors.dense([-2.0, 2.3, 0.0]),)])
+        Row(userFeatures=Vectors.sparse(3, {0: -2.0, 1: 2.3})),
+        Row(userFeatures=Vectors.dense([-2.0, 2.3, 0.0]))])
 
     slicer = VectorSlicer(inputCol="userFeatures", outputCol="features", indices=[1])
 
diff --git a/examples/src/main/python/sql/hive.py b/examples/src/main/python/sql/hive.py
index 9b2a2c4e6a16..98b48908b5a1 100644
--- a/examples/src/main/python/sql/hive.py
+++ b/examples/src/main/python/sql/hive.py
@@ -79,7 +79,7 @@
 
     # You can also use DataFrames to create temporary views within a SparkSession.
     Record = Row("key", "value")
-    recordsDF = spark.createDataFrame(map(lambda i: Record(i, "val_" + str(i)), range(1, 101)))
+    recordsDF = spark.createDataFrame([Record(i, "val_" + str(i)) for i in range(1, 101)])
     recordsDF.createOrReplaceTempView("records")
 
     # Queries can then join DataFrame data with data stored in Hive.
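
Note for reviewers: below is a minimal standalone sketch of the two
constructions the hive.py hunk swaps. It uses collections.namedtuple as a
stand-in for pyspark.sql.Row (an assumption, made only so the sketch runs
without a Spark installation), and the names records_map and records_list are
illustrative. In Python 3, map() returns a lazy, one-shot iterator rather
than a list, so the eager list comprehension is the more explicit and
idiomatic way to build the rows:

    from collections import namedtuple

    # Stand-in for Record = Row("key", "value") from the patched example;
    # namedtuple is used here so the sketch needs no Spark installation.
    Record = namedtuple("Record", ["key", "value"])

    # Before: map() yields a lazy iterator in Python 3, consumable only once.
    records_map = map(lambda i: Record(i, "val_" + str(i)), range(1, 101))

    # After: an eager list with the same contents, reusable and readable.
    records_list = [Record(i, "val_" + str(i)) for i in range(1, 101)]

    assert list(records_map) == records_list

    # The trailing commas dropped in the other two hunks are redundant:
    # a parenthesized multi-element tuple compares equal with or without one.
    assert (0, 18.0,) == (0, 18.0)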