Commit 5c8b524

Davies Liu authored and rxin committed
add comment for toDF
1 parent a4e5e66 commit 5c8b524

1 file changed: python/pyspark/sql/context.py (6 additions, 1 deletion)
@@ -68,15 +68,20 @@ class SQLContext(object):
     def __init__(self, sparkContext, sqlContext=None):
         """Create a new SQLContext.
 
+        It will add a method called `toDF` to :class:`RDD`, which could be
+        used to convert an RDD into a DataFrame, it's a shorthand for
+        :func:`SQLContext.createDataFrame`.
+
         :param sparkContext: The SparkContext to wrap.
         :param sqlContext: An optional JVM Scala SQLContext. If set, we do not instatiate a new
             SQLContext in the JVM, instead we make all calls to this object.
 
         >>> from datetime import datetime
+        >>> sqlCtx = SQLContext(sc)
         >>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1L,
         ...     b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
         ...     time=datetime(2014, 8, 1, 14, 1, 5))])
-        >>> df = sqlCtx.createDataFrame(allTypes)
+        >>> df = allTypes.toDF()
         >>> df.registerTempTable("allTypes")
         >>> sqlCtx.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
         ...     'from allTypes where b and i > 0').collect()
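For readers coming to this diff cold: the shorthand the new docstring describes works by installing a `toDF` method on `RDD` when a `SQLContext` is constructed, so `rdd.toDF()` simply forwards to `SQLContext.createDataFrame`. Below is a minimal illustrative sketch of that pattern; the helper name `_monkey_patch_rdd` and the exact signature are assumptions for illustration, not the code touched by this commit.

    # Illustrative sketch only; not the Spark source changed by this commit.
    # It shows the pattern the new docstring describes: constructing a
    # SQLContext binds a `toDF` method onto RDD that forwards to createDataFrame.
    from pyspark.rdd import RDD

    def _monkey_patch_rdd(sqlCtx):          # hypothetical helper name
        def toDF(self, schema=None):
            # rdd.toDF(...) is shorthand for sqlCtx.createDataFrame(rdd, ...)
            return sqlCtx.createDataFrame(self, schema)
        RDD.toDF = toDF

With that in place, the doctest change reads naturally: `allTypes.toDF()` and `sqlCtx.createDataFrame(allTypes)` build the same DataFrame, and the added setup line `sqlCtx = SQLContext(sc)` is what makes `toDF` available on the RDD in the first place.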
