diff --git a/docs/sql-programming-guide.md b/docs/sql-programming-guide.md
index 332618edf0c5..03500867df70 100644
--- a/docs/sql-programming-guide.md
+++ b/docs/sql-programming-guide.md
@@ -1371,7 +1371,10 @@ the Data Sources API. The following options are supported:
These options must all be specified if any of them is specified. They describe how to
partition the table when reading in parallel from multiple workers.
- partitionColumn must be a numeric column from the table in question.
+ partitionColumn must be a numeric column from the table in question. Note
+ that lowerBound and upperBound are used only to decide the
+ partition stride, not to filter the rows in the table, so all rows in the
+ table will be partitioned and returned (see the sketch below).
|
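As a concrete illustration, here is a minimal sketch of a partitioned JDBC read using these options; the connection URL, table name, column name, and bound values are hypothetical:

```scala
// A minimal sketch, assuming a reachable PostgreSQL endpoint; the URL,
// table, and column names below are hypothetical.
val jdbcDF = sqlContext.read.format("jdbc")
  .options(Map(
    "url" -> "jdbc:postgresql://localhost/test",
    "dbtable" -> "people",
    "partitionColumn" -> "id",
    "lowerBound" -> "1",      // shapes the stride; rows with id < 1 are still read
    "upperBound" -> "10000",  // shapes the stride; rows with id > 10000 are still read
    "numPartitions" -> "4"))
  .load()
```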
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
index c25ef58e6f62..b237fe684cdc 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/SQLContext.scala
@@ -873,8 +873,8 @@ class SQLContext(@transient val sparkContext: SparkContext)
* passed to this function.
*
* @param columnName the name of a column of integral type that will be used for partitioning.
- * @param lowerBound the minimum value of `columnName` to retrieve
- * @param upperBound the maximum value of `columnName` to retrieve
+ * @param lowerBound the minimum value of `columnName` used to decide the partition stride
+ * @param upperBound the maximum value of `columnName` used to decide the partition stride
* @param numPartitions the number of partitions. the range `minValue`-`maxValue` will be split
* evenly into this many partitions
*
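For reference, a minimal sketch of the equivalent call through `DataFrameReader.jdbc`; the URL, table, and bound values are hypothetical, and the bounds shape the stride rather than filter rows:

```scala
import java.util.Properties

// Hypothetical connection details: lowerBound/upperBound tune the
// partition stride only; rows outside [1, 10000] are still returned.
val df = sqlContext.read.jdbc(
  "jdbc:postgresql://localhost/test",  // url
  "people",                            // table
  "id",                                // columnName (numeric)
  1L,                                  // lowerBound
  10000L,                              // upperBound
  4,                                   // numPartitions
  new Properties())
```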
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRelation.scala b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRelation.scala
index 4fa84dc076f7..1c113b34a1ae 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRelation.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRelation.scala
@@ -50,9 +50,11 @@ private[sql] object JDBCRelation {
* Given a partitioning schematic (a column of integral type, a number of
* partitions, and upper and lower bounds on the column's value), generate
* WHERE clauses for each partition so that each row in the table appears
- * exactly once. The parameters minValue and maxValue are advisory in that
+ * exactly once. The parameters minValue and maxValue are advisory in that
* incorrect values may cause the partitioning to be poor, but no data
- * will fail to be represented.
+ * will fail to be represented. Note: the upper and lower bounds are used
+ * only to decide the partition stride, not for filtering. So all rows in
+ * the table will be partitioned.
*/
def columnPartition(partitioning: JDBCPartitioningInfo): Array[Partition] = {
if (partitioning == null) return Array[Partition](JDBCPartition(null, 0))
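To make the advisory nature of the bounds concrete, the following standalone sketch (a simplification, not the Spark source; the even-stride arithmetic is an assumption) generates WHERE clauses in the same spirit: the first and last partitions are open-ended, so rows whose values fall outside [lower, upper] still land in a partition:

```scala
// Standalone sketch of stride-based partition predicates. The first
// partition has no lower bound and the last has no upper bound, so every
// row matches exactly one clause even if its value lies outside
// [lower, upper].
def whereClauses(col: String, lower: Long, upper: Long, n: Int): Seq[String] = {
  val stride = (upper - lower) / n
  (0 until n).map { i =>
    val lo = if (i == 0) None else Some(s"$col >= ${lower + i * stride}")
    val hi = if (i == n - 1) None else Some(s"$col < ${lower + (i + 1) * stride}")
    Seq(lo, hi).flatten.mkString(" AND ")
  }
}

// whereClauses("id", 0L, 10000L, 4) yields:
//   "id < 2500", "id >= 2500 AND id < 5000",
//   "id >= 5000 AND id < 7500", "id >= 7500"
```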