diff --git a/python/pyspark/sql/readwriter.py b/python/pyspark/sql/readwriter.py
index 78d992e41548..f7c354f51330 100644
--- a/python/pyspark/sql/readwriter.py
+++ b/python/pyspark/sql/readwriter.py
@@ -633,7 +633,7 @@ def text(self, path, compression=None):
@since(2.0)
def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None,
- header=None, nullValue=None, escapeQuotes=None):
+ header=None, nullValue=None, escapeQuotes=None, quoteAll=None):
"""Saves the content of the :class:`DataFrame` in CSV format at the specified path.
:param path: the path in any Hadoop supported file system
@@ -658,6 +658,9 @@ def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None,
:param escapeQuotes: A flag indicating whether values containing quotes should always
be enclosed in quotes. If None is set, it uses the default value
``true``, escaping all values containing a quote character.
+ :param quoteAll: A flag indicating whether all values should always be enclosed in
+ quotes. If None is set, it uses the default value ``false``,
+ only quoting values containing a quote character.
:param header: writes the names of columns as the first line. If None is set, it uses
the default value, ``false``.
:param nullValue: sets the string representation of a null value. If None is set, it uses
@@ -667,7 +670,7 @@ def csv(self, path, mode=None, compression=None, sep=None, quote=None, escape=None,
"""
self.mode(mode)
self._set_opts(compression=compression, sep=sep, quote=quote, escape=escape, header=header,
- nullValue=nullValue, escapeQuotes=escapeQuotes)
+ nullValue=nullValue, escapeQuotes=escapeQuotes, quoteAll=quoteAll)
self._jwrite.csv(path)
@since(1.5)
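For reference, a minimal usage sketch of the new flag as it surfaces through the writer API (shown in Scala; `quoteAll=True` above is the Python spelling). The session, data, and output path are illustrative, and quote/escape are set as in the test below so embedded quotes are doubled:

```scala
// Hedged sketch: quoteAll=true encloses every field in quotes, not just
// fields that contain a quote character. Path and data are made up.
val df = spark.createDataFrame(Seq(("test \"quote\"", 123)))
df.coalesce(1).write
  .option("quote", "\"")
  .option("escape", "\"")
  .option("quoteAll", "true")
  .csv("/tmp/quote-all-example")
// Resulting line: "test ""quote""","123"
```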
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
index f77af76d2bf3..12b304623d30 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/DataFrameWriter.scala
@@ -537,6 +537,8 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T]) {
* <li>`escapeQuotes` (default `true`): a flag indicating whether values containing
* quotes should always be enclosed in quotes. Default is to escape all values containing
* a quote character.</li>
+ * <li>`quoteAll` (default `false`): a flag indicating whether all values should always be
+ * enclosed in quotes. Default is to only quote values containing a quote character.</li>
* <li>`header` (default `false`): writes the names of columns as the first line.</li>
* <li>`nullValue` (default empty string): sets the string representation of a null value.</li>
* <li>`compression` (default `null`): compression codec to use when saving to file. This can be
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala
index 581eda7e09a3..22fb8163b1c0 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVOptions.scala
@@ -115,6 +115,8 @@ private[sql] class CSVOptions(@transient private val parameters: Map[String, String])
val maxMalformedLogPerPartition = getInt("maxMalformedLogPerPartition", 10)
+ val quoteAll = getBool("quoteAll", false)
+
val inputBufferSize = 128
val isCommentSet = this.comment != '\u0000'
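`getBool` is an existing private helper on CSVOptions; for context, a standalone sketch of the parsing pattern it follows (illustrative, not the actual implementation):

```scala
// Hedged sketch of a case-insensitive boolean option lookup with a default.
def getBool(parameters: Map[String, String], paramName: String, default: Boolean = false): Boolean =
  parameters.get(paramName) match {
    case None => default
    case Some(v) if v.equalsIgnoreCase("true") => true
    case Some(v) if v.equalsIgnoreCase("false") => false
    case Some(v) =>
      throw new IllegalArgumentException(s"$paramName flag can be true or false, found: $v")
  }

getBool(Map("quoteAll" -> "true"), "quoteAll")  // true
getBool(Map.empty, "quoteAll")                  // false, the documented default
```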
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVParser.scala
index bf62732dd404..13ae76d49893 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVParser.scala
@@ -78,7 +78,7 @@ private[sql] class LineCsvWriter(params: CSVOptions, headers: Seq[String]) extends Logging {
writerSettings.setNullValue(params.nullValue)
writerSettings.setEmptyValue(params.nullValue)
writerSettings.setSkipEmptyLines(true)
- writerSettings.setQuoteAllFields(false)
+ writerSettings.setQuoteAllFields(params.quoteAll)
writerSettings.setHeaders(headers: _*)
writerSettings.setQuoteEscapingEnabled(params.escapeQuotes)
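The writer delegates the actual quoting to univocity-parsers, so the one-line change above is all that is needed. A standalone sketch of the underlying univocity behavior (settings mirror what LineCsvWriter builds; exact API details depend on the bundled univocity version):

```scala
import java.io.StringWriter
import com.univocity.parsers.csv.{CsvWriter, CsvWriterSettings}

val settings = new CsvWriterSettings()
settings.setQuoteAllFields(true)        // previously hard-coded to false
settings.getFormat.setQuoteEscape('"')  // double embedded quotes, as in the test below

val out = new StringWriter()
val writer = new CsvWriter(out, settings)
writer.writeRow("test \"quote\"", "123")
writer.close()
// out.toString.trim == "\"test \"\"quote\"\"\",\"123\""
```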
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
index f170065132ac..311f1fa8d2af 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/csv/CSVSuite.scala
@@ -366,6 +366,32 @@ class CSVSuite extends QueryTest with SharedSQLContext with SQLTestUtils {
}
}
+ test("save csv with quoteAll enabled") {
+ withTempDir { dir =>
+ val csvDir = new File(dir, "csv").getCanonicalPath
+
+ val data = Seq(("test \"quote\"", 123, "it \"works\"!", "\"very\" well"))
+ val df = spark.createDataFrame(data)
+
+ // quoteAll=true should enclose every field in quotes, not only those containing quotes
+ df.coalesce(1).write
+ .format("csv")
+ .option("quote", "\"")
+ .option("escape", "\"")
+ .option("quoteAll", "true")
+ .save(csvDir)
+
+ val results = spark.read
+ .format("text")
+ .load(csvDir)
+ .collect()
+
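+ // All four fields are quoted; embedded quotes are doubled because escape
+ // is set to the quote character and escapeQuotes defaults to true.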
+ val expected = "\"test \"\"quote\"\"\",\"123\",\"it \"\"works\"\"!\",\"\"\"very\"\" well\""
+
+ assert(results.toSeq.map(_.toSeq) === Seq(Seq(expected)))
+ }
+ }
+
test("save csv with quote escaping enabled") {
withTempDir { dir =>
val csvDir = new File(dir, "csv").getCanonicalPath