27 changes: 13 additions & 14 deletions R/pkg/R/DataFrame.R
@@ -1572,18 +1572,17 @@ setMethod("except",
 #' spark.sql.sources.default will be used.
 #'
 #' Additionally, mode is used to specify the behavior of the save operation when
-#' data already exists in the data source. There are four modes:
-#' append: Contents of this DataFrame are expected to be appended to existing data.
-#' overwrite: Existing data is expected to be overwritten by the contents of
-# this DataFrame.
-#' error: An exception is expected to be thrown.
+#' data already exists in the data source. There are four modes: \cr
+#' append: Contents of this DataFrame are expected to be appended to existing data. \cr
+#' overwrite: Existing data is expected to be overwritten by the contents of this DataFrame. \cr
+#' error: An exception is expected to be thrown. \cr
 #' ignore: The save operation is expected to not save the contents of the DataFrame
-# and to not change the existing data.
+#' and to not change the existing data. \cr
 #'
 #' @param df A SparkSQL DataFrame
 #' @param path A name for the table
 #' @param source A name for external data source
-#' @param mode One of 'append', 'overwrite', 'error', 'ignore'
+#' @param mode One of 'append', 'overwrite', 'error', 'ignore' save mode
 #'
 #' @rdname write.df
 #' @name write.df
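For reference, the four modes documented above correspond to calls like the following. This is a minimal sketch, assuming a SparkR session where sqlContext and a DataFrame df already exist; the output path is hypothetical.

# append: add the contents of df to whatever already exists at the path
write.df(df, "data/people.parquet", "parquet", mode = "append")
# overwrite: replace any existing data at the path with df
write.df(df, "data/people.parquet", "parquet", mode = "overwrite")
# error: throw an exception if data is already present (the default mode)
write.df(df, "data/people.parquet", "parquet", mode = "error")
# ignore: write nothing and leave the existing data untouched
write.df(df, "data/people.parquet", "parquet", mode = "ignore")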
@@ -1596,6 +1595,7 @@ setMethod("except",
 #' path <- "path/to/file.json"
 #' df <- jsonFile(sqlContext, path)
 #' write.df(df, "myfile", "parquet", "overwrite")
+#' saveDF(df, parquetPath2, "parquet", mode = saveMode, mergeSchema = mergeSchema)
 #' }
 setMethod("write.df",
           signature(df = "DataFrame", path = "character"),
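The saveDF example added in this hunk references variables defined elsewhere in the man page. Spelled out with hypothetical values for parquetPath2, saveMode, and mergeSchema, it would read:

parquetPath2 <- "data/people2.parquet"  # hypothetical destination path
saveMode <- "overwrite"                 # any of 'append', 'overwrite', 'error', 'ignore'
mergeSchema <- "true"                   # extra named argument forwarded to the data source
saveDF(df, parquetPath2, "parquet", mode = saveMode, mergeSchema = mergeSchema)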
@@ -1637,18 +1637,17 @@ setMethod("saveDF",
 #' spark.sql.sources.default will be used.
 #'
 #' Additionally, mode is used to specify the behavior of the save operation when
-#' data already exists in the data source. There are four modes:
-#' append: Contents of this DataFrame are expected to be appended to existing data.
-#' overwrite: Existing data is expected to be overwritten by the contents of
-# this DataFrame.
-#' error: An exception is expected to be thrown.
+#' data already exists in the data source. There are four modes: \cr
+#' append: Contents of this DataFrame are expected to be appended to existing data. \cr
+#' overwrite: Existing data is expected to be overwritten by the contents of this DataFrame. \cr
+#' error: An exception is expected to be thrown. \cr
 #' ignore: The save operation is expected to not save the contents of the DataFrame
-# and to not change the existing data.
+#' and to not change the existing data. \cr
 #'
 #' @param df A SparkSQL DataFrame
 #' @param tableName A name for the table
 #' @param source A name for external data source
-#' @param mode One of 'append', 'overwrite', 'error', 'ignore'
+#' @param mode One of 'append', 'overwrite', 'error', 'ignore' save mode
 #'
 #' @rdname saveAsTable
 #' @name saveAsTable
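A usage sketch for saveAsTable as documented above, assuming a sqlContext with persistent-table (Hive) support; the table name and mode are illustrative:

df <- jsonFile(sqlContext, "path/to/file.json")
# Persist df as a table named "people", replacing it if it already exists
saveAsTable(df, "people", source = "parquet", mode = "overwrite")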
16 changes: 11 additions & 5 deletions R/pkg/R/SQLContext.R
@@ -452,14 +452,21 @@ dropTempTable <- function(sqlContext, tableName) {
 #'
 #' @param sqlContext SQLContext to use
 #' @param path The path of files to load
-#' @param source the name of external data source
+#' @param source The name of external data source
 #' @param schema The data schema defined in structType
 #' @return DataFrame
 #' @rdname read.df
 #' @name read.df
 #' @export
 #' @examples
 #'\dontrun{
 #' sc <- sparkR.init()
 #' sqlContext <- sparkRSQL.init(sc)
-#' df <- read.df(sqlContext, "path/to/file.json", source = "json")
+#' df1 <- read.df(sqlContext, "path/to/file.json", source = "json")
+#' schema <- structType(structField("name", "string"),
+#'                      structField("info", "map<string,double>"))
+#' df2 <- read.df(sqlContext, mapTypeJsonPath, "json", schema)
+#' df3 <- loadDF(sqlContext, "data/test_table", "parquet", mergeSchema = "true")
 #' }
 
 read.df <- function(sqlContext, path = NULL, source = NULL, schema = NULL, ...) {
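The schema argument shown above is a structType; supplying one lets read.df skip schema inference. A small sketch with a hypothetical JSON path:

# Declare the expected columns up front instead of inferring them
peopleSchema <- structType(structField("name", "string"),
                           structField("age", "double"))
people <- read.df(sqlContext, "examples/people.json", source = "json",
                  schema = peopleSchema)
printSchema(people)  # confirms the declared types were applied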
@@ -482,9 +489,8 @@ read.df <- function(sqlContext, path = NULL, source = NULL, schema = NULL, ...)
   dataFrame(sdf)
 }
 
-#' @aliases loadDF
-#' @export
-
+#' @rdname read.df
+#' @name loadDF
 loadDF <- function(sqlContext, path = NULL, source = NULL, schema = NULL, ...) {
   read.df(sqlContext, path, source, schema, ...)
 }
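Because loadDF forwards its arguments straight to read.df, the two spellings below are interchangeable (path hypothetical):

dfA <- read.df(sqlContext, "data/people.parquet", source = "parquet")
dfB <- loadDF(sqlContext, "data/people.parquet", source = "parquet")  # same result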