From 087b0f27ed3b66f539796a6914cff20100479292 Mon Sep 17 00:00:00 2001
From: "Steven M. Mortimer"
Date: Wed, 5 Jun 2019 18:53:42 -0400
Subject: [PATCH] Allow Bulk operation arguments to be passed through from top-level generics such as sf_create, sf_update, etc.
---
 R/bulk-operation.R                        | 91 ++++++++++++++---------
 R/create.R                                | 22 +++---
 R/delete.R                                | 12 +--
 R/update.R                                | 20 ++---
 R/upsert.R                                | 46 ++++++------
 docs/reference/sf_batch_details_bulk.html |  5 +-
 docs/reference/sf_bulk_operation.html     |  9 ++-
 docs/reference/sf_create.html             |  2 +-
 docs/reference/sf_create_job_bulk.html    | 20 +++--
 docs/reference/sf_delete.html             |  2 +-
 docs/reference/sf_update.html             |  2 +-
 docs/reference/sf_upsert.html             |  2 +-
 man/sf_batch_details_bulk.Rd              |  5 +-
 man/sf_bulk_operation.Rd                  |  7 +-
 man/sf_create.Rd                          |  2 +-
 man/sf_create_job_bulk.Rd                 | 20 +++--
 man/sf_delete.Rd                          |  2 +-
 man/sf_update.Rd                          |  2 +-
 man/sf_upsert.Rd                          |  2 +-
 19 files changed, 156 insertions(+), 117 deletions(-)

diff --git a/R/bulk-operation.R b/R/bulk-operation.R
index 624bbc72..7662fc43 100644
--- a/R/bulk-operation.R
+++ b/R/bulk-operation.R
@@ -7,15 +7,21 @@
 #' @template external_id_fieldname
 #' @template api_type
 #' @param content_type character; being one of 'CSV', 'ZIP_CSV', 'ZIP_XML', or 'ZIP_JSON' to
-#' indicate the type of data being passed to the Bulk API
-#' @param concurrency_mode character; either "Parallel" or "Serial" that specifies whether batches should be completed
-#' sequentially or in parallel. Use "Serial" only if Lock contentions persist with in "Parallel" mode.
-#' @param line_ending character; indicating the The line ending used for CSV job data,
-#' marking the end of a data row. The default is NULL and determined by the operating system using
-#' "CRLF" for Windows machines and "LF" for Unix machines
+#' indicate the type of data being passed to the Bulk API.
+#' @param concurrency_mode character; either "Parallel" or "Serial" that specifies
+#' whether batches should be completed sequentially or in parallel. Use "Serial"
+#' only if lock contentions persist within "Parallel" mode. Note: this argument is
+#' only used in the Bulk 1.0 API and will be ignored in calls using the Bulk 2.0 API.
+#' @param line_ending character; indicating the line ending used for CSV job data,
+#' marking the end of a data row. The default is NULL, meaning that the line ending
+#' is determined by the operating system, using "CRLF" for Windows machines and
+#' "LF" for Unix machines. Note: this argument is only used in the Bulk 2.0 API
+#' and will be ignored in calls using the Bulk 1.0 API.
 #' @param column_delimiter character; indicating the column delimiter used for CSV job data.
 #' The default value is COMMA. Valid values are: "BACKQUOTE", "CARET", "COMMA", "PIPE",
-#' "SEMICOLON", and "TAB".
+#' "SEMICOLON", and "TAB", but this package only accepts and uses "COMMA". Also,
+#' note that this argument is only used in the Bulk 2.0 API and will be ignored
+#' in calls using the Bulk 1.0 API.
#' @template verbose #' @return A \code{tbl_df} parameters defining the created job, including id #' @references \url{https://developer.salesforce.com/docs/atlas.en-us.api_asynch.meta/api_asynch/} @@ -46,8 +52,8 @@ sf_create_job_bulk <- function(operation = c("insert", "delete", "upsert", "upda object_name, external_id_fieldname = NULL, api_type = c("Bulk 1.0", "Bulk 2.0"), - content_type=c('CSV', 'ZIP_CSV', 'ZIP_XML', 'ZIP_JSON'), - concurrency_mode=c("Parallel", "Serial"), + content_type = c('CSV', 'ZIP_CSV', 'ZIP_XML', 'ZIP_JSON'), + concurrency_mode = c("Parallel", "Serial"), line_ending = NULL, column_delimiter = c('COMMA', 'TAB', 'PIPE', 'SEMICOLON', 'CARET', 'BACKQUOTE'), @@ -57,12 +63,18 @@ sf_create_job_bulk <- function(operation = c("insert", "delete", "upsert", "upda operation <- match.arg(operation) content_type <- match.arg(content_type) if(api_type == "Bulk 1.0"){ - job_response <- sf_create_job_bulk_v1(operation=operation, - object_name=object_name, - external_id_fieldname=external_id_fieldname, - content_type=content_type, - concurrency_mode=concurrency_mode, - verbose=verbose) + if(!missing(line_ending)){ + warning("Ignoring the line_ending argument which isn't used when calling the Bulk 1.0 API", call. = FALSE) + } + if(!missing(column_delimiter)){ + warning("Ignoring the column_delimiter argument which isn't used when calling the Bulk 1.0 API", call. = FALSE) + } + job_response <- sf_create_job_bulk_v1(operation = operation, + object_name = object_name, + external_id_fieldname = external_id_fieldname, + content_type = content_type, + concurrency_mode = concurrency_mode, + verbose = verbose) } else if(api_type == "Bulk 2.0"){ if(!(operation %in% c("insert", "delete", "upsert", "update"))){ stop('Bulk 2.0 only supports the following operations: "insert", "delete", "upsert", and "update"') @@ -70,13 +82,16 @@ sf_create_job_bulk <- function(operation = c("insert", "delete", "upsert", "upda if(!(content_type %in% c("CSV"))){ stop('Bulk 2.0 only supports the "CSV" content type.') } - job_response <- sf_create_job_bulk_v2(operation=operation, - object_name=object_name, - external_id_fieldname=external_id_fieldname, - content_type=content_type, - line_ending=line_ending, - column_delimiter=column_delimiter, - verbose=verbose) + if(!missing(concurrency_mode)){ + warning("Ignoring the concurrency_mode argument which isn't used when calling the Bulk 2.0 API", call. 
= FALSE)
+    }
+    job_response <- sf_create_job_bulk_v2(operation = operation,
+                                          object_name = object_name,
+                                          external_id_fieldname = external_id_fieldname,
+                                          content_type = content_type,
+                                          line_ending = line_ending,
+                                          column_delimiter = column_delimiter,
+                                          verbose = verbose)
   } else {
     stop("Unknown API type")
   }
@@ -93,10 +108,10 @@ sf_create_job_bulk <- function(operation = c("insert", "delete", "upsert", "upda
 sf_create_job_bulk_v1 <- function(operation = c("insert", "delete", "upsert",
                                                 "update", "hardDelete", "query"),
                                   object_name,
-                                  external_id_fieldname=NULL,
-                                  content_type=c('CSV', 'ZIP_CSV', 'ZIP_XML', 'ZIP_JSON'),
-                                  concurrency_mode=c("Parallel", "Serial"),
-                                  verbose=FALSE){
+                                  external_id_fieldname = NULL,
+                                  content_type = c('CSV', 'ZIP_CSV', 'ZIP_XML', 'ZIP_JSON'),
+                                  concurrency_mode = c("Parallel", "Serial"),
+                                  verbose = FALSE){
   operation <- match.arg(operation)
   content_type <- match.arg(content_type)
@@ -150,7 +165,7 @@ sf_create_job_bulk_v1 <- function(operation = c("insert", "delete", "upsert", "u
 #' @keywords internal
 sf_create_job_bulk_v2 <- function(operation = c("insert", "delete", "upsert", "update"),
                                   object_name,
-                                  external_id_fieldname=NULL,
+                                  external_id_fieldname = NULL,
                                   content_type = 'CSV',
                                   line_ending = NULL,
                                   column_delimiter = c('COMMA', 'TAB', 'PIPE', 'SEMICOLON',
@@ -158,8 +173,6 @@ sf_create_job_bulk_v2 <- function(operation = c("insert", "delete", "upsert", "u
                                   verbose=FALSE){
   operation <- match.arg(operation)
-  content_type <- match.arg(content_type)
-  line_ending <- match.arg(line_ending)
   column_delimiter <- match.arg(column_delimiter)
   if(column_delimiter != "COMMA"){
     stop("column_delimiter = 'COMMA' is currently the only supported file delimiter")
   }
@@ -696,8 +709,9 @@ sf_batch_status_bulk <- function(job_id, batch_id, api_type=c("Bulk 1.0"),
 #' @template batch_id
 #' @template api_type
 #' @template verbose
-#' @return A \code{tbl_df}, formatted by salesforce, with information containing the success or failure or certain rows in a submitted batch,
-#' unless the operation was query, then it is a data.frame containing the result_id for retrieving the recordset.
+#' @return A \code{tbl_df}, formatted by Salesforce, with information containing
+#' the success or failure of certain rows in a submitted batch, unless the operation
+#' was query, in which case it is a data.frame containing the result_id for retrieving the recordset.
 #' @references \url{https://developer.salesforce.com/docs/atlas.en-us.api_asynch.meta/api_asynch/}
 #' @note This is a legacy function used only with Bulk 1.0.
 #' @examples
@@ -847,6 +861,8 @@ sf_get_job_records_bulk_v2 <- function(job_id,
 #' @param operation character; string defining the type of operation being performed
 #' @template external_id_fieldname
 #' @template api_type
+#' @param ... other arguments passed on to \code{\link{sf_create_job_bulk}} such as
+#' \code{content_type}, \code{concurrency_mode}, \code{line_ending} or \code{column_delimiter}.
#' @param wait_for_results logical; indicating whether to wait for the operation to complete #' so that the batch results of individual records can be obtained #' @param interval_seconds integer; defines the seconds between attempts to check @@ -875,6 +891,7 @@ sf_bulk_operation <- function(input_data, "update", "hardDelete"), external_id_fieldname = NULL, api_type = c("Bulk 1.0", "Bulk 2.0"), + ..., wait_for_results = TRUE, interval_seconds = 3, max_attempts = 200, @@ -883,11 +900,11 @@ sf_bulk_operation <- function(input_data, stopifnot(!missing(operation)) api_type <- match.arg(api_type) - job_info <- sf_create_job_bulk(operation, object_name=object_name, - external_id_fieldname=external_id_fieldname, - api_type=api_type, verbose=verbose) + job_info <- sf_create_job_bulk(operation, object_name = object_name, + external_id_fieldname = external_id_fieldname, + api_type = api_type, verbose = verbose, ...) batches_info <- sf_create_batches_bulk(job_id = job_info$id, input_data, - api_type=api_type, verbose=verbose) + api_type = api_type, verbose = verbose) if(wait_for_results){ status_complete <- FALSE @@ -929,12 +946,12 @@ sf_bulk_operation <- function(input_data, } if (!status_complete) { message("Function's Time Limit Exceeded. Aborting Job Now") - res <- sf_abort_job_bulk(job_info$id, api_type=api_type, verbose=verbose) + res <- sf_abort_job_bulk(job_info$id, api_type = api_type, verbose = verbose) } else { res <- sf_get_job_records_bulk(job_info$id, api_type=api_type, verbose=verbose) # For Bulk 2.0 jobs -> INVALIDJOBSTATE: Closing already Completed Job not allowed if(api_type == "Bulk 1.0"){ - close_job_info <- sf_close_job_bulk(job_info$id, api_type=api_type, verbose=verbose) + close_job_info <- sf_close_job_bulk(job_info$id, api_type = api_type, verbose = verbose) } } } else { diff --git a/R/create.R b/R/create.R index b30108b5..ab715cb9 100644 --- a/R/create.R +++ b/R/create.R @@ -7,7 +7,7 @@ #' @template object_name #' @template all_or_none #' @template api_type -#' @param ... Other arguments passed on to \code{\link{sf_bulk_operation}}. +#' @param ... other arguments passed on to \code{\link{sf_bulk_operation}}. #' @template verbose #' @return \code{tbl_df} of records with success indicator #' @examples @@ -180,12 +180,12 @@ sf_create_bulk_v1 <- function(input_data, object_name, all_or_none = FALSE, ..., verbose = FALSE){ # allor none? - input_data <- sf_input_data_validation(operation="create", input_data) - resultset <- sf_bulk_operation(input_data=input_data, - object_name=object_name, - operation="insert", + input_data <- sf_input_data_validation(operation = "create", input_data) + resultset <- sf_bulk_operation(input_data = input_data, + object_name = object_name, + operation = "insert", api_type = "Bulk 1.0", - verbose=verbose, ...) + verbose = verbose, ...) return(resultset) } @@ -199,12 +199,12 @@ sf_create_bulk_v2 <- function(input_data, object_name, all_or_none = FALSE, verbose = FALSE){ # allor none? #The order of records in the response is not guaranteed to match the ordering of records in the original job data. - input_data <- sf_input_data_validation(operation="create", input_data) - resultset <- sf_bulk_operation(input_data=input_data, - object_name=object_name, - operation="insert", + input_data <- sf_input_data_validation(operation = "create", input_data) + resultset <- sf_bulk_operation(input_data = input_data, + object_name = object_name, + operation = "insert", api_type = "Bulk 2.0", - verbose=verbose, ...) + verbose = verbose, ...) 
return(resultset) } diff --git a/R/delete.R b/R/delete.R index 7b51e296..56dd2e08 100644 --- a/R/delete.R +++ b/R/delete.R @@ -8,7 +8,7 @@ #' @template object_name #' @template all_or_none #' @template api_type -#' @param ... Other arguments passed on to \code{\link{sf_bulk_operation}}. +#' @param ... other arguments passed on to \code{\link{sf_bulk_operation}}. #' @template verbose #' @return \code{tbl_df} of records with success indicator #' @examples @@ -150,9 +150,9 @@ sf_delete_bulk_v1 <- function(ids, object_name, ..., verbose = FALSE){ # allor none? - ids <- sf_input_data_validation(ids, operation='delete') - resultset <- sf_bulk_operation(input_data=ids, object_name=object_name, - operation="delete", + ids <- sf_input_data_validation(ids, operation = 'delete') + resultset <- sf_bulk_operation(input_data = ids, object_name = object_name, + operation = "delete", api_type = "Bulk 1.0", verbose=verbose, ...) return(resultset) @@ -163,8 +163,8 @@ sf_delete_bulk_v2 <- function(ids, object_name, verbose = FALSE){ # allor none? ids <- sf_input_data_validation(ids, operation='delete') - resultset <- sf_bulk_operation(input_data=ids, object_name=object_name, - operation="delete", + resultset <- sf_bulk_operation(input_data = ids, object_name = object_name, + operation = "delete", api_type = "Bulk 2.0", verbose=verbose, ...) return(resultset) diff --git a/R/update.R b/R/update.R index 80787784..c63309cf 100644 --- a/R/update.R +++ b/R/update.R @@ -7,7 +7,7 @@ #' @template object_name #' @template all_or_none #' @template api_type -#' @param ... Other arguments passed on to \code{\link{sf_bulk_operation}}. +#' @param ... other arguments passed on to \code{\link{sf_bulk_operation}}. #' @template verbose #' @return \code{tbl_df} of records with success indicator #' @examples @@ -187,12 +187,12 @@ sf_update_bulk_v1 <- function(input_data, object_name, all_or_none = FALSE, ..., verbose = FALSE){ # allor none? - input_data <- sf_input_data_validation(operation="update", input_data) - resultset <- sf_bulk_operation(input_data=input_data, - object_name=object_name, - operation="update", + input_data <- sf_input_data_validation(operation = "update", input_data) + resultset <- sf_bulk_operation(input_data = input_data, + object_name = object_name, + operation = "update", api_type = "Bulk 1.0", - verbose=verbose, ...) + verbose = verbose, ...) return(resultset) } @@ -206,10 +206,10 @@ sf_update_bulk_v2 <- function(input_data, object_name, all_or_none = FALSE, verbose = FALSE){ # allor none? #The order of records in the response is not guaranteed to match the ordering of records in the original job data. - input_data <- sf_input_data_validation(operation='update', input_data) - resultset <- sf_bulk_operation(input_data=input_data, - object_name=object_name, - operation="update", + input_data <- sf_input_data_validation(operation = 'update', input_data) + resultset <- sf_bulk_operation(input_data = input_data, + object_name = object_name, + operation = "update", api_type = "Bulk 2.0", verbose=verbose, ...) return(resultset) diff --git a/R/upsert.R b/R/upsert.R index abf28f75..ae703f23 100644 --- a/R/upsert.R +++ b/R/upsert.R @@ -8,7 +8,7 @@ #' @template external_id_fieldname #' @template all_or_none #' @template api_type -#' @param ... Other arguments passed on to \code{\link{sf_bulk_operation}}. +#' @param ... other arguments passed on to \code{\link{sf_bulk_operation}}. 
#' @template verbose #' @return \code{tbl_df} of records with success indicator #' @examples @@ -42,20 +42,20 @@ sf_upsert <- function(input_data, api_type <- match.arg(api_type) if(api_type == "REST"){ - resultset <- sf_upsert_rest(input_data=input_data, object_name=object_name, - external_id_fieldname=external_id_fieldname, - all_or_none=all_or_none, verbose=verbose) + resultset <- sf_upsert_rest(input_data = input_data, object_name = object_name, + external_id_fieldname = external_id_fieldname, + all_or_none = all_or_none, verbose = verbose) } else if(api_type == "SOAP"){ - resultset <- sf_upsert_soap(input_data=input_data, object_name=object_name, - external_id_fieldname=external_id_fieldname, - all_or_none=all_or_none, verbose=verbose) + resultset <- sf_upsert_soap(input_data = input_data, object_name = object_name, + external_id_fieldname = external_id_fieldname, + all_or_none = all_or_none, verbose = verbose) } else if(api_type == "Bulk 1.0"){ - resultset <- sf_upsert_bulk_v1(input_data=input_data, object_name=object_name, - external_id_fieldname=external_id_fieldname, + resultset <- sf_upsert_bulk_v1(input_data = input_data, object_name = object_name, + external_id_fieldname = external_id_fieldname, verbose = verbose, ...) } else if(api_type == "Bulk 2.0"){ - resultset <- sf_upsert_bulk_v2(input_data=input_data, object_name=object_name, - external_id_fieldname=external_id_fieldname, + resultset <- sf_upsert_bulk_v2(input_data = input_data, object_name = object_name, + external_id_fieldname = external_id_fieldname, verbose = verbose, ...) } else { stop("Unknown API type") @@ -214,13 +214,13 @@ sf_upsert_bulk_v1 <- function(input_data, object_name, ..., verbose = FALSE){ # allor none? - input_data <- sf_input_data_validation(operation="upsert", input_data) - resultset <- sf_bulk_operation(input_data=input_data, - object_name=object_name, - external_id_fieldname=external_id_fieldname, - operation="upsert", + input_data <- sf_input_data_validation(operation = "upsert", input_data) + resultset <- sf_bulk_operation(input_data = input_data, + object_name = object_name, + external_id_fieldname = external_id_fieldname, + operation = "upsert", api_type = "Bulk 1.0", - verbose=verbose, ...) + verbose = verbose, ...) return(resultset) } @@ -236,12 +236,12 @@ sf_upsert_bulk_v2 <- function(input_data, object_name, verbose = FALSE){ # allor none? #The order of records in the response is not guaranteed to match the ordering of records in the original job data. - input_data <- sf_input_data_validation(operation='upsert', input_data) - resultset <- sf_bulk_operation(input_data=input_data, - object_name=object_name, - external_id_fieldname=external_id_fieldname, - operation="upsert", + input_data <- sf_input_data_validation(operation = 'upsert', input_data) + resultset <- sf_bulk_operation(input_data = input_data, + object_name = object_name, + external_id_fieldname = external_id_fieldname, + operation = "upsert", api_type = "Bulk 2.0", - verbose=verbose, ...) + verbose = verbose, ...) return(resultset) } diff --git a/docs/reference/sf_batch_details_bulk.html b/docs/reference/sf_batch_details_bulk.html index b17e7caf..72943d82 100644 --- a/docs/reference/sf_batch_details_bulk.html +++ b/docs/reference/sf_batch_details_bulk.html @@ -189,8 +189,9 @@

Arguments

Value

-

A tbl_df, formatted by salesforce, with information containing the success or failure or certain rows in a submitted batch, -unless the operation was query, then it is a data.frame containing the result_id for retrieving the recordset.

+

A tbl_df, formatted by Salesforce, with information containing
+the success or failure of certain rows in a submitted batch, unless the operation
+was query, in which case it is a data.frame containing the result_id for retrieving the recordset.

Note

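To make the documented return value concrete, here is a minimal sketch of pulling
row-level results for one batch (Bulk 1.0 only; assumes job_info and batches_info
were returned by sf_create_job_bulk() and sf_create_batches_bulk()):

    # row-by-row success/failure for a single submitted batch
    batch_detail <- sf_batch_details_bulk(job_id   = job_info$id,
                                          batch_id = batches_info$id,
                                          api_type = "Bulk 1.0")
    # for a "query" operation the result instead carries the result_id
    # needed to retrieve the recordset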
diff --git a/docs/reference/sf_bulk_operation.html b/docs/reference/sf_bulk_operation.html index ce611a80..4cf5c22f 100644 --- a/docs/reference/sf_bulk_operation.html +++ b/docs/reference/sf_bulk_operation.html @@ -161,8 +161,8 @@

Run Bulk Operation

sf_bulk_operation(input_data, object_name, operation = c("insert",
   "delete", "upsert", "update", "hardDelete"),
   external_id_fieldname = NULL, api_type = c("Bulk 1.0", "Bulk 2.0"),
-  wait_for_results = TRUE, interval_seconds = 3, max_attempts = 200,
-  verbose = FALSE)
+ ..., wait_for_results = TRUE, interval_seconds = 3, + max_attempts = 200, verbose = FALSE)

Arguments

@@ -191,6 +191,11 @@

Arguments

diff --git a/docs/reference/sf_create.html b/docs/reference/sf_create.html
index d94abbcc..378af772 100644
--- a/docs/reference/sf_create.html
+++ b/docs/reference/sf_create.html
@@ -187,7 +187,7 @@

Arguments

diff --git a/docs/reference/sf_create_job_bulk.html b/docs/reference/sf_create_job_bulk.html
index b159bfee..a61e025d 100644
--- a/docs/reference/sf_create_job_bulk.html
+++ b/docs/reference/sf_create_job_bulk.html
@@ -192,24 +192,30 @@

Arguments

+indicate the type of data being passed to the Bulk API.

+"SEMICOLON", and "TAB", but this package only accepts and uses "COMMA". Also,
+note that this argument is only used in the Bulk 2.0 API and will be ignored
+in calls using the Bulk 1.0 API.

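A sketch of the version-specific argument handling documented above (the object
name is illustrative, and an authenticated session, e.g. via sf_auth(), is assumed):

    # Bulk 1.0: concurrency_mode applies; line_ending and column_delimiter
    # would be ignored with a warning
    job_v1 <- sf_create_job_bulk(operation = "insert",
                                 object_name = "Account",
                                 api_type = "Bulk 1.0",
                                 concurrency_mode = "Serial")

    # Bulk 2.0: line_ending applies; supplying concurrency_mode now warns:
    #   "Ignoring the concurrency_mode argument which isn't used when
    #    calling the Bulk 2.0 API"
    job_v2 <- sf_create_job_bulk(operation = "insert",
                                 object_name = "Account",
                                 api_type = "Bulk 2.0",
                                 line_ending = "LF")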
diff --git a/docs/reference/sf_delete.html b/docs/reference/sf_delete.html index cb0cba5b..3784ca7b 100644 --- a/docs/reference/sf_delete.html +++ b/docs/reference/sf_delete.html @@ -187,7 +187,7 @@

Arguments

diff --git a/docs/reference/sf_update.html b/docs/reference/sf_update.html
index a7ff7928..9e9222b4 100644
--- a/docs/reference/sf_update.html
+++ b/docs/reference/sf_update.html
@@ -187,7 +187,7 @@

Arguments

diff --git a/docs/reference/sf_upsert.html b/docs/reference/sf_upsert.html
index 0efb1e80..dc91fdef 100644
--- a/docs/reference/sf_upsert.html
+++ b/docs/reference/sf_upsert.html
@@ -193,7 +193,7 @@

Arguments

diff --git a/man/sf_batch_details_bulk.Rd b/man/sf_batch_details_bulk.Rd
index 95bb9c9c..57d4e8cb 100644
--- a/man/sf_batch_details_bulk.Rd
+++ b/man/sf_batch_details_bulk.Rd
@@ -20,8 +20,9 @@ batch as returned by \link{sf_create_batches_bulk}}
 \item{verbose}{logical; do you want informative messages?}
 }
 \value{
-A \code{tbl_df}, formatted by salesforce, with information containing the success or failure or certain rows in a submitted batch,
-unless the operation was query, then it is a data.frame containing the result_id for retrieving the recordset.
+A \code{tbl_df}, formatted by Salesforce, with information containing
+the success or failure of certain rows in a submitted batch, unless the operation
+was query, in which case it is a data.frame containing the result_id for retrieving the recordset.
 }
 \description{
 This function returns detailed (row-by-row) information on an existing batch
diff --git a/man/sf_bulk_operation.Rd b/man/sf_bulk_operation.Rd
index a0bfea1d..2f4ce6b7 100644
--- a/man/sf_bulk_operation.Rd
+++ b/man/sf_bulk_operation.Rd
@@ -7,8 +7,8 @@
 sf_bulk_operation(input_data, object_name, operation = c("insert",
   "delete", "upsert", "update", "hardDelete"),
   external_id_fieldname = NULL, api_type = c("Bulk 1.0", "Bulk 2.0"),
-  wait_for_results = TRUE, interval_seconds = 3, max_attempts = 200,
-  verbose = FALSE)
+  ..., wait_for_results = TRUE, interval_seconds = 3,
+  max_attempts = 200, verbose = FALSE)
 }
 \arguments{
 \item{input_data}{\code{named vector}, \code{matrix}, \code{data.frame}, or
@@ -26,6 +26,9 @@ objects during upserts to determine if the record already exists in Salesforce o
 \item{api_type}{character; one of "REST", "SOAP", "Bulk 1.0", "Bulk 2.0", or
 "Chatter" indicating which API to use when making the request}
+\item{...}{other arguments passed on to \code{\link{sf_create_job_bulk}} such as
+\code{content_type}, \code{concurrency_mode}, \code{line_ending} or \code{column_delimiter}.}
+
 \item{wait_for_results}{logical; indicating whether to wait for the operation to complete
 so that the batch results of individual records can be obtained}
diff --git a/man/sf_create.Rd b/man/sf_create.Rd
index aae56ba8..fd27d222 100644
--- a/man/sf_create.Rd
+++ b/man/sf_create.Rd
@@ -21,7 +21,7 @@ records are processed successfully}
 \item{api_type}{character; one of "REST", "SOAP", "Bulk 1.0", "Bulk 2.0", or
 "Chatter" indicating which API to use when making the request}
-\item{...}{Other arguments passed on to \code{\link{sf_bulk_operation}}.}
+\item{...}{other arguments passed on to \code{\link{sf_bulk_operation}}.}
 \item{verbose}{logical; do you want informative messages?}
 }
diff --git a/man/sf_create_job_bulk.Rd b/man/sf_create_job_bulk.Rd
index 9fa12f03..b8a4ce6f 100644
--- a/man/sf_create_job_bulk.Rd
+++ b/man/sf_create_job_bulk.Rd
@@ -26,18 +26,24 @@ objects during upserts to determine if the record already exists in Salesforce o
 "Chatter" indicating which API to use when making the request}
 \item{content_type}{character; being one of 'CSV', 'ZIP_CSV', 'ZIP_XML', or 'ZIP_JSON' to
-indicate the type of data being passed to the Bulk API}
+indicate the type of data being passed to the Bulk API.}
-\item{concurrency_mode}{character; either "Parallel" or "Serial" that specifies whether batches should be completed
-sequentially or in parallel. Use "Serial" only if Lock contentions persist with in "Parallel" mode.}
+\item{concurrency_mode}{character; either "Parallel" or "Serial" that specifies
+whether batches should be completed sequentially or in parallel.
Use "Serial"
+only if lock contentions persist within "Parallel" mode. Note: this argument is
+only used in the Bulk 1.0 API and will be ignored in calls using the Bulk 2.0 API.}

-\item{line_ending}{character; indicating the The line ending used for CSV job data,
-marking the end of a data row. The default is NULL and determined by the operating system using
-"CRLF" for Windows machines and "LF" for Unix machines}
+\item{line_ending}{character; indicating the line ending used for CSV job data,
+marking the end of a data row. The default is NULL, meaning that the line ending
+is determined by the operating system, using "CRLF" for Windows machines and
+"LF" for Unix machines. Note: this argument is only used in the Bulk 2.0 API
+and will be ignored in calls using the Bulk 1.0 API.}

 \item{column_delimiter}{character; indicating the column delimiter used for CSV job data.
 The default value is COMMA. Valid values are: "BACKQUOTE", "CARET", "COMMA", "PIPE",
-"SEMICOLON", and "TAB".}
+"SEMICOLON", and "TAB", but this package only accepts and uses "COMMA". Also,
+note that this argument is only used in the Bulk 2.0 API and will be ignored
+in calls using the Bulk 1.0 API.}

 \item{verbose}{logical; do you want informative messages?}
 }
diff --git a/man/sf_delete.Rd b/man/sf_delete.Rd
index 0844abde..aac8e793 100644
--- a/man/sf_delete.Rd
+++ b/man/sf_delete.Rd
@@ -21,7 +21,7 @@ records are processed successfully}
 \item{api_type}{character; one of "REST", "SOAP", "Bulk 1.0", "Bulk 2.0", or
 "Chatter" indicating which API to use when making the request}
-\item{...}{Other arguments passed on to \code{\link{sf_bulk_operation}}.}
+\item{...}{other arguments passed on to \code{\link{sf_bulk_operation}}.}
 \item{verbose}{logical; do you want informative messages?}
 }
diff --git a/man/sf_update.Rd b/man/sf_update.Rd
index 1dea1b1e..72f8c6ce 100644
--- a/man/sf_update.Rd
+++ b/man/sf_update.Rd
@@ -21,7 +21,7 @@ records are processed successfully}
 \item{api_type}{character; one of "REST", "SOAP", "Bulk 1.0", "Bulk 2.0", or
 "Chatter" indicating which API to use when making the request}
-\item{...}{Other arguments passed on to \code{\link{sf_bulk_operation}}.}
+\item{...}{other arguments passed on to \code{\link{sf_bulk_operation}}.}
 \item{verbose}{logical; do you want informative messages?}
 }
diff --git a/man/sf_upsert.Rd b/man/sf_upsert.Rd
index 82a5d368..5ba3fae8 100644
--- a/man/sf_upsert.Rd
+++ b/man/sf_upsert.Rd
@@ -25,7 +25,7 @@ records are processed successfully}
 \item{api_type}{character; one of "REST", "SOAP", "Bulk 1.0", "Bulk 2.0", or
 "Chatter" indicating which API to use when making the request}
-\item{...}{Other arguments passed on to \code{\link{sf_bulk_operation}}.}
+\item{...}{other arguments passed on to \code{\link{sf_bulk_operation}}.}
 \item{verbose}{logical; do you want informative messages?}
 }
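An end-to-end sketch of the new pass-through from a top-level generic (the data
and object name are hypothetical; an authenticated session is assumed):

    # line_ending now travels sf_create() -> sf_bulk_operation() -> sf_create_job_bulk()
    new_contacts <- data.frame(FirstName = c("Test", "Demo"),
                               LastName  = c("Contact-1", "Contact-2"))
    created <- sf_create(new_contacts,
                         object_name = "Contact",
                         api_type    = "Bulk 2.0",
                         line_ending = "LF")

    # the Bulk 1.0 equivalent forwards concurrency_mode instead
    created <- sf_create(new_contacts,
                         object_name = "Contact",
                         api_type    = "Bulk 1.0",
                         concurrency_mode = "Serial")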
[docs/reference/*.html: the pkgdown-generated argument tables carry the same wording updates as the corresponding man/*.Rd files above — the new "..." pass-through entry, the content_type/concurrency_mode/line_ending/column_delimiter notes on Bulk 1.0 vs. Bulk 2.0 applicability, and the lowercased "other arguments passed on to sf_bulk_operation" phrasing.]
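For reference, the orchestration that sf_bulk_operation() performs once the
arguments arrive, per the function body above (a simplified sketch that omits
the interval_seconds/max_attempts polling bookkeeping):

    job_info     <- sf_create_job_bulk(operation, object_name = object_name,
                                       external_id_fieldname = external_id_fieldname,
                                       api_type = api_type, verbose = verbose, ...)
    batches_info <- sf_create_batches_bulk(job_id = job_info$id, input_data,
                                           api_type = api_type, verbose = verbose)
    # ...poll until the job completes, then...
    res <- sf_get_job_records_bulk(job_info$id, api_type = api_type, verbose = verbose)
    # Bulk 2.0 jobs are already complete here; closing again raises INVALIDJOBSTATE
    if(api_type == "Bulk 1.0"){
      close_job_info <- sf_close_job_bulk(job_info$id, api_type = api_type, verbose = verbose)
    }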