Skip to content
This repository has been archived by the owner on Oct 12, 2023. It is now read-only.

Commit

Permalink
Improve R console UI experience (#193)
Browse files Browse the repository at this point in the history
* Improve UI experience

* Added verbose mode to deleteJob and deleteStorageContainer

* Refactor print method
  • Loading branch information
brnleehng authored Dec 14, 2017
1 parent 89dbba9 commit a0d5537
Show file tree
Hide file tree
Showing 4 changed files with 96 additions and 26 deletions.
26 changes: 20 additions & 6 deletions R/doAzureParallel.R
Original file line number Diff line number Diff line change
Expand Up @@ -489,9 +489,20 @@ setHttpTraffic <- function(value = FALSE) {
}
}

cat("Job Summary: ", fill = TRUE)

job <- rAzureBatch::getJob(id)
cat(sprintf("Id: %s", job$id), fill = TRUE)

printJobInformation(
jobId = job$id,
chunkSize = chunkSize,
enableCloudCombine = enableCloudCombine,
errorHandling = obj$errorHandling,
wait = wait,
autoDeleteJob = autoDeleteJob,
cranPackages = obj$packages,
githubPackages = githubPackages,
bioconductorPackages = bioconductorPackages
)

if (!is.null(job$id)) {
saveMetadataBlob(job$id, metadata)
Expand Down Expand Up @@ -531,12 +542,16 @@ setHttpTraffic <- function(value = FALSE) {
containerImage = data$containerImage
)

cat("\r", sprintf("Submitting tasks (%s/%s)", i, length(endIndices)), sep = "")
flush.console()

return(taskId)
})

rAzureBatch::updateJob(id)

if (enableCloudCombine) {
cat("\nSubmitting merge task")
mergeTaskId <- paste0(id, "-merge")
.addTask(
jobId = id,
Expand All @@ -554,6 +569,7 @@ setHttpTraffic <- function(value = FALSE) {
outputFiles = obj$options$azure$outputFiles,
containerImage = data$containerImage
)
cat(". . .")
}

if (wait) {
Expand Down Expand Up @@ -603,12 +619,10 @@ setHttpTraffic <- function(value = FALSE) {
errorValue <- foreach::getErrorValue(it)
errorIndex <- foreach::getErrorIndex(it)

cat(sprintf("Number of errors: %i", numberOfFailedTasks),
fill = TRUE)

# delete job from batch service and job result from storage blob
if (autoDeleteJob) {
deleteJob(id)
# Default behavior is to delete the job data
deleteJob(id, verbose = !autoDeleteJob)
}

if (identical(obj$errorHandling, "stop") &&
Expand Down
30 changes: 20 additions & 10 deletions R/jobUtilities.R
Original file line number Diff line number Diff line change
Expand Up @@ -258,18 +258,28 @@ getJobResult <- function(jobId) {
#' deleteJob("job-001")
#' }
#' @export
#' Delete a job and its associated storage container.
#'
#' Removes the job's result container from storage first, then deletes the
#' job itself from the Batch service. Service failures are reported on the
#' console (when verbose) rather than raised to the caller.
#'
#' @param jobId Identifier of the job to delete.
#' @param verbose Print success/diagnostic messages? Defaults to \code{TRUE}.
deleteJob <- function(jobId, verbose = TRUE) {
  # The job's result container in storage is removed first; verbosity is
  # passed through so callers (e.g. autoDeleteJob) can silence both steps.
  deleteStorageContainer(jobId, verbose)

  response <- rAzureBatch::deleteJob(jobId, content = "response")

  tryCatch({
    # Raises an error for any non-success HTTP status.
    httr::stop_for_status(response)

    if (verbose) {
      cat(sprintf("Your job '%s' has been deleted.", jobId),
          fill = TRUE)
    }
  },
  error = function(e) {
    if (verbose) {
      # Surface the service's own error message instead of the raw condition.
      response <- httr::content(response, encoding = "UTF-8")
      cat("Call: deleteJob", fill = TRUE)
      cat(sprintf("Exception: %s", response$message$value),
          fill = TRUE)
    }
  }
  )
}

#' Terminate a job
Expand Down Expand Up @@ -301,7 +311,7 @@ terminateJob <- function(jobId) {
#' @export
waitForTasksToComplete <-
function(jobId, timeout, errorHandling = "stop") {
cat("Waiting for tasks to complete. . .", fill = TRUE)
cat("\nWaiting for tasks to complete. . .", fill = TRUE)

totalTasks <- 0
currentTasks <- rAzureBatch::listTask(jobId)
Expand Down
30 changes: 20 additions & 10 deletions R/storage_management.R
Original file line number Diff line number Diff line change
Expand Up @@ -34,19 +34,29 @@ listStorageContainers <- function(prefix = "") {
#' @param container The name of the container
#'
#' @export
#' Delete a storage container.
#'
#' Deletes the named container from the storage account. Failures are
#' reported on the console (when verbose) rather than raised to the caller.
#'
#' @param container The name of the container to delete.
#' @param verbose Print success/diagnostic messages? Defaults to \code{TRUE}.
#'
#' @export
deleteStorageContainer <- function(container, verbose = TRUE) {
  response <-
    rAzureBatch::deleteContainer(container, content = "response")

  tryCatch({
    # Raises an error for any non-success HTTP status.
    httr::stop_for_status(response)

    if (verbose) {
      # Fix: the success message previously interpolated 'jobId', which is
      # not in scope in this function; use the 'container' argument.
      cat(sprintf("Your storage container '%s' has been deleted.", container),
          fill = TRUE)
    }
  },
  error = function(e) {
    # Checking for status code instead of using xml2 package.
    # Storage helper functions require the xml2 package, which requires
    # special installations, so the error body is not parsed here.
    if (verbose && response$status_code == 404) {
      cat("Call: deleteStorageContainer", fill = TRUE)
      cat(sprintf("Exception: %s",
                  "The specified storage container does not exist"),
          fill = TRUE)
    }
  }
  )
}

#' List storage files from Azure storage.
Expand Down
36 changes: 36 additions & 0 deletions R/stringUtilities.R
Original file line number Diff line number Diff line change
Expand Up @@ -14,3 +14,39 @@ getTaskFailedErrorString <- function(...) {

return(errorMessage)
}

#' Print a one-line summary of a package vector.
#'
#' Prints the caller's argument expression as a label (via
#' \code{deparse(substitute(...))}), followed by a tab-indented,
#' semicolon-separated list of package names. Prints nothing when the
#' vector is empty or \code{NULL}.
#'
#' @param packages Character vector of package names (may be empty/NULL).
getJobPackageSummary <- function(packages) {
  if (length(packages) > 0) {
    # substitute() echoes the caller's expression (e.g. "githubPackages")
    # as the section label, so the argument must stay a simple name here.
    cat(sprintf("%s: ", deparse(substitute(packages))), fill = TRUE)
    cat("\t")
    # seq_along() instead of 1:length(): safe even if the guard is removed.
    for (i in seq_along(packages)) {
      cat(packages[i], "; ", sep = "")
    }
    cat("\n")
  }
}

#' Print a formatted, console-width-ruled summary of a job's configuration.
#'
#' @param jobId Job identifier.
#' @param chunkSize Chunk size the job was submitted with.
#' @param enableCloudCombine Whether a cloud merge task is used.
#' @param errorHandling foreach error-handling mode (e.g. "stop").
#' @param wait Whether the caller blocks until the job completes.
#' @param autoDeleteJob Whether job data is deleted automatically.
#' @param cranPackages,githubPackages,bioconductorPackages Package name
#'   vectors; each non-empty vector is summarised on its own labelled line.
printJobInformation <- function(jobId,
                                chunkSize,
                                enableCloudCombine,
                                errorHandling,
                                wait,
                                autoDeleteJob,
                                cranPackages,
                                githubPackages,
                                bioconductorPackages) {
  # Fix: getOption("width") returns the console width as a plain number;
  # options("width") returns a one-element list, which strrep() only
  # accepted by accident of coercion.
  separator <- strrep("=", getOption("width"))

  cat(separator, fill = TRUE)
  cat(sprintf("Id: %s", jobId), fill = TRUE)
  cat(sprintf("chunkSize: %s", as.character(chunkSize)), fill = TRUE)
  cat(sprintf("enableCloudCombine: %s", as.character(enableCloudCombine)),
      fill = TRUE)

  # Local alias so the CRAN section is labelled "packages"
  # (getJobPackageSummary derives its label via deparse(substitute(...))).
  packages <- cranPackages
  getJobPackageSummary(packages)
  getJobPackageSummary(githubPackages)
  getJobPackageSummary(bioconductorPackages)

  cat(sprintf("errorHandling: %s", as.character(errorHandling)), fill = TRUE)
  cat(sprintf("wait: %s", as.character(wait)), fill = TRUE)
  cat(sprintf("autoDeleteJob: %s", as.character(autoDeleteJob)), fill = TRUE)
  cat(separator, fill = TRUE)
}

0 comments on commit a0d5537

Please sign in to comment.