5 changes: 5 additions & 0 deletions core/src/main/scala/org/apache/spark/rdd/RDD.scala
@@ -209,6 +209,11 @@ abstract class RDD[T: ClassTag](
   */
  def cache(): this.type = persist()

  /**
   * Eagerly runs a job over every partition, forcing evaluation of this
   * RDD's lineage without collecting any results back to the driver.
   */
  def noOpRun(): RDD[T] = withScope {
    sc.runJob(this, Utils.getIteratorSize _)
    this
  }

  /**
   * Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
   *
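A minimal usage sketch, not part of the diff: with this patch applied, `noOpRun` behaves as a side-effect-free action, so it can force a lazy lineage to execute, for example to populate a cache before timing later stages. Here `sc` is an existing `SparkContext` and `slowSquare` is a placeholder for any expensive transformation.

```scala
// Hypothetical driver-side usage; assumes the noOpRun patch above is applied.
val slowSquare = (x: Int) => { Thread.sleep(1); x * x }

val squares = sc.parallelize(1 to 10000).map(slowSquare).cache()

// cache() is lazy. noOpRun() submits a job that iterates every partition,
// populating the cache without shipping any results back to the driver.
squares.noOpRun()

// Subsequent actions read from the cache instead of recomputing slowSquare.
val total = squares.count()
```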
6 changes: 6 additions & 0 deletions python/pyspark/rdd.py
@@ -326,6 +326,12 @@ def unpersist(self, blocking=False):
        self._jrdd.unpersist(blocking)
        return self

    def noOpRun(self):
        """
        Action that eagerly runs the job up to this point without collecting
        results, and returns a new RDD over the same underlying data.
        """
        return RDD(self._jrdd.rdd().noOpRun(), self.ctx, self._jrdd_deserializer)

    def checkpoint(self):
        """
        Mark this RDD for checkpointing. It will be saved to a file inside the
6 changes: 6 additions & 0 deletions python/pyspark/sql/dataframe.py
@@ -602,6 +602,12 @@ def persist(self, storageLevel=StorageLevel.MEMORY_AND_DISK):
        self._jdf.persist(javaStorageLevel)
        return self

    def noOpRun(self):
        """
        Action that eagerly runs the job up to this point and returns a new
        DataFrame over the same results.
        """
        return DataFrame(self._jdf.noOpRun(), self.sql_ctx)

    @property
    @since(2.1)
    def storageLevel(self):
8 changes: 8 additions & 0 deletions sql/core/src/main/scala/org/apache/spark/sql/Dataset.scala
@@ -2949,6 +2949,14 @@ class Dataset[T] private[sql](
   */
  def cache(): this.type = persist()

  /**
   * Eagerly executes the query behind this Dataset without collecting any
   * rows to the driver, and returns a new Dataset over the resulting RDD.
   */
  def noOpRun(): Dataset[T] = withAction("noOpRun", queryExecution) { _ =>
    // withAction already wraps the body in a new execution id, so we must not
    // nest withNewExecutionId here (nested calls throw at runtime).
    val resultRDD = queryExecution.toRdd.noOpRun().map(
      exprEnc.resolveAndBind(logicalPlan.output, sparkSession.sessionState.analyzer).fromRow)
    sparkSession.createDataset(resultRDD)
  }

  /**
   * Persist this Dataset with the given storage level.
   * @param newLevel One of: `MEMORY_ONLY`, `MEMORY_AND_DISK`, `MEMORY_ONLY_SER`,
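For the Dataset variant, a sketch of the intended use, again assuming the patch is applied; `spark` is an existing `SparkSession` and the input path is a placeholder. Note that only the underlying RDD job runs eagerly inside `noOpRun`; deserializing rows into the returned `Dataset` stays lazy.

```scala
import spark.implicits._

val lengths = spark.read.textFile("/tmp/words.txt").map(_.length)

// Executes the physical plan end to end without collecting rows,
// which is handy for timing the query in isolation.
val start = System.nanoTime()
lengths.noOpRun()
println(s"Plan executed in ${(System.nanoTime() - start) / 1e6} ms")
```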