@@ -1078,11 +1078,12 @@ abstract class TreeNode[BaseType <: TreeNode[BaseType]] extends Product with Tre
     append(str)
     append("\n")

-    if (innerChildren.nonEmpty) {
-      innerChildren.init.foreach(_.generateTreeString(
+    val innerChildrenLocal = innerChildren
+    if (innerChildrenLocal.nonEmpty) {
+      innerChildrenLocal.init.foreach(_.generateTreeString(
         depth + 2, lastChildren :+ children.isEmpty :+ false, append, verbose,
         addSuffix = addSuffix, maxFields = maxFields, printNodeId = printNodeId, indent = indent))
-      innerChildren.last.generateTreeString(
+      innerChildrenLocal.last.generateTreeString(
         depth + 2, lastChildren :+ children.isEmpty :+ true, append, verbose,
         addSuffix = addSuffix, maxFields = maxFields, printNodeId = printNodeId, indent = indent)
     }
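With `InMemoryRelation.innerChildren` becoming a plain `def` over the live cached plan (third hunk below), two separate reads of `innerChildren` inside `generateTreeString` are no longer guaranteed to see the same sequence, so the patch reads it once into `innerChildrenLocal`. A minimal, Spark-free sketch of that read-once pattern (the names `ReadOnceSketch`, `Node` and `render` are hypothetical, not Spark APIs):

```scala
// Read-once sketch: `inner` may be swapped by another thread, so take one snapshot
// and derive both `init` and `last` from it, mirroring `innerChildrenLocal` above.
object ReadOnceSketch {
  final class Node(@volatile var inner: Seq[String]) {
    def render(out: StringBuilder): Unit = {
      val snapshot = inner                         // single read of the volatile field
      if (snapshot.nonEmpty) {
        snapshot.init.foreach(s => out.append(s).append(" -> "))
        out.append(snapshot.last)                  // guaranteed to come from the same snapshot
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val out = new StringBuilder
    new Node(Seq("a", "b", "c")).render(out)
    println(out)                                   // a -> b -> c
  }
}
```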
@@ -70,8 +70,12 @@ object ExplainUtils extends AdaptiveSparkPlanHelper {
    * Given a input physical plan, performs the following tasks.
    * 1. Generates the explain output for the input plan excluding the subquery plans.
    * 2. Generates the explain output for each subquery referenced in the plan.
+   *
+   * Note that this is ideally a no-op, because different explain actions operate on different
+   * plan instances; cached plans are an exception: `InMemoryRelation#innerChildren` uses a plan
+   * instance shared across multiple queries, so this method is locked to avoid a tag race condition.
    */
-  def processPlan[T <: QueryPlan[T]](plan: T, append: String => Unit): Unit = {
+  def processPlan[T <: QueryPlan[T]](plan: T, append: String => Unit): Unit = synchronized {
     try {
       var currentOperatorID = 0
       currentOperatorID = generateOperatorIDs(plan, currentOperatorID)
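The comment above describes the actual hazard: when two queries share one `InMemoryRelation`, their explain passes tag and un-tag the very same node instances, so the passes must not interleave. A Spark-free sketch of that write/render/clean-up shape and of the lock that serializes it (`FakeNode`, `ExplainSketch.process` and the tag key are hypothetical stand-ins, not Spark APIs):

```scala
import scala.collection.mutable

// One shared tree node with a mutable tag map, standing in for TreeNode.tags.
final class FakeNode {
  val tags: mutable.Map[String, Any] = mutable.Map.empty
}

object ExplainSketch {
  // Tag the shared node, render it, then clear the tag. Without `synchronized`,
  // two concurrent calls could overwrite or remove each other's tag mid-render.
  def process(shared: FakeNode, operatorId: Int): String = synchronized {
    shared.tags("operatorId") = operatorId
    val rendered = s"(${shared.tags("operatorId")}) SharedCachedPlan"
    shared.tags.remove("operatorId")
    rendered
  }
}

object Demo {
  def main(args: Array[String]): Unit = {
    val shared = new FakeNode
    val threads = Seq(1, 2).map { id =>
      new Thread(() => println(ExplainSketch.process(shared, id)))
    }
    threads.foreach(_.start())
    threads.foreach(_.join())
  }
}
```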
@@ -378,17 +378,7 @@ case class InMemoryRelation(

   @volatile var statsOfPlanToCache: Statistics = null

-
-  override lazy val innerChildren: Seq[SparkPlan] = {
-    // The cachedPlan needs to be cloned here because it does not get cloned when SparkPlan.clone is
-    // called. This is a problem because when the explain output is generated for
-    // a plan it traverses the innerChildren and modifies their TreeNode.tags. If the plan is not
-    // cloned, there is a thread safety issue in the case that two plans with a shared cache
-    // operator have explain called at the same time. The cachedPlan cannot be cloned because
-    // it contains stateful information so we only clone it for the purpose of generating the
-    // explain output.
-    Seq(cachedPlan.clone())
-  }
+  override def innerChildren: Seq[SparkPlan] = Seq(cachedPlan)

   override def doCanonicalize(): logical.LogicalPlan =
     copy(output = output.map(QueryPlan.normalizeExpressions(_, output)),
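This is the core of the change: the old `lazy val` cloned `cachedPlan` once and kept serving that frozen snapshot, which is why explain could keep printing a stale, non-final AQE plan; returning the live instance (with thread safety moved up into the locked `processPlan` and the read-once local in `generateTreeString`) lets explain reflect the plan's current state. A Spark-free sketch of the difference (`LivePlan` and `Relation` are hypothetical):

```scala
// Frozen-clone vs. live-reference: the lazy clone never sees later updates to the
// cached plan, while the plain def always returns the current instance.
object ClonedVsLive {
  final class LivePlan(@volatile var isFinalPlan: Boolean) {
    def cloneNow(): LivePlan = new LivePlan(isFinalPlan)
  }

  final class Relation(val cachedPlan: LivePlan) {
    lazy val innerChildrenOld: Seq[LivePlan] = Seq(cachedPlan.cloneNow()) // frozen at first access
    def innerChildrenNew: Seq[LivePlan] = Seq(cachedPlan)                 // always the live plan
  }

  def main(args: Array[String]): Unit = {
    val rel = new Relation(new LivePlan(isFinalPlan = false))
    println(rel.innerChildrenOld.head.isFinalPlan)  // false (first explain, before execution)

    rel.cachedPlan.isFinalPlan = true               // "AQE finalizes the cached plan"
    println(rel.innerChildrenOld.head.isFinalPlan)  // still false -- the stale explain output
    println(rel.innerChildrenNew.head.isFinalPlan)  // true -- what the patched def reports
  }
}
```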
@@ -18,27 +18,45 @@
 package org.apache.spark.sql.execution.columnar

 import org.apache.spark.SparkFunSuite
+import org.apache.spark.sql.execution.SparkPlan
+import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
+import org.apache.spark.sql.functions.expr
+import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SharedSparkSessionBase
 import org.apache.spark.storage.StorageLevel

-class InMemoryRelationSuite extends SparkFunSuite with SharedSparkSessionBase {
-  test("SPARK-43157: Clone innerChildren cached plan") {
-    val d = spark.range(1)
-    val relation = InMemoryRelation(StorageLevel.MEMORY_ONLY, d.queryExecution, None)
-    val cloned = relation.clone().asInstanceOf[InMemoryRelation]
-
-    val relationCachedPlan = relation.innerChildren.head
-    val clonedCachedPlan = cloned.innerChildren.head
-
-    // verify the plans are not the same object but are logically equivalent
-    assert(!relationCachedPlan.eq(clonedCachedPlan))
-    assert(relationCachedPlan === clonedCachedPlan)
-  }
+class InMemoryRelationSuite extends SparkFunSuite
+  with SharedSparkSessionBase with AdaptiveSparkPlanHelper {
+
   test("SPARK-46779: InMemoryRelations with the same cached plan are semantically equivalent") {
     val d = spark.range(1)
     val r1 = InMemoryRelation(StorageLevel.MEMORY_ONLY, d.queryExecution, None)
     val r2 = r1.withOutput(r1.output.map(_.newInstance()))
     assert(r1.sameResult(r2))
   }

+  test("SPARK-47177: Cached SQL plan do not display final AQE plan in explain string") {
+    def findIMRInnerChild(p: SparkPlan): SparkPlan = {
+      val tableCache = find(p) {
+        case _: InMemoryTableScanExec => true
+        case _ => false
+      }
+      assert(tableCache.isDefined)
+      tableCache.get.asInstanceOf[InMemoryTableScanExec].relation.innerChildren.head
+    }
+
+    withSQLConf(SQLConf.CAN_CHANGE_CACHED_PLAN_OUTPUT_PARTITIONING.key -> "true") {
+      val d1 = spark.range(1).withColumn("key", expr("id % 100"))
+        .groupBy("key").agg(Map("key" -> "count"))
+      val cached_d2 = d1.cache()
+      val df = cached_d2.withColumn("key2", expr("key % 10"))
+        .groupBy("key2").agg(Map("key2" -> "count"))
+
+      assert(findIMRInnerChild(df.queryExecution.executedPlan).treeString
+        .contains("AdaptiveSparkPlan isFinalPlan=false"))
+      df.collect()
+      assert(findIMRInnerChild(df.queryExecution.executedPlan).treeString
+        .contains("AdaptiveSparkPlan isFinalPlan=true"))
+    }
+  }
 }
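For context, the behaviour the SPARK-47177 test pins down can also be observed interactively. A rough spark-shell sketch, assuming a running `spark` session and the same `CAN_CHANGE_CACHED_PLAN_OUTPUT_PARTITIONING` setting as the test; the exact explain text varies across Spark versions:

```scala
// Once the cache is materialized, the cached subtree in the explain output should
// report the finalized AQE plan instead of a stale pre-execution snapshot.
import org.apache.spark.sql.functions.expr
import org.apache.spark.sql.internal.SQLConf

spark.conf.set(SQLConf.CAN_CHANGE_CACHED_PLAN_OUTPUT_PARTITIONING.key, "true")

val cached = spark.range(1).withColumn("key", expr("id % 100")).groupBy("key").count().cache()
val df = cached.withColumn("key2", expr("key % 10")).groupBy("key2").count()

df.explain()   // before any action: the cached subtree shows AdaptiveSparkPlan isFinalPlan=false
df.collect()   // materializes the cache and lets AQE finalize the cached plan
df.explain()   // with this patch: the cached subtree now shows AdaptiveSparkPlan isFinalPlan=true
```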