diff --git a/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoinExec.scala b/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoinExec.scala
index f9e92eccac..2d673d4ca6 100644
--- a/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoinExec.scala
+++ b/core/src/main/scala/org/apache/spark/sql/execution/joins/HashJoinExec.scala
@@ -476,10 +476,6 @@ case class HashJoinExec(leftKeys: Seq[Expression],
     val produced = streamedPlan.asInstanceOf[CodegenSupport].produce(ctx, this)
     val beforeMap = ctx.freshName("beforeMap")
 
-    val skipScan = skipProcessForEmptyMap()
-    val skipCondition = if (skipScan) {
-      s"if($hashMapTerm.size() == 0) return;"
-    } else ""
 
     s"""
        boolean $keyIsUniqueTerm = true;
@@ -495,7 +491,6 @@ case class HashJoinExec(leftKeys: Seq[Expression],
       final $entryClass[] $mapDataTerm = ($entryClass[])$hashMapTerm.data();
       long $numRowsTerm = 0L;
       try {
-        $skipCondition
         ${session.evaluateFinallyCode(ctx, produced)}
       } finally {
         $numOutputRows.${metricAdd(numRowsTerm)};
@@ -503,13 +498,6 @@ case class HashJoinExec(leftKeys: Seq[Expression],
     """
   }
 
-  private def skipProcessForEmptyMap() : Boolean = {
-    joinType match {
-      case Inner | LeftSemi => true
-      case _ => false
-    }
-  }
-
   override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
     // variable that holds if relation is unique to optimize iteration