
Commit 503852f

Author: Marcelo Vanzin

Revert to previous parameter names.

1 parent a16d9f9

1 file changed (+5, −5 lines)


core/src/main/scala/org/apache/spark/internal/io/SparkHadoopWriter.scala

@@ -76,17 +76,17 @@ object SparkHadoopWriter extends Logging {
     // Try to write all RDD partitions as a Hadoop OutputFormat.
     try {
       val ret = sparkContext.runJob(rdd, (context: TaskContext, iter: Iterator[(K, V)]) => {
-        // SPARK-24552: Generate a unique "task ID" based on the stage and task attempt numbers.
+        // SPARK-24552: Generate a unique "attempt ID" based on the stage and task attempt numbers.
         // Assumes that there won't be more than Short.MaxValue attempts, at least not concurrently.
-        val taskId = (context.stageAttemptNumber << 16) | context.attemptNumber
+        val attemptId = (context.stageAttemptNumber << 16) | context.attemptNumber
 
         executeTask(
           context = context,
           config = config,
           jobTrackerId = jobTrackerId,
           commitJobId = commitJobId,
           sparkPartitionId = context.partitionId,
-          sparkTaskId = taskId,
+          sparkAttemptNumber = attemptId,
           committer = committer,
           iterator = iter)
       })
@@ -108,12 +108,12 @@ object SparkHadoopWriter extends Logging {
       jobTrackerId: String,
       commitJobId: Int,
       sparkPartitionId: Int,
-      sparkTaskId: Int,
+      sparkAttemptNumber: Int,
       committer: FileCommitProtocol,
       iterator: Iterator[(K, V)]): TaskCommitMessage = {
     // Set up a task.
     val taskContext = config.createTaskAttemptContext(
-      jobTrackerId, commitJobId, sparkPartitionId, sparkTaskId)
+      jobTrackerId, commitJobId, sparkPartitionId, sparkAttemptNumber)
     committer.setupTask(taskContext)
 
     val (outputMetrics, callback) = initHadoopOutputMetrics(context)
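
For context, the uniqueness trick in the first hunk packs the stage attempt number into the high 16 bits of an Int and the task attempt number into the low 16 bits, so distinct (stage attempt, task attempt) pairs map to distinct IDs as long as neither counter exceeds Short.MaxValue, which is exactly the assumption the comment in the diff calls out. A minimal standalone sketch of that packing, with the object and method names purely illustrative (not part of Spark's API):

object AttemptIdPacking {
  // Pack the two attempt counters into one Int: stage attempt in the high
  // 16 bits, task attempt in the low 16 bits. Mirrors the expression in the
  // diff above; require() makes the Short.MaxValue assumption explicit.
  def pack(stageAttemptNumber: Int, attemptNumber: Int): Int = {
    require(stageAttemptNumber <= Short.MaxValue && attemptNumber <= Short.MaxValue,
      "packing assumes both counters fit in 16 bits")
    (stageAttemptNumber << 16) | attemptNumber
  }

  // The packing is reversible, which is why distinct pairs yield distinct IDs.
  def unpack(attemptId: Int): (Int, Int) =
    (attemptId >>> 16, attemptId & 0xFFFF)

  def main(args: Array[String]): Unit = {
    val id = pack(stageAttemptNumber = 2, attemptNumber = 3)
    println(s"packed = $id, unpacked = ${unpack(id)}") // packed = 131075, unpacked = (2,3)
  }
}

Packing keeps the value compatible with call sites that expect a single Int attempt number, such as the createTaskAttemptContext call in the second hunk; the trade-off is the 16-bit ceiling on each counter.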
