29 changes: 16 additions & 13 deletions core/src/main/scala/org/apache/spark/SparkConf.scala
@@ -667,7 +667,7 @@ private[spark] object SparkConf extends Logging {
DeprecatedConfig("spark.shuffle.spill", "1.6", "Not used anymore."),
DeprecatedConfig("spark.rpc", "2.0", "Not used anymore."),
DeprecatedConfig("spark.scheduler.executorTaskBlacklistTime", "2.1.0",
"Please use the new excludedOnFailure options, spark.excludeOnFailure.*"),
"Not used anymore. Please use the new excludedOnFailure options, spark.excludeOnFailure.*"),
Member Author:

We still keep the revised deprecation warning message so that it points users to the new configuration name.

Member:

So although users can no longer use these configs after this PR, they will still see the deprecation warning messages pointing them to the new configs, right?

Member Author:

Yes, right~
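
A minimal sketch of that behavior (assuming a build with this change applied; the object name below is only for illustration): the legacy key still triggers the deprecation warning registered in `deprecatedConfigs`, but with the `.withAlternative` wiring removed its value is no longer propagated to the new key.

```scala
import org.apache.spark.SparkConf

object LegacyBlacklistConfSketch {
  def main(args: Array[String]): Unit = {
    // Setting the legacy key should still log the deprecation warning above,
    // pointing users at spark.excludeOnFailure.enabled.
    val conf = new SparkConf(loadDefaults = false)
      .set("spark.blacklist.enabled", "true")

    // With the alternative removed, the new key no longer inherits the legacy
    // value and falls back to the supplied default here.
    println(conf.get("spark.excludeOnFailure.enabled", "false")) // expected: "false"
  }
}
```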

DeprecatedConfig("spark.yarn.am.port", "2.0.0", "Not used anymore"),
DeprecatedConfig("spark.executor.port", "2.0.0", "Not used anymore"),
DeprecatedConfig("spark.rpc.numRetries", "2.2.0", "Not used anymore"),
@@ -680,29 +680,32 @@ private[spark] object SparkConf extends Logging {
DeprecatedConfig("spark.executor.plugins", "3.0.0",
"Feature replaced with new plugin API. See Monitoring documentation."),
DeprecatedConfig("spark.blacklist.enabled", "3.1.0",
"Please use spark.excludeOnFailure.enabled"),
"Not used anymore. Please use spark.excludeOnFailure.enabled"),
DeprecatedConfig("spark.blacklist.task.maxTaskAttemptsPerExecutor", "3.1.0",
"Please use spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor"),
"Not used anymore. Please use spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor"),
DeprecatedConfig("spark.blacklist.task.maxTaskAttemptsPerNode", "3.1.0",
"Please use spark.excludeOnFailure.task.maxTaskAttemptsPerNode"),
"Not used anymore. Please use spark.excludeOnFailure.task.maxTaskAttemptsPerNode"),
DeprecatedConfig("spark.blacklist.application.maxFailedTasksPerExecutor", "3.1.0",
"Please use spark.excludeOnFailure.application.maxFailedTasksPerExecutor"),
"Not used anymore. Please use " +
"spark.excludeOnFailure.application.maxFailedTasksPerExecutor"),
DeprecatedConfig("spark.blacklist.stage.maxFailedTasksPerExecutor", "3.1.0",
"Please use spark.excludeOnFailure.stage.maxFailedTasksPerExecutor"),
"Not used anymore. Please use spark.excludeOnFailure.stage.maxFailedTasksPerExecutor"),
DeprecatedConfig("spark.blacklist.application.maxFailedExecutorsPerNode", "3.1.0",
"Please use spark.excludeOnFailure.application.maxFailedExecutorsPerNode"),
"Not used anymore. Please use " +
"spark.excludeOnFailure.application.maxFailedExecutorsPerNode"),
DeprecatedConfig("spark.blacklist.stage.maxFailedExecutorsPerNode", "3.1.0",
"Please use spark.excludeOnFailure.stage.maxFailedExecutorsPerNode"),
"Not used anymore. Please use spark.excludeOnFailure.stage.maxFailedExecutorsPerNode"),
DeprecatedConfig("spark.blacklist.timeout", "3.1.0",
"Please use spark.excludeOnFailure.timeout"),
"Not used anymore. Please use spark.excludeOnFailure.timeout"),
DeprecatedConfig("spark.blacklist.application.fetchFailure.enabled", "3.1.0",
"Please use spark.excludeOnFailure.application.fetchFailure.enabled"),
"Not used anymore. Please use spark.excludeOnFailure.application.fetchFailure.enabled"),
DeprecatedConfig("spark.scheduler.blacklist.unschedulableTaskSetTimeout", "3.1.0",
"Please use spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout"),
"Not used anymore. Please use " +
"spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout"),
DeprecatedConfig("spark.blacklist.killBlacklistedExecutors", "3.1.0",
"Please use spark.excludeOnFailure.killExcludedExecutors"),
"Not used anymore. Please use spark.excludeOnFailure.killExcludedExecutors"),
DeprecatedConfig("spark.yarn.blacklist.executor.launch.blacklisting.enabled", "3.1.0",
"Please use spark.yarn.executor.launch.excludeOnFailure.enabled"),
"Not used anymore. Please use spark.yarn.executor.launch.excludeOnFailure.enabled"),
DeprecatedConfig("spark.network.remoteReadNioBufferConversion", "3.5.2",
"Please open a JIRA ticket to report it if you need to use this configuration."),
DeprecatedConfig("spark.shuffle.unsafe.file.output.buffer", "4.0.0",
12 changes: 0 additions & 12 deletions core/src/main/scala/org/apache/spark/internal/config/package.scala
@@ -942,7 +942,6 @@ package object config {
private[spark] val EXCLUDE_ON_FAILURE_ENABLED =
ConfigBuilder("spark.excludeOnFailure.enabled")
.version("3.1.0")
.withAlternative("spark.blacklist.enabled")
.booleanConf
.createOptional

@@ -961,56 +960,48 @@ package object config {
private[spark] val MAX_TASK_ATTEMPTS_PER_EXECUTOR =
ConfigBuilder("spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor")
.version("3.1.0")
.withAlternative("spark.blacklist.task.maxTaskAttemptsPerExecutor")
.intConf
.createWithDefault(1)

private[spark] val MAX_TASK_ATTEMPTS_PER_NODE =
ConfigBuilder("spark.excludeOnFailure.task.maxTaskAttemptsPerNode")
.version("3.1.0")
.withAlternative("spark.blacklist.task.maxTaskAttemptsPerNode")
.intConf
.createWithDefault(2)

private[spark] val MAX_FAILURES_PER_EXEC =
ConfigBuilder("spark.excludeOnFailure.application.maxFailedTasksPerExecutor")
.version("3.1.0")
.withAlternative("spark.blacklist.application.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)

private[spark] val MAX_FAILURES_PER_EXEC_STAGE =
ConfigBuilder("spark.excludeOnFailure.stage.maxFailedTasksPerExecutor")
.version("3.1.0")
.withAlternative("spark.blacklist.stage.maxFailedTasksPerExecutor")
.intConf
.createWithDefault(2)

private[spark] val MAX_FAILED_EXEC_PER_NODE =
ConfigBuilder("spark.excludeOnFailure.application.maxFailedExecutorsPerNode")
.version("3.1.0")
.withAlternative("spark.blacklist.application.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)

private[spark] val MAX_FAILED_EXEC_PER_NODE_STAGE =
ConfigBuilder("spark.excludeOnFailure.stage.maxFailedExecutorsPerNode")
.version("3.1.0")
.withAlternative("spark.blacklist.stage.maxFailedExecutorsPerNode")
.intConf
.createWithDefault(2)

private[spark] val EXCLUDE_ON_FAILURE_TIMEOUT_CONF =
ConfigBuilder("spark.excludeOnFailure.timeout")
.version("3.1.0")
.withAlternative("spark.blacklist.timeout")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional

private[spark] val EXCLUDE_ON_FAILURE_KILL_ENABLED =
ConfigBuilder("spark.excludeOnFailure.killExcludedExecutors")
.version("3.1.0")
.withAlternative("spark.blacklist.killBlacklistedExecutors")
.booleanConf
.createWithDefault(false)

@@ -1025,14 +1016,12 @@ package object config {
ConfigBuilder("spark.scheduler.executorTaskExcludeOnFailureTime")
.internal()
.version("3.1.0")
.withAlternative("spark.scheduler.executorTaskBlacklistTime")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional

private[spark] val EXCLUDE_ON_FAILURE_FETCH_FAILURE_ENABLED =
ConfigBuilder("spark.excludeOnFailure.application.fetchFailure.enabled")
.version("3.1.0")
.withAlternative("spark.blacklist.application.fetchFailure.enabled")
.booleanConf
.createWithDefault(false)

@@ -1866,7 +1855,6 @@ package object config {
"before aborting a TaskSet which is unschedulable because all executors are " +
"excluded due to failures.")
.version("3.1.0")
.withAlternative("spark.scheduler.blacklist.unschedulableTaskSetTimeout")
.timeConf(TimeUnit.SECONDS)
.checkValue(v => v >= 0, "The value should be a non negative time value.")
.createWithDefault(120)
1 change: 1 addition & 0 deletions docs/core-migration-guide.md
@@ -28,6 +28,7 @@ license: |
- Since Spark 4.1, Spark will compress RDD checkpoints by default. To restore the behavior before Spark 4.1, you can set `spark.checkpoint.compress` to `false`.
- Since Spark 4.1, Spark uses Apache Hadoop Magic Committer for all S3 buckets by default. To restore the behavior before Spark 4.0, you can set `spark.hadoop.fs.s3a.committer.magic.enabled=false`.
- Since Spark 4.1, `java.lang.InternalError` encountered during file reading will no longer fail the task if the configuration `spark.sql.files.ignoreCorruptFiles` or the data source option `ignoreCorruptFiles` is set to `true`.
- Since Spark 4.1, Spark ignores the `*.blacklist.*` alternative configuration names. To restore the behavior before Spark 4.1, use the corresponding configuration names instead, which have existed since Spark 3.1.0.
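
As an illustrative sketch (not part of the PR diff), assuming an application that previously relied on the legacy aliases, the migration is a straight rename to the names available since 3.1.0:

```scala
import org.apache.spark.SparkConf

// Before Spark 4.1 the legacy aliases were still honored, e.g.
//   .set("spark.blacklist.enabled", "true")
//   .set("spark.blacklist.killBlacklistedExecutors", "true")
// Since Spark 4.1 only the spark.excludeOnFailure.* names are read:
val conf = new SparkConf()
  .set("spark.excludeOnFailure.enabled", "true")
  .set("spark.excludeOnFailure.killExcludedExecutors", "true")
```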

## Upgrading from Core 3.5 to 4.0

@@ -433,7 +433,6 @@ package object config extends Logging {
private[spark] val YARN_EXECUTOR_LAUNCH_EXCLUDE_ON_FAILURE_ENABLED =
ConfigBuilder("spark.yarn.executor.launch.excludeOnFailure.enabled")
.version("3.1.0")
.withAlternative("spark.yarn.blacklist.executor.launch.blacklisting.enabled")
.booleanConf
.createWithDefault(false)
