diff --git a/core/src/main/scala/org/apache/spark/SparkConf.scala b/core/src/main/scala/org/apache/spark/SparkConf.scala
index 65b7fee9d889e..a1353bf7cec06 100644
--- a/core/src/main/scala/org/apache/spark/SparkConf.scala
+++ b/core/src/main/scala/org/apache/spark/SparkConf.scala
@@ -667,7 +667,7 @@ private[spark] object SparkConf extends Logging {
       DeprecatedConfig("spark.shuffle.spill", "1.6", "Not used anymore."),
       DeprecatedConfig("spark.rpc", "2.0", "Not used anymore."),
       DeprecatedConfig("spark.scheduler.executorTaskBlacklistTime", "2.1.0",
-        "Please use the new excludedOnFailure options, spark.excludeOnFailure.*"),
+        "Not used anymore. Please use the new excludedOnFailure options, spark.excludeOnFailure.*"),
       DeprecatedConfig("spark.yarn.am.port", "2.0.0", "Not used anymore"),
       DeprecatedConfig("spark.executor.port", "2.0.0", "Not used anymore"),
       DeprecatedConfig("spark.rpc.numRetries", "2.2.0", "Not used anymore"),
@@ -680,29 +680,32 @@ private[spark] object SparkConf extends Logging {
       DeprecatedConfig("spark.executor.plugins", "3.0.0",
         "Feature replaced with new plugin API. See Monitoring documentation."),
       DeprecatedConfig("spark.blacklist.enabled", "3.1.0",
-        "Please use spark.excludeOnFailure.enabled"),
+        "Not used anymore. Please use spark.excludeOnFailure.enabled"),
       DeprecatedConfig("spark.blacklist.task.maxTaskAttemptsPerExecutor", "3.1.0",
-        "Please use spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor"),
+        "Not used anymore. Please use spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor"),
       DeprecatedConfig("spark.blacklist.task.maxTaskAttemptsPerNode", "3.1.0",
-        "Please use spark.excludeOnFailure.task.maxTaskAttemptsPerNode"),
+        "Not used anymore. Please use spark.excludeOnFailure.task.maxTaskAttemptsPerNode"),
       DeprecatedConfig("spark.blacklist.application.maxFailedTasksPerExecutor", "3.1.0",
-        "Please use spark.excludeOnFailure.application.maxFailedTasksPerExecutor"),
+        "Not used anymore. Please use " +
+          "spark.excludeOnFailure.application.maxFailedTasksPerExecutor"),
       DeprecatedConfig("spark.blacklist.stage.maxFailedTasksPerExecutor", "3.1.0",
-        "Please use spark.excludeOnFailure.stage.maxFailedTasksPerExecutor"),
+        "Not used anymore. Please use spark.excludeOnFailure.stage.maxFailedTasksPerExecutor"),
       DeprecatedConfig("spark.blacklist.application.maxFailedExecutorsPerNode", "3.1.0",
-        "Please use spark.excludeOnFailure.application.maxFailedExecutorsPerNode"),
+        "Not used anymore. Please use " +
+          "spark.excludeOnFailure.application.maxFailedExecutorsPerNode"),
       DeprecatedConfig("spark.blacklist.stage.maxFailedExecutorsPerNode", "3.1.0",
-        "Please use spark.excludeOnFailure.stage.maxFailedExecutorsPerNode"),
+        "Not used anymore. Please use spark.excludeOnFailure.stage.maxFailedExecutorsPerNode"),
       DeprecatedConfig("spark.blacklist.timeout", "3.1.0",
-        "Please use spark.excludeOnFailure.timeout"),
+        "Not used anymore. Please use spark.excludeOnFailure.timeout"),
       DeprecatedConfig("spark.blacklist.application.fetchFailure.enabled", "3.1.0",
-        "Please use spark.excludeOnFailure.application.fetchFailure.enabled"),
+        "Not used anymore. Please use spark.excludeOnFailure.application.fetchFailure.enabled"),
       DeprecatedConfig("spark.scheduler.blacklist.unschedulableTaskSetTimeout", "3.1.0",
-        "Please use spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout"),
+        "Not used anymore. Please use " +
+          "spark.scheduler.excludeOnFailure.unschedulableTaskSetTimeout"),
       DeprecatedConfig("spark.blacklist.killBlacklistedExecutors", "3.1.0",
-        "Please use spark.excludeOnFailure.killExcludedExecutors"),
+        "Not used anymore. Please use spark.excludeOnFailure.killExcludedExecutors"),
       DeprecatedConfig("spark.yarn.blacklist.executor.launch.blacklisting.enabled", "3.1.0",
-        "Please use spark.yarn.executor.launch.excludeOnFailure.enabled"),
+        "Not used anymore. Please use spark.yarn.executor.launch.excludeOnFailure.enabled"),
       DeprecatedConfig("spark.network.remoteReadNioBufferConversion", "3.5.2",
         "Please open a JIRA ticket to report it if you need to use this configuration."),
       DeprecatedConfig("spark.shuffle.unsafe.file.output.buffer", "4.0.0",
diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala
index d0f4806c49482..d413d06ffc94f 100644
--- a/core/src/main/scala/org/apache/spark/internal/config/package.scala
+++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala
@@ -942,7 +942,6 @@ package object config {
   private[spark] val EXCLUDE_ON_FAILURE_ENABLED =
     ConfigBuilder("spark.excludeOnFailure.enabled")
       .version("3.1.0")
-      .withAlternative("spark.blacklist.enabled")
       .booleanConf
       .createOptional

@@ -961,56 +960,48 @@ package object config {
   private[spark] val MAX_TASK_ATTEMPTS_PER_EXECUTOR =
     ConfigBuilder("spark.excludeOnFailure.task.maxTaskAttemptsPerExecutor")
       .version("3.1.0")
-      .withAlternative("spark.blacklist.task.maxTaskAttemptsPerExecutor")
       .intConf
       .createWithDefault(1)

   private[spark] val MAX_TASK_ATTEMPTS_PER_NODE =
     ConfigBuilder("spark.excludeOnFailure.task.maxTaskAttemptsPerNode")
       .version("3.1.0")
-      .withAlternative("spark.blacklist.task.maxTaskAttemptsPerNode")
       .intConf
       .createWithDefault(2)

   private[spark] val MAX_FAILURES_PER_EXEC =
     ConfigBuilder("spark.excludeOnFailure.application.maxFailedTasksPerExecutor")
       .version("3.1.0")
-      .withAlternative("spark.blacklist.application.maxFailedTasksPerExecutor")
       .intConf
       .createWithDefault(2)

   private[spark] val MAX_FAILURES_PER_EXEC_STAGE =
     ConfigBuilder("spark.excludeOnFailure.stage.maxFailedTasksPerExecutor")
       .version("3.1.0")
-      .withAlternative("spark.blacklist.stage.maxFailedTasksPerExecutor")
       .intConf
       .createWithDefault(2)

   private[spark] val MAX_FAILED_EXEC_PER_NODE =
     ConfigBuilder("spark.excludeOnFailure.application.maxFailedExecutorsPerNode")
       .version("3.1.0")
-      .withAlternative("spark.blacklist.application.maxFailedExecutorsPerNode")
       .intConf
       .createWithDefault(2)

   private[spark] val MAX_FAILED_EXEC_PER_NODE_STAGE =
     ConfigBuilder("spark.excludeOnFailure.stage.maxFailedExecutorsPerNode")
       .version("3.1.0")
-      .withAlternative("spark.blacklist.stage.maxFailedExecutorsPerNode")
       .intConf
       .createWithDefault(2)

   private[spark] val EXCLUDE_ON_FAILURE_TIMEOUT_CONF =
     ConfigBuilder("spark.excludeOnFailure.timeout")
       .version("3.1.0")
-      .withAlternative("spark.blacklist.timeout")
       .timeConf(TimeUnit.MILLISECONDS)
       .createOptional

   private[spark] val EXCLUDE_ON_FAILURE_KILL_ENABLED =
     ConfigBuilder("spark.excludeOnFailure.killExcludedExecutors")
       .version("3.1.0")
-      .withAlternative("spark.blacklist.killBlacklistedExecutors")
       .booleanConf
       .createWithDefault(false)

@@ -1025,14 +1016,12 @@ package object config {
     ConfigBuilder("spark.scheduler.executorTaskExcludeOnFailureTime")
       .internal()
       .version("3.1.0")
-      .withAlternative("spark.scheduler.executorTaskBlacklistTime")
       .timeConf(TimeUnit.MILLISECONDS)
       .createOptional

   private[spark] val EXCLUDE_ON_FAILURE_FETCH_FAILURE_ENABLED =
     ConfigBuilder("spark.excludeOnFailure.application.fetchFailure.enabled")
       .version("3.1.0")
-      .withAlternative("spark.blacklist.application.fetchFailure.enabled")
       .booleanConf
       .createWithDefault(false)

@@ -1866,7 +1855,6 @@ package object config {
       "before aborting a TaskSet which is unschedulable because all executors are " +
        "excluded due to failures.")
      .version("3.1.0")
-      .withAlternative("spark.scheduler.blacklist.unschedulableTaskSetTimeout")
      .timeConf(TimeUnit.SECONDS)
      .checkValue(v => v >= 0, "The value should be a non negative time value.")
      .createWithDefault(120)
diff --git a/docs/core-migration-guide.md b/docs/core-migration-guide.md
index edd5e3e7f85d6..a738363ace1d9 100644
--- a/docs/core-migration-guide.md
+++ b/docs/core-migration-guide.md
@@ -28,6 +28,7 @@ license: |
 - Since Spark 4.1, Spark will compress RDD checkpoints by default. To restore the behavior before Spark 4.1, you can set `spark.checkpoint.compress` to `false`.
 - Since Spark 4.1, Spark uses Apache Hadoop Magic Committer for all S3 buckets by default. To restore the behavior before Spark 4.0, you can set `spark.hadoop.fs.s3a.committer.magic.enabled=false`.
 - Since Spark 4.1, `java.lang.InternalError` encountered during file reading will no longer fail the task if the configuration `spark.sql.files.ignoreCorruptFiles` or the data source option `ignoreCorruptFiles` is set to `true`.
+- Since Spark 4.1, Spark ignores the `*.blacklist.*` alternative configuration names. To restore the behavior before Spark 4.1, you can use the corresponding `excludeOnFailure` configuration names instead, which have existed since Spark 3.1.0.

 ## Upgrading from Core 3.5 to 4.0

diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/config/package.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/config/package.scala
index 51e5e0bfb9087..f719ca4677afc 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/config/package.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/config/package.scala
@@ -433,7 +433,6 @@ package object config extends Logging {
   private[spark] val YARN_EXECUTOR_LAUNCH_EXCLUDE_ON_FAILURE_ENABLED =
     ConfigBuilder("spark.yarn.executor.launch.excludeOnFailure.enabled")
       .version("3.1.0")
-      .withAlternative("spark.yarn.blacklist.executor.launch.blacklisting.enabled")
       .booleanConf
       .createWithDefault(false)
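For illustration only, not part of the patch above: a minimal sketch of how an application that still relied on the removed `spark.blacklist.*` alternative names could migrate, assuming it sets these options programmatically through `SparkConf`. The specific values shown are arbitrary examples.

```scala
import org.apache.spark.SparkConf

// Hypothetical migration sketch: the old spark.blacklist.* names are ignored
// as of Spark 4.1, so the excludeOnFailure names (available since Spark 3.1.0)
// must be set directly.
val conf = new SparkConf()
  // was: .set("spark.blacklist.enabled", "true")
  .set("spark.excludeOnFailure.enabled", "true")
  // was: .set("spark.blacklist.timeout", "1h")
  .set("spark.excludeOnFailure.timeout", "1h")
  // was: .set("spark.blacklist.killBlacklistedExecutors", "true")
  .set("spark.excludeOnFailure.killExcludedExecutors", "true")
```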