diff --git a/core/src/main/scala/org/apache/spark/internal/config/package.scala b/core/src/main/scala/org/apache/spark/internal/config/package.scala index 1e7280005514..b07c0d60fc2b 100644 --- a/core/src/main/scala/org/apache/spark/internal/config/package.scala +++ b/core/src/main/scala/org/apache/spark/internal/config/package.scala @@ -631,17 +631,19 @@ package object config { private[spark] val MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM = ConfigBuilder("spark.maxRemoteBlockSizeFetchToMem") .doc("Remote block will be fetched to disk when size of the block is above this threshold " + - "in bytes. This is to avoid a giant request takes too much memory. We can enable this " + - "config by setting a specific value(e.g. 200m). Note this configuration will affect " + - "both shuffle fetch and block manager remote block fetch. For users who enabled " + - "external shuffle service, this feature can only be worked when external shuffle" + - "service is newer than Spark 2.2.") + "in bytes. This is to avoid a giant request taking too much memory. Note this " + + "configuration will affect both shuffle fetch and block manager remote block fetch. " + + "For users who enable the external shuffle service, this feature can only work when " + + "external shuffle service is at least 2.3.0.") .bytesConf(ByteUnit.BYTE) // fetch-to-mem is guaranteed to fail if the message is bigger than 2 GB, so we might // as well use fetch-to-disk in that case. The message includes some metadata in addition // to the block data itself (in particular UploadBlock has a lot of metadata), so we leave // extra room. 
- .createWithDefault(Int.MaxValue - 512) + .checkValue( + _ <= Int.MaxValue - 512, + "maxRemoteBlockSizeFetchToMem cannot be larger than (Int.MaxValue - 512) bytes.") + .createWithDefaultString("200m") private[spark] val TASK_METRICS_TRACK_UPDATED_BLOCK_STATUSES = ConfigBuilder("spark.taskMetrics.trackUpdatedBlockStatuses") diff --git a/docs/configuration.md b/docs/configuration.md index 7d3bbf93ae96..806e16af3640 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -626,19 +626,6 @@ Apart from these, the following properties are also available, and may be useful You can mitigate this issue by setting it to a lower value. -
spark.maxRemoteBlockSizeFetchToMemspark.shuffle.compressspark.maxRemoteBlockSizeFetchToMem