From 3f4051cd0895debe1c4feeee288890b397d5dfe1 Mon Sep 17 00:00:00 2001
From: s71955
Date: Fri, 24 Aug 2018 03:54:10 +0530
Subject: [PATCH] [SPARK-25073][YARN] AM and Executor memory validation message
 is not proper while submitting spark yarn application

## What changes were proposed in this pull request?

When the yarn.nodemanager.resource.memory-mb or yarn.scheduler.maximum-allocation-mb memory assignment is insufficient, Spark always asks the user to increase 'yarn.scheduler.maximum-allocation-mb', even though the maximum value reported in the message may actually come from yarn.nodemanager.resource.memory-mb. Since this is misleading, the error message is updated to point to both properties, keeping it consistent with the executor memory validation message.

## How was this patch tested?

Manually tested on an HDFS/YARN cluster.
---
 .../src/main/scala/org/apache/spark/deploy/yarn/Client.scala | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
index 75614a41e0b62..698fc2ce8bf9d 100644
--- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
+++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala
@@ -344,7 +344,8 @@ private[spark] class Client(
     if (amMem > maxMem) {
       throw new IllegalArgumentException(s"Required AM memory ($amMemory" +
         s"+$amMemoryOverhead MB) is above the max threshold ($maxMem MB) of this cluster! " +
-        "Please increase the value of 'yarn.scheduler.maximum-allocation-mb'.")
+        "Please check the values of 'yarn.scheduler.maximum-allocation-mb' and/or " +
+        "'yarn.nodemanager.resource.memory-mb'.")
     }
     logInfo("Will allocate AM container, with %d MB memory including %d MB overhead".format(
       amMem,
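
For reference, a minimal standalone Scala sketch of the AM memory validation this patch touches. It reuses the variable names from Client.scala (amMemory, amMemoryOverhead, maxMem); the wrapper object, method, and sample values are illustrative only and not part of the change:

```scala
// Illustrative sketch of the AM memory check in Client.scala, with the updated message.
// Assumes maxMem is the maximum allocation (in MB) reported by the YARN scheduler.
object AmMemoryCheck {

  def validateAmMemory(amMemory: Int, amMemoryOverhead: Int, maxMem: Int): Unit = {
    val amMem = amMemory + amMemoryOverhead
    if (amMem > maxMem) {
      throw new IllegalArgumentException(
        s"Required AM memory ($amMemory+$amMemoryOverhead MB) is above the max threshold " +
        s"($maxMem MB) of this cluster! Please check the values of " +
        "'yarn.scheduler.maximum-allocation-mb' and/or 'yarn.nodemanager.resource.memory-mb'.")
    }
  }

  def main(args: Array[String]): Unit = {
    // Example: a 4096 MB AM plus 384 MB overhead against a 4096 MB cluster maximum
    // exceeds the threshold and produces the updated message.
    try {
      validateAmMemory(amMemory = 4096, amMemoryOverhead = 384, maxMem = 4096)
    } catch {
      case e: IllegalArgumentException => println(e.getMessage)
    }
  }
}
```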