core/src/main/scala/org/apache/spark/deploy/worker/Worker.scala

@@ -349,7 +349,7 @@ private[spark] class Worker(
         // application finishes.
         val appLocalDirs = appDirectories.get(appId).getOrElse {
           Utils.getOrCreateLocalRootDirs(conf).map { dir =>
-            Utils.createDirectory(dir).getAbsolutePath()
+            Utils.createDirectory(dir, conf = conf).getAbsolutePath()
           }.toSeq
         }
         appDirectories(appId) = appLocalDirs
core/src/main/scala/org/apache/spark/storage/DiskBlockManager.scala

@@ -123,7 +123,7 @@ private[spark] class DiskBlockManager(blockManager: BlockManager, conf: SparkConf
   private def createLocalDirs(conf: SparkConf): Array[File] = {
     Utils.getOrCreateLocalRootDirs(conf).flatMap { rootDir =>
       try {
-        val localDir = Utils.createDirectory(rootDir, "blockmgr")
+        val localDir = Utils.createDirectory(rootDir, "blockmgr", conf)
         logInfo(s"Created local directory at $localDir")
         Some(localDir)
       } catch {
core/src/main/scala/org/apache/spark/util/Utils.scala (3 additions, 3 deletions)
@@ -268,7 +268,7 @@ private[spark] object Utils extends Logging {
    * Create a directory inside the given parent directory. The directory is guaranteed to be
    * newly created, and is not marked for automatic deletion.
    */
-  def createDirectory(root: String, namePrefix: String = "spark"): File = {
+  def createDirectory(root: String, namePrefix: String = "spark", conf: SparkConf = null): File = {
     var attempts = 0
     val maxAttempts = MAX_DIR_CREATION_ATTEMPTS
     var dir: File = null

@@ -285,7 +285,7 @@
       } else {
         // Restrict file permissions via chmod if available.
         // For Windows this step is ignored.
-        if (!isWindows && !chmod700(dir)) {
+        if (!isWindows && (conf == null || !isRunningInYarnContainer(conf)) && !chmod700(dir)) {
          dir.delete()
          dir = null
        }

Review thread on the new chmod700 guard:

Member: I'd open a new PR vs. master. The logic here is getting a little hairy, although it's not crazy. How about just setting the permissions to 750?

Contributor (author): Will do another PR.

Re: 750 - this isn't supported by the JDK, because group permissions are POSIX-specific, so you'd need logic along the lines of:

    if (linux) { setPosixGroupReadable(); }

Also, explicitly setting the permissions has the effect of overruling the setgid flag on the parent directory, so you end up with the same group as the user. On YARN, the desired permissions of the files should be:

    -rw-r-----    containeruser    yarn

The group is set from the setgid flag on the parent directories. Any explicit chmod from Spark overrides this, and you end up with a file owned by containeruser:containeruser. The only way around this would be to somehow know the group that the YARN NodeManager process is running as, but that would start to get messy, I think.
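For context on the 750 suggestion: the pre-NIO java.io.File API can only toggle owner-vs-everyone permission bits, so a group-readable mode such as 750 needs the POSIX-specific NIO.2 attribute view (Java 7+). A minimal sketch of what that would take; the helper name is hypothetical and not part of this PR:

    import java.nio.file.{Files, Paths}
    import java.nio.file.attribute.PosixFilePermissions

    // Hypothetical helper illustrating the reviewer's 750 suggestion.
    // "rwxr-x---" is the symbolic form of mode 750. The call throws
    // UnsupportedOperationException on filesystems without POSIX
    // attributes (e.g. Windows), which is why a platform check would
    // still be needed.
    def setPermissions750(path: String): Boolean = {
      try {
        Files.setPosixFilePermissions(
          Paths.get(path),
          PosixFilePermissions.fromString("rwxr-x---"))
        true
      } catch {
        case _: UnsupportedOperationException => false
      }
    }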
@@ -702,7 +702,7 @@ private[spark] object Utils extends Logging {
       try {
         val rootDir = new File(root)
         if (rootDir.exists || rootDir.mkdirs()) {
-          Some(createDirectory(root).getAbsolutePath())
+          Some(createDirectory(root, conf = conf).getAbsolutePath())
         } else {
           logError(s"Failed to create dir in $root. Ignoring this directory.")
           None
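A closing note on the new guard: Utils.isRunningInYarnContainer is referenced but not shown in this diff. A sketch of what such a check plausibly looks like, assuming it keys off the environment variables YARN sets inside its containers:

    import org.apache.spark.SparkConf

    // Hedged sketch, not part of this diff: YARN exports CONTAINER_ID
    // (Hadoop 2.x) or YARN_LOCAL_DIRS (Hadoop 0.23.x) into the environment
    // of every container it launches, so their presence signals that the
    // process is running inside a YARN container. The conf parameter here
    // only mirrors the signature used by the guard above.
    def isRunningInYarnContainer(conf: SparkConf): Boolean = {
      System.getenv("CONTAINER_ID") != null || System.getenv("YARN_LOCAL_DIRS") != null
    }

When the check returns true, createDirectory skips chmod700, so the directory keeps the group inherited from YARN's setgid parent directories, which is exactly the behavior the author argues for in the thread above.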