diff --git a/core/pom.xml b/core/pom.xml
index 822b5b1dd7cc..d28d57c9501a 100644
--- a/core/pom.xml
+++ b/core/pom.xml
@@ -38,12 +38,6 @@
net.java.dev.jets3t
jets3t
-
-
- commons-logging
- commons-logging
-
-
org.apache.curator
diff --git a/docs/building-with-maven.md b/docs/building-with-maven.md
index e447dfea3bac..cac01ded60d9 100644
--- a/docs/building-with-maven.md
+++ b/docs/building-with-maven.md
@@ -29,9 +29,22 @@ You can fix this by setting the `MAVEN_OPTS` variable as discussed before.
## Specifying the Hadoop version ##
-Because HDFS is not protocol-compatible across versions, if you want to read from HDFS, you'll need to build Spark against the specific HDFS version in your environment. You can do this through the "hadoop.version" property. If unset, Spark will build against Hadoop 1.0.4 by default.
-
-For Apache Hadoop versions 1.x, Cloudera CDH MRv1, and other Hadoop versions without YARN, use:
+Because HDFS is not protocol-compatible across versions, if you want to read from HDFS, you'll need to build Spark against the specific HDFS version in your environment. You can do this through the "hadoop.version" property. If unset, Spark will build against Hadoop 1.0.4 by default. Note that certain build profiles are required for particular Hadoop versions:
+
+
+
+ | Hadoop version | Profile required |
+
+
+ | 0.23.x | hadoop-0.23 |
+ | 1.x to 2.1.x | (none) |
+ | 2.2.x | hadoop-2.2 |
+ | 2.3.x | hadoop-2.3 |
+ | 2.4.x | hadoop-2.4 |
+
+
+
+For Apache Hadoop versions 1.x, Cloudera CDH "mr1" distributions, and other Hadoop versions without YARN, use:
# Apache Hadoop 1.2.1
$ mvn -Dhadoop.version=1.2.1 -DskipTests clean package
@@ -42,22 +55,40 @@ For Apache Hadoop versions 1.x, Cloudera CDH MRv1, and other Hadoop versions wit
# Apache Hadoop 0.23.x
$ mvn -Phadoop-0.23 -Dhadoop.version=0.23.7 -DskipTests clean package
-For Apache Hadoop 2.x, 0.23.x, Cloudera CDH MRv2, and other Hadoop versions with YARN, you can enable the "yarn-alpha" or "yarn" profile and set the "hadoop.version", "yarn.version" property. Note that Hadoop 0.23.X requires a special `-Phadoop-0.23` profile:
+For Apache Hadoop 2.x, 0.23.x, Cloudera CDH, and other Hadoop versions with YARN, you can enable the "yarn-alpha" or "yarn" profile and optionally set the "yarn.version" property if it is different from "hadoop.version". The additional build profile required depends on the YARN version:
+
+
+
+ | YARN version | Profile required |
+
+
+ | 0.23.x to 2.1.x | yarn-alpha |
+ | 2.2.x and later | yarn |
+
+
+
+Examples:
# Apache Hadoop 2.0.5-alpha
$ mvn -Pyarn-alpha -Dhadoop.version=2.0.5-alpha -DskipTests clean package
- # Cloudera CDH 4.2.0 with MapReduce v2
+ # Cloudera CDH 4.2.0
$ mvn -Pyarn-alpha -Dhadoop.version=2.0.0-cdh4.2.0 -DskipTests clean package
- # Apache Hadoop 2.2.X (e.g. 2.2.0 as below) and newer
- $ mvn -Pyarn -Dhadoop.version=2.2.0 -DskipTests clean package
-
# Apache Hadoop 0.23.x
- $ mvn -Pyarn-alpha -Phadoop-0.23 -Dhadoop.version=0.23.7 -Dyarn.version=0.23.7 -DskipTests clean package
+ $ mvn -Pyarn-alpha -Phadoop-0.23 -Dhadoop.version=0.23.7 -DskipTests clean package
+
+ # Apache Hadoop 2.2.x
+ $ mvn -Pyarn -Phadoop-2.2 -Dhadoop.version=2.2.0 -DskipTests clean package
+
+ # Apache Hadoop 2.3.x
+ $ mvn -Pyarn -Phadoop-2.3 -Dhadoop.version=2.3.0 -DskipTests clean package
+
+ # Apache Hadoop 2.4.x
+ $ mvn -Pyarn -Phadoop-2.4 -Dhadoop.version=2.4.0 -DskipTests clean package
# Different versions of HDFS and YARN.
- $ mvn -Pyarn-alpha -Dhadoop.version=2.3.0 -Dyarn.version=0.23.7 -DskipTests clean package
+ $ mvn -Pyarn-alpha -Phadoop-2.3 -Dhadoop.version=2.3.0 -Dyarn.version=0.23.7 -DskipTests clean package
## Spark Tests in Maven ##
diff --git a/pom.xml b/pom.xml
index ebd359a9de17..1c78701a2d4a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -129,6 +129,7 @@
0.3.6
3.0.0
1.7.4
+ 0.7.1
64m
512m
@@ -555,10 +556,18 @@
+
net.java.dev.jets3t
jets3t
- 0.7.1
+ ${jets3t.version}
+ runtime
+
+
+ commons-logging
+ commons-logging
+
+
org.apache.hadoop
@@ -830,36 +839,6 @@
-
-
- hadoop-0.23
-
-
- org.apache.avro
- avro
-
-
-
-
-
- yarn-alpha
-
- 2
-
- 0.23.7
-
-
-
-
- org.apache.avro
- avro
-
-
-
- yarn
-
-
-
@@ -894,17 +873,54 @@
+
+
- yarn
+ hadoop-0.23
+
+
+
+ org.apache.avro
+ avro
+
+
+
+
+
+ hadoop-2.2
+
+ 2.5.0
+
+
+
+
+ hadoop-2.3
- 2
- 2.2.0
2.5.0
+ 0.9.0
+
+
+
+ hadoop-2.4
+
+ 2.5.0
+ 0.9.0
+
+
+
+
+ yarn-alpha
yarn
+
+
+ yarn
+
+ yarn
+
diff --git a/project/SparkBuild.scala b/project/SparkBuild.scala
index 51f733511116..ba140a8dceb5 100644
--- a/project/SparkBuild.scala
+++ b/project/SparkBuild.scala
@@ -95,7 +95,7 @@ object SparkBuild extends Build {
lazy val hadoopVersion = Properties.envOrElse("SPARK_HADOOP_VERSION", DEFAULT_HADOOP_VERSION)
lazy val isNewHadoop = Properties.envOrNone("SPARK_IS_NEW_HADOOP") match {
case None => {
- val isNewHadoopVersion = "2.[2-9]+".r.findFirstIn(hadoopVersion).isDefined
+ val isNewHadoopVersion = "^2\\.[2-9]+".r.findFirstIn(hadoopVersion).isDefined
(isNewHadoopVersion|| DEFAULT_IS_NEW_HADOOP)
}
case Some(v) => v.toBoolean
@@ -297,6 +297,7 @@ object SparkBuild extends Build {
val chillVersion = "0.3.6"
val codahaleMetricsVersion = "3.0.0"
val jblasVersion = "1.2.3"
+ val jets3tVersion = if ("^2\\.[3-9]+".r.findFirstIn(hadoopVersion).isDefined) "0.9.0" else "0.7.1"
val jettyVersion = "8.1.14.v20131031"
val hiveVersion = "0.12.0"
val parquetVersion = "1.3.2"
@@ -342,7 +343,7 @@ object SparkBuild extends Build {
"colt" % "colt" % "1.2.0",
"org.apache.mesos" % "mesos" % "0.13.0",
"commons-net" % "commons-net" % "2.2",
- "net.java.dev.jets3t" % "jets3t" % "0.7.1" excludeAll(excludeCommonsLogging),
+ "net.java.dev.jets3t" % "jets3t" % jets3tVersion excludeAll(excludeCommonsLogging),
"org.apache.derby" % "derby" % "10.4.2.0" % "test",
"org.apache.hadoop" % hadoopClient % hadoopVersion excludeAll(excludeNetty, excludeAsm, excludeCommonsLogging, excludeSLF4J, excludeOldAsm),
"org.apache.curator" % "curator-recipes" % "2.4.0" excludeAll(excludeNetty),