
Commit 2975677

Author: Egor Krivokon
MapR [SPARK-922] Move latest Spark commits to 3.1.2 branch (apache#856)
1 parent 46a1e04 · commit 2975677

File tree: 2 files changed, +22 -5 lines

sql/hive/pom.xml
Lines changed: 12 additions & 0 deletions

@@ -163,6 +163,18 @@
         <groupId>${hive.group}.shims</groupId>
         <artifactId>hive-shims-common</artifactId>
       </exclusion>
+      <exclusion>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-common</artifactId>
+      </exclusion>
+      <exclusion>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-hdfs</artifactId>
+      </exclusion>
+      <exclusion>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-mapreduce-client-core</artifactId>
+      </exclusion>
     </exclusions>
   </dependency>
   <!--
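These exclusions keep the stock Apache Hadoop artifacts (hadoop-common, hadoop-hdfs, hadoop-mapreduce-client-core) that the Hive dependency pulls in transitively off the classpath, so only the MapR-provided Hadoop jars remain. One way to confirm the exclusions took effect is Maven's dependency tree, filtered to the Hadoop group (a sketch; the module path assumes the standard Spark source layout):

  mvn -pl sql/hive dependency:tree -Dincludes=org.apache.hadoop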

sql/hive/src/main/scala/org/apache/spark/sql/hive/TableReader.scala
Lines changed: 10 additions & 5 deletions

@@ -23,6 +23,7 @@ import scala.collection.JavaConverters._
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.{Path, PathFilter}
+import org.apache.hadoop.hive.maprdb.json.input.HiveMapRDBJsonInputFormat
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants._
 import org.apache.hadoop.hive.ql.exec.Utilities
 import org.apache.hadoop.hive.ql.metadata.{Partition => HivePartition, Table => HiveTable}
@@ -31,23 +32,26 @@ import org.apache.hadoop.hive.serde2.Deserializer
 import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils.AvroTableProperties
 import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorConverters, StructObjectInspector}
 import org.apache.hadoop.hive.serde2.objectinspector.primitive._
+import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorConverters, StructObjectInspector}
 import org.apache.hadoop.io.Writable
-import org.apache.hadoop.mapred.{FileInputFormat, InputFormat => oldInputClass, JobConf}
+import org.apache.hadoop.mapred.{FileInputFormat, JobConf, InputFormat => oldInputClass}
 import org.apache.hadoop.mapreduce.{InputFormat => newInputClass}
-
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.internal.Logging
-import org.apache.spark.rdd.{EmptyRDD, HadoopRDD, NewHadoopRDD, RDD, UnionRDD}
+import org.apache.spark.rdd._
 import org.apache.spark.sql.SparkSession
-import org.apache.spark.sql.catalyst.{InternalRow, SQLConfHelper}
 import org.apache.spark.sql.catalyst.analysis.CastSupport
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.util.DateTimeUtils
+import org.apache.spark.sql.catalyst.{InternalRow, SQLConfHelper}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.unsafe.types.UTF8String
 import org.apache.spark.util.{SerializableConfiguration, Utils}
 
+import java.util.Properties
+import scala.collection.JavaConverters._
+
 /**
  * A trait for subclasses that handle table scans.
  */
@@ -308,7 +312,8 @@ class HadoopTableReader(
    */
   private def createHadoopRDD(localTableDesc: TableDesc, inputPathStr: String): RDD[Writable] = {
     val inputFormatClazz = localTableDesc.getInputFileFormatClass
-    if (classOf[newInputClass[_, _]].isAssignableFrom(inputFormatClazz)) {
+    if (classOf[newInputClass[_, _]].isAssignableFrom(inputFormatClazz)
+      && !inputFormatClazz.isAssignableFrom(classOf[HiveMapRDBJsonInputFormat])) {
       createNewHadoopRDD(localTableDesc, inputPathStr)
     } else {
       createOldHadoopRDD(localTableDesc, inputPathStr)
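
The guard added to createHadoopRDD keeps MapR-DB JSON tables on the old mapred-based HadoopRDD path, presumably because HiveMapRDBJsonInputFormat would otherwise satisfy the new-API check and be routed through createNewHadoopRDD. Below is a minimal, self-contained sketch of that dispatch rule; every type in it is a stand-in for illustration, not the real Hadoop or MapR class:

object InputFormatDispatchSketch {
  // Stand-ins for org.apache.hadoop.mapred.InputFormat (old API)
  // and org.apache.hadoop.mapreduce.InputFormat (new API).
  trait OldInputFormat
  trait NewInputFormat

  // An ordinary new-API format, and a hypothetical MapR-DB JSON
  // format that is visible through both API hierarchies.
  class PlainNewFormat extends NewInputFormat
  class MapRDBJsonFormat extends OldInputFormat with NewInputFormat

  // Mirrors the commit's condition: use the new-API path only when the
  // class is a new-API InputFormat AND is not the MapR-DB JSON format
  // (or one of its supertypes, which is what isAssignableFrom checks).
  def useNewHadoopRDD(clazz: Class[_]): Boolean =
    classOf[NewInputFormat].isAssignableFrom(clazz) &&
      !clazz.isAssignableFrom(classOf[MapRDBJsonFormat])

  def main(args: Array[String]): Unit = {
    println(useNewHadoopRDD(classOf[PlainNewFormat]))   // true  -> new mapreduce path
    println(useNewHadoopRDD(classOf[MapRDBJsonFormat])) // false -> old mapred path
  }
}

Note the direction of the second check: it asks whether the table's input format class is HiveMapRDBJsonInputFormat or one of its superclasses, which is how the commit detects MapR-DB JSON tables.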
