@@ -23,6 +23,7 @@ import scala.collection.JavaConverters._
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.{Path, PathFilter}
+import org.apache.hadoop.hive.maprdb.json.input.HiveMapRDBJsonInputFormat
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants._
 import org.apache.hadoop.hive.ql.exec.Utilities
 import org.apache.hadoop.hive.ql.metadata.{Partition => HivePartition, Table => HiveTable}
@@ -31,23 +32,26 @@ import org.apache.hadoop.hive.serde2.Deserializer
 import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils.AvroTableProperties
 import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorConverters, StructObjectInspector}
 import org.apache.hadoop.hive.serde2.objectinspector.primitive._
+import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorConverters, StructObjectInspector}
 import org.apache.hadoop.io.Writable
-import org.apache.hadoop.mapred.{FileInputFormat, InputFormat => oldInputClass, JobConf}
+import org.apache.hadoop.mapred.{FileInputFormat, JobConf, InputFormat => oldInputClass}
 import org.apache.hadoop.mapreduce.{InputFormat => newInputClass}
-
 import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.deploy.SparkHadoopUtil
 import org.apache.spark.internal.Logging
-import org.apache.spark.rdd.{EmptyRDD, HadoopRDD, NewHadoopRDD, RDD, UnionRDD}
+import org.apache.spark.rdd._
 import org.apache.spark.sql.SparkSession
-import org.apache.spark.sql.catalyst.{InternalRow, SQLConfHelper}
 import org.apache.spark.sql.catalyst.analysis.CastSupport
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.util.DateTimeUtils
+import org.apache.spark.sql.catalyst.{InternalRow, SQLConfHelper}
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.unsafe.types.UTF8String
 import org.apache.spark.util.{SerializableConfiguration, Utils}
 
+import java.util.Properties
+import scala.collection.JavaConverters._
+
 /**
  * A trait for subclasses that handle table scans.
  */
@@ -308,7 +312,8 @@ class HadoopTableReader(
    */
   private def createHadoopRDD(localTableDesc: TableDesc, inputPathStr: String): RDD[Writable] = {
     val inputFormatClazz = localTableDesc.getInputFileFormatClass
-    if (classOf[newInputClass[_, _]].isAssignableFrom(inputFormatClazz)) {
+    if (classOf[newInputClass[_, _]].isAssignableFrom(inputFormatClazz)
+      && !inputFormatClazz.isAssignableFrom(classOf[HiveMapRDBJsonInputFormat])) {
       createNewHadoopRDD(localTableDesc, inputPathStr)
     } else {
       createOldHadoopRDD(localTableDesc, inputPathStr)
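
The functional change is the guard in createHadoopRDD: HiveMapRDBJsonInputFormat implements the new mapreduce InputFormat API, but the added condition sends it down the else branch to the old mapred-based createOldHadoopRDD, so only new-API formats other than the MapR-DB JSON one (or its supertypes) reach createNewHadoopRDD. Below is a minimal, self-contained sketch of that predicate; the class names NewApiInputFormat, PlainNewApiFormat, and MapRDBJsonFormat are illustrative stand-ins, not names from this commit:

object DispatchSketch {
  // Stand-in for org.apache.hadoop.mapreduce.InputFormat[_, _].
  trait NewApiInputFormat
  class PlainNewApiFormat extends NewApiInputFormat
  // Stand-in for HiveMapRDBJsonInputFormat.
  class MapRDBJsonFormat extends NewApiInputFormat

  // Mirrors the if-condition above: new-API formats take the new Hadoop RDD
  // path, except the MapR-DB JSON format (and any supertype of it), which
  // falls back to the old mapred-based RDD.
  def useNewHadoopRDD(clazz: Class[_]): Boolean =
    classOf[NewApiInputFormat].isAssignableFrom(clazz) &&
      !clazz.isAssignableFrom(classOf[MapRDBJsonFormat])

  def main(args: Array[String]): Unit = {
    println(useNewHadoopRDD(classOf[PlainNewApiFormat])) // true  -> createNewHadoopRDD
    println(useNewHadoopRDD(classOf[MapRDBJsonFormat]))  // false -> createOldHadoopRDD
  }
}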