File tree Expand file tree Collapse file tree 1 file changed +5
-1
lines changed
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet Expand file tree Collapse file tree 1 file changed +5
-1
lines changed Original file line number Diff line number Diff line change @@ -292,6 +292,10 @@ private[sql] class ParquetRelation(
292292 val assumeBinaryIsString = sqlContext.conf.isParquetBinaryAsString
293293 val assumeInt96IsTimestamp = sqlContext.conf.isParquetINT96AsTimestamp
294294
295+ // When using a merged schema and the column referenced by a given filter does not exist,
296+ // Parquet throws an error; this is a known Parquet issue (PARQUET-389).
297+ val safeParquetFilterPushDown = ! shouldMergeSchemas && parquetFilterPushDown
298+
295299 // Parquet row group size. We will use this value as the value for
296300 // mapreduce.input.fileinputformat.split.minsize and mapred.min.split.size if the value
297301 // of these flags are smaller than the parquet row group size.
@@ -305,7 +309,7 @@ private[sql] class ParquetRelation(
305309 dataSchema,
306310 parquetBlockSize,
307311 useMetadataCache,
308- parquetFilterPushDown ,
312+ safeParquetFilterPushDown ,
309313 assumeBinaryIsString,
310314 assumeInt96IsTimestamp) _
311315
You can’t perform that action at this time.
0 commit comments