Skip to content

Commit 9ecb2ed

Browse files
committed
[SPARK-16948][SQL] Querying empty partitioned orc tables throws exception
1 parent fc14e2d commit 9ecb2ed

File tree

1 file changed: +4 additions, −9 deletions

sql/hive/src/main/scala/org/apache/spark/sql/hive/orc/OrcFileFormat.scala

Lines changed: 4 additions & 9 deletions
@@ -17,12 +17,9 @@
 package org.apache.spark.sql.hive.orc

-import java.io.FileNotFoundException
 import java.net.URI
 import java.util.Properties

-import scala.util.Try
-
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.{FileStatus, Path}
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars
@@ -57,12 +54,10 @@ class OrcFileFormat extends FileFormat with DataSourceRegister with Serializable
       sparkSession: SparkSession,
       options: Map[String, String],
       files: Seq[FileStatus]): Option[StructType] = {
-    // Safe to ignore FileNotFoundException in case no files are found.
-    val schema = Try(OrcFileOperator.readSchema(
-      files.map(_.getPath.toUri.toString),
-      Some(sparkSession.sessionState.newHadoopConf())))
-      .recover { case _: FileNotFoundException => None }
-    schema.get
+    OrcFileOperator.readSchema(
+      files.map(_.getPath.toUri.toString),
+      Some(sparkSession.sessionState.newHadoopConf())
+    )
   }

   override def prepareWrite(

0 commit comments

Comments
 (0)