```diff
@@ -54,7 +54,7 @@ abstract class FileTable(
     inferSchema(fileIndex.allFiles())
   }.getOrElse {
     throw new AnalysisException(
-      s"Unable to infer schema for $name. It must be specified manually.")
+      s"Unable to infer schema for $formatName. It must be specified manually.")
   }.asNullable

   override lazy val schema: StructType = {
```
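For context, a minimal, self-contained sketch of the pattern this fix relies on (hypothetical names, not the actual Spark classes): the abstract table exposes a `formatName` that each concrete format overrides, so the inference error can name the format rather than the table.

```scala
// Hypothetical sketch; FileTableSketch/OrcTableSketch are illustrative names,
// not the real Spark classes. The abstract table exposes a human-readable
// formatName that each subclass overrides, so the schema-inference error can
// name the concrete format ("ORC", "CSV", ...) in its message.
abstract class FileTableSketch {
  def formatName: String
  def inferSchema(): Option[String] // stands in for Spark's Option[StructType]

  lazy val dataSchema: String = inferSchema().getOrElse {
    // Mirrors the AnalysisException message in the diff above.
    throw new IllegalArgumentException(
      s"Unable to infer schema for $formatName. It must be specified manually.")
  }
}

class OrcTableSketch extends FileTableSketch {
  override def formatName: String = "ORC"
  override def inferSchema(): Option[String] = None // e.g. every file is corrupt
}

// new OrcTableSketch().dataSchema throws:
//   "Unable to infer schema for ORC. It must be specified manually."
```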
```diff
@@ -580,7 +580,7 @@ abstract class OrcQueryTest extends OrcTest {
     val m1 = intercept[AnalysisException] {
       testAllCorruptFiles()
     }.getMessage
-    assert(m1.contains("Unable to infer schema"))
+    assert(m1.contains("Unable to infer schema for ORC"))
     testAllCorruptFilesWithoutSchemaInfer()
   }
```
```diff
@@ -428,7 +428,7 @@ class DataFrameReaderWriterSuite extends QueryTest with SharedSQLContext with Be
     val message = intercept[AnalysisException] {
       testRead(spark.read.csv(), Seq.empty, schema)
     }.getMessage
-    assert(message.toLowerCase(Locale.ROOT).contains("unable to infer schema for csv"))
+    assert(message.contains("Unable to infer schema for CSV. It must be specified manually."))

     testRead(spark.read.csv(dir), data, schema)
     testRead(spark.read.csv(dir, dir), data ++ data, schema)
```

Member:
why remove `toLowerCase(Locale.ROOT)`?

Member Author:
Because both V1 and V2 show exactly the same message. `toLowerCase(Locale.ROOT)` was added in the migration of CSV V2: https://github.com/apache/spark/pull/24005/files#diff-b9ddfbc9be8d83ecf100b3b8ff9610b9R431
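To illustrate the point, a small self-contained sketch (the message literal below assumes the unified V1/V2 message from this PR) of why the exact, case-sensitive assertion is now safe:

```scala
import java.util.Locale

// The message both the V1 and V2 code paths now produce, per this PR.
val message = "Unable to infer schema for CSV. It must be specified manually."

// Old check: lower-cased, tolerant of casing differences between code paths.
assert(message.toLowerCase(Locale.ROOT).contains("unable to infer schema for csv"))

// New check: exact match against the single shared message.
assert(message.contains("Unable to infer schema for CSV. It must be specified manually."))
```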

