diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
index 7d666729bb434..3f3776bab8fa3 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
@@ -508,7 +508,7 @@ abstract class OrcQueryTest extends OrcTest {
       conf.setBoolean("hive.io.file.read.all.columns", false)
 
       val orcRecordReader = {
-        val file = new File(path).listFiles().find(_.getName.endsWith(".snappy.orc")).head
+        val file = new File(path).listFiles().find(_.getName.endsWith(".orc")).head
         val split = new FileSplit(new Path(file.toURI), 0, file.length, Array.empty[String])
         val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
         val hadoopAttemptContext = new TaskAttemptContextImpl(conf, attemptId)
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcSourceSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcSourceSuite.scala
index 1e98099361dfe..6166773fb0941 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcSourceSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcSourceSuite.scala
@@ -332,8 +332,9 @@ abstract class OrcSuite
   test("SPARK-21839: Add SQL config for ORC compression") {
     val conf = spark.sessionState.conf
 
-    // Test if the default of spark.sql.orc.compression.codec is snappy
-    assert(new OrcOptions(Map.empty[String, String], conf).compressionCodec == SNAPPY.name())
+    // Test if the default of spark.sql.orc.compression.codec is used.
+    assert(new OrcOptions(Map.empty[String, String], conf).compressionCodec ==
+      SQLConf.ORC_COMPRESSION.defaultValueString.toUpperCase(Locale.ROOT))
 
     // OrcOptions's parameters have a higher priority than SQL configuration.
     // `compression` -> `orc.compression` -> `spark.sql.orc.compression.codec`
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcHadoopFsRelationSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcHadoopFsRelationSuite.scala
index aa2f110ceac27..071035853b606 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcHadoopFsRelationSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/orc/OrcHadoopFsRelationSuite.scala
@@ -107,16 +107,6 @@ class OrcHadoopFsRelationSuite extends HadoopFsRelationTest {
       checkAnswer(df, copyDf)
     }
   }
-
-  test("Default compression codec is snappy for ORC compression") {
-    withTempPath { file =>
-      spark.range(0, 10).write
-        .orc(file.getCanonicalPath)
-      val expectedCompressionKind =
-        OrcFileOperator.getFileReader(file.getCanonicalPath).get.getCompression
-      assert(OrcCompressionCodec.SNAPPY.name() === expectedCompressionKind.name())
-    }
-  }
 }
 
 class HiveOrcHadoopFsRelationSuite extends OrcHadoopFsRelationSuite {
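
The `OrcSourceSuite` change above decouples the test from any particular default codec: instead of hard-coding `SNAPPY.name()`, it asserts against `SQLConf.ORC_COMPRESSION.defaultValueString`, relying on the precedence already documented in the test comment: `compression` -> `orc.compression` -> `spark.sql.orc.compression.codec`. Below is a minimal, self-contained Scala sketch of that precedence rule, not Spark's actual `OrcOptions` implementation; the names `OrcCompressionPrecedence` and `resolveCodec` are hypothetical, and the real `OrcOptions` additionally maps codec aliases (e.g. `uncompressed` to `NONE`) and validates the value against the supported codec set.

```scala
import java.util.Locale

// Hypothetical sketch of the option-precedence rule exercised by the
// updated OrcSourceSuite test. Not Spark's real OrcOptions.
object OrcCompressionPrecedence {
  def resolveCodec(
      options: Map[String, String],
      sessionDefault: String): String = {
    // `compression` outranks `orc.compression`, which outranks the
    // session-level spark.sql.orc.compression.codec default.
    options.get("compression")
      .orElse(options.get("orc.compression"))
      .getOrElse(sessionDefault)
      .toUpperCase(Locale.ROOT)
  }

  def main(args: Array[String]): Unit = {
    // Stands in for SQLConf.ORC_COMPRESSION.defaultValueString.
    val sessionDefault = "snappy"
    // No per-write options: the session default wins, which is exactly
    // what the rewritten assert checks.
    assert(resolveCodec(Map.empty, sessionDefault) == "SNAPPY")
    // An explicit `compression` option overrides everything else.
    assert(resolveCodec(
      Map("compression" -> "zstd", "orc.compression" -> "zlib"),
      sessionDefault) == "ZSTD")
    println("precedence checks passed")
  }
}
```

Asserting against the configured default rather than a literal codec name keeps the suite passing if the shipped default ever changes again, which is also why the `OrcQuerySuite` lookup now matches any `.orc` file instead of only `.snappy.orc`, and why the now-redundant `OrcHadoopFsRelationSuite` test pinning the default to snappy is removed.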