From ed16c96aa055def15d402657f86af2d3cdb4db08 Mon Sep 17 00:00:00 2001
From: Jacky Lee
Date: Wed, 7 Dec 2022 17:44:01 +0800
Subject: [PATCH] remove wrong compress type check (#1178)

Since compression has been supported since #1014, the extra compression
check in ArrowConvertExtension can be removed now.
---
 .../oap/spark/sql/ArrowConvertExtension.scala | 16 +---------------
 1 file changed, 1 insertion(+), 15 deletions(-)

diff --git a/arrow-data-source/standard/src/main/scala/com/intel/oap/spark/sql/ArrowConvertExtension.scala b/arrow-data-source/standard/src/main/scala/com/intel/oap/spark/sql/ArrowConvertExtension.scala
index 40abd4efd..f0a72fbb1 100644
--- a/arrow-data-source/standard/src/main/scala/com/intel/oap/spark/sql/ArrowConvertExtension.scala
+++ b/arrow-data-source/standard/src/main/scala/com/intel/oap/spark/sql/ArrowConvertExtension.scala
@@ -43,21 +43,7 @@ case class ArrowConvertorRule(session: SparkSession) extends Rule[LogicalPlan] {
     case c: InsertIntoHadoopFsRelationCommand
       if c.fileFormat.isInstanceOf[ParquetFileFormat] &&
         c.partitionColumns.isEmpty && c.bucketSpec.isEmpty =>
-        // TODO: Support pass parquet config and writing with other codecs
-        // `compression`, `parquet.compression`(i.e., ParquetOutputFormat.COMPRESSION), and
-        // `spark.sql.parquet.compression.codec`
-        // are in order of precedence from highest to lowest.
-        val parquetCompressionConf = c.options.get(ParquetOutputFormat.COMPRESSION)
-        val codecName = c.options
-          .get("compression")
-          .orElse(parquetCompressionConf)
-          .getOrElse(session.sessionState.conf.parquetCompressionCodec)
-          .toLowerCase(Locale.ROOT)
-        if (codecName.equalsIgnoreCase("snappy")) {
-          c.copy(fileFormat = new ArrowFileFormat)
-        } else {
-          c
-        }
+        c.copy(fileFormat = new ArrowFileFormat)
 
     // Read path
     case l@ LogicalRelation(
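--
Note (not part of the commit): a minimal sketch of the user-visible effect of this
patch. Before the change, only writes whose resolved codec was snappy were
converted to ArrowFileFormat; any other codec fell back to the vanilla
ParquetFileFormat. After the change, every non-partitioned, non-bucketed Parquet
write is converted, whatever the codec. The extension class name and output path
below are assumptions for illustration, not confirmed by this patch.

```scala
import org.apache.spark.sql.SparkSession

object GzipWriteSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("local[*]")
      // Assumed entry point that registers ArrowConvertorRule; check the
      // project's docs for the actual extension class name.
      .config("spark.sql.extensions", "com.intel.oap.spark.sql.ArrowWriteExtension")
      .getOrCreate()
    import spark.implicits._

    // With the snappy-only check removed, a gzip write (previously skipped
    // by ArrowConvertorRule) is now also rewritten to use ArrowFileFormat.
    Seq((1, "a"), (2, "b")).toDF("id", "value")
      .write
      .option("compression", "gzip")
      .parquet("/tmp/gzip-write-sketch") // hypothetical output path

    spark.stop()
  }
}
```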