diff --git a/native/core/src/parquet/parquet_exec.rs b/native/core/src/parquet/parquet_exec.rs
index e49bfa981c..79c7e06c63 100644
--- a/native/core/src/parquet/parquet_exec.rs
+++ b/native/core/src/parquet/parquet_exec.rs
@@ -84,11 +84,11 @@ pub(crate) fn init_datasource_exec(
     // Determine the schema to use for ParquetSource
     //
     // Use data_schema only if both data_schema and data_filters are set
-    // let base_schema = match (&data_schema, &data_filters) {
-    //     (Some(schema), Some(_)) => Arc::clone(schema),
-    //     _ => Arc::clone(&required_schema),
-    // };
-    let base_schema = required_schema;
+    let base_schema = match (&data_schema, &projection_vector) {
+        (Some(schema), Some(_)) => Arc::clone(schema),
+        _ => Arc::clone(&required_schema),
+    };
+    //let base_schema = required_schema;
     // dbg!(&base_schema);
     // dbg!(&data_schema);
     // dbg!(&data_filters);
diff --git a/native/spark-expr/src/conversion_funcs/cast.rs b/native/spark-expr/src/conversion_funcs/cast.rs
index b63610afd6..41f70fd2a1 100644
--- a/native/spark-expr/src/conversion_funcs/cast.rs
+++ b/native/spark-expr/src/conversion_funcs/cast.rs
@@ -906,10 +906,10 @@ pub fn spark_cast(
     data_type: &DataType,
     cast_options: &SparkCastOptions,
 ) -> DataFusionResult<ColumnarValue> {
-    let input_type = match &arg {
-        ColumnarValue::Array(array) => array.data_type().clone(),
-        ColumnarValue::Scalar(scalar) => scalar.data_type(),
-    };
+    // let input_type = match &arg {
+    //     ColumnarValue::Array(array) => array.data_type().clone(),
+    //     ColumnarValue::Scalar(scalar) => scalar.data_type(),
+    // };
 
     let result = match arg {
         ColumnarValue::Array(array) => {
@@ -927,10 +927,10 @@ pub fn spark_cast(
         }
     };
 
-    let result_type = match &result {
-        ColumnarValue::Array(array) => array.data_type().clone(),
-        ColumnarValue::Scalar(scalar) => scalar.data_type(),
-    };
+    // let result_type = match &result {
+    //     ColumnarValue::Array(array) => array.data_type().clone(),
+    //     ColumnarValue::Scalar(scalar) => scalar.data_type(),
+    // };
 
     // println!(
     //     "spark_cast: {} -> {} (requested: {})",
diff --git a/native/spark-expr/src/csv_funcs/to_csv.rs b/native/spark-expr/src/csv_funcs/to_csv.rs
index 5816f79993..f41cb7f918 100644
--- a/native/spark-expr/src/csv_funcs/to_csv.rs
+++ b/native/spark-expr/src/csv_funcs/to_csv.rs
@@ -115,8 +115,8 @@ impl PhysicalExpr for ToCsv {
         )))
     }
 
-    fn fmt_sql(&self, _: &mut Formatter<'_>) -> std::fmt::Result {
-        unimplemented!()
+    fn fmt_sql(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        Display::fmt(self, f)
     }
 }
 
diff --git a/native/spark-expr/src/unbound.rs b/native/spark-expr/src/unbound.rs
index de9e6e2e3e..cf0adafa91 100644
--- a/native/spark-expr/src/unbound.rs
+++ b/native/spark-expr/src/unbound.rs
@@ -20,7 +20,7 @@ use arrow::datatypes::{DataType, Schema};
 use datafusion::common::{internal_err, Result};
 use datafusion::physical_expr::PhysicalExpr;
 use datafusion::physical_plan::ColumnarValue;
-use std::fmt::Formatter;
+use std::fmt::{Display, Formatter};
 use std::{hash::Hash, sync::Arc};
 
 /// This is similar to `UnKnownColumn` in DataFusion, but it has data type.
@@ -64,8 +64,8 @@ impl PhysicalExpr for UnboundColumn {
         self
     }
 
-    fn fmt_sql(&self, _: &mut Formatter<'_>) -> std::fmt::Result {
-        unimplemented!()
+    fn fmt_sql(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        Display::fmt(self, f)
     }
 
     /// Get the data type of this expression, given the schema of the input
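
Note on the two `fmt_sql` changes: `ToCsv::fmt_sql` and `UnboundColumn::fmt_sql` previously panicked via `unimplemented!()`; the patch delegates to each type's existing `Display` impl instead, which is why `Display` is added to the `std::fmt` import in unbound.rs. Below is a minimal standalone sketch of that delegation pattern; `Expr` and the `Sql` adapter are simplified stand-ins for illustration, not DataFusion's real `PhysicalExpr` trait:

    use std::fmt::{Display, Formatter};

    // Simplified stand-in for an expression type such as UnboundColumn.
    struct Expr {
        name: String,
    }

    impl Display for Expr {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            write!(f, "{} (unbound)", self.name)
        }
    }

    impl Expr {
        // Mirrors the patched fmt_sql bodies: delegate to the Display impl
        // instead of panicking with unimplemented!().
        fn fmt_sql(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            Display::fmt(self, f)
        }
    }

    // Adapter so fmt_sql can be driven through the normal formatting machinery.
    struct Sql<'a>(&'a Expr);

    impl Display for Sql<'_> {
        fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
            self.0.fmt_sql(f)
        }
    }

    fn main() {
        let e = Expr { name: "a".into() };
        // The SQL rendering and the Display rendering now agree.
        assert_eq!(format!("{}", Sql(&e)), format!("{e}"));
        println!("{}", Sql(&e)); // prints: a (unbound)
    }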