From 9878ee011b99a57a6c3f87c0205061307d1e8498 Mon Sep 17 00:00:00 2001
From: jakevin
Date: Fri, 10 Mar 2023 00:53:30 +0800
Subject: [PATCH] minor: fix clippy problem in new version. (#5532)

* minor: fix typo

* minor: fix clippy problem in new version.

* fix new typo
---
 datafusion/common/src/dfschema.rs                           | 2 +-
 datafusion/core/src/physical_plan/aggregates/no_grouping.rs | 2 +-
 datafusion/core/src/physical_plan/aggregates/row_hash.rs    | 2 +-
 datafusion/expr/src/logical_plan/plan.rs                    | 4 ++--
 datafusion/physical-expr/src/aggregate/count_distinct.rs    | 2 +-
 datafusion/sql/src/expr/arrow_cast.rs                       | 4 ++--
 6 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/datafusion/common/src/dfschema.rs b/datafusion/common/src/dfschema.rs
index 7c33196285ee..bb443f8aa564 100644
--- a/datafusion/common/src/dfschema.rs
+++ b/datafusion/common/src/dfschema.rs
@@ -519,7 +519,7 @@ impl From for SchemaRef {
 }
 
 // Hashing refers to a subset of fields considered in PartialEq.
-#[allow(clippy::derive_hash_xor_eq)]
+#[allow(clippy::derived_hash_with_manual_eq)]
 impl Hash for DFSchema {
     fn hash(&self, state: &mut H) {
         self.fields.hash(state);
diff --git a/datafusion/core/src/physical_plan/aggregates/no_grouping.rs b/datafusion/core/src/physical_plan/aggregates/no_grouping.rs
index 6356009a4915..c13f005b0335 100644
--- a/datafusion/core/src/physical_plan/aggregates/no_grouping.rs
+++ b/datafusion/core/src/physical_plan/aggregates/no_grouping.rs
@@ -164,7 +164,7 @@ impl RecordBatchStream for AggregateStream {
 
 /// Perform group-by aggregation for the given [`RecordBatch`].
 ///
-/// If successfull, this returns the additional number of bytes that were allocated during this process.
+/// If successful, this returns the additional number of bytes that were allocated during this process.
 ///
 /// TODO: Make this a member function
 fn aggregate_batch(
diff --git a/datafusion/core/src/physical_plan/aggregates/row_hash.rs b/datafusion/core/src/physical_plan/aggregates/row_hash.rs
index 5a5e36a2480e..de769bbe8fea 100644
--- a/datafusion/core/src/physical_plan/aggregates/row_hash.rs
+++ b/datafusion/core/src/physical_plan/aggregates/row_hash.rs
@@ -297,7 +297,7 @@ impl RecordBatchStream for GroupedHashAggregateStream {
 impl GroupedHashAggregateStream {
     /// Perform group-by aggregation for the given [`RecordBatch`].
     ///
-    /// If successfull, this returns the additional number of bytes that were allocated during this process.
+    /// If successful, this returns the additional number of bytes that were allocated during this process.
     ///
     fn group_aggregate_batch(&mut self, batch: RecordBatch) -> Result {
         // Evaluate the grouping expressions:
diff --git a/datafusion/expr/src/logical_plan/plan.rs b/datafusion/expr/src/logical_plan/plan.rs
index f5866d5a96d5..c8a8be949b49 100644
--- a/datafusion/expr/src/logical_plan/plan.rs
+++ b/datafusion/expr/src/logical_plan/plan.rs
@@ -1620,7 +1620,7 @@ pub struct CreateExternalTable {
 }
 
 // Hashing refers to a subset of fields considered in PartialEq.
-#[allow(clippy::derive_hash_xor_eq)]
+#[allow(clippy::derived_hash_with_manual_eq)]
 impl Hash for CreateExternalTable {
     fn hash(&self, state: &mut H) {
         self.schema.hash(state);
@@ -1719,7 +1719,7 @@ pub struct Analyze {
 }
 
 /// Extension operator defined outside of DataFusion
-#[allow(clippy::derive_hash_xor_eq)] // see impl PartialEq for explanation
+#[allow(clippy::derived_hash_with_manual_eq)] // see impl PartialEq for explanation
 #[derive(Clone, Eq, Hash)]
 pub struct Extension {
     /// The runtime extension operator
diff --git a/datafusion/physical-expr/src/aggregate/count_distinct.rs b/datafusion/physical-expr/src/aggregate/count_distinct.rs
index df4a9ab7b9b5..89a464bc0c96 100644
--- a/datafusion/physical-expr/src/aggregate/count_distinct.rs
+++ b/datafusion/physical-expr/src/aggregate/count_distinct.rs
@@ -112,7 +112,7 @@ impl DistinctCountAccumulator {
                 .values
                 .iter()
                 .next()
-                .map(|vals| ScalarValue::size(vals) - std::mem::size_of_val(&vals))
+                .map(|vals| ScalarValue::size(vals) - std::mem::size_of_val(vals))
                 .unwrap_or(0)
     }
 }
diff --git a/datafusion/sql/src/expr/arrow_cast.rs b/datafusion/sql/src/expr/arrow_cast.rs
index bc1313e2c114..49104ee05d1a 100644
--- a/datafusion/sql/src/expr/arrow_cast.rs
+++ b/datafusion/sql/src/expr/arrow_cast.rs
@@ -127,10 +127,10 @@ impl<'a> Parser<'a> {
         let data_type = self.parse_next_type()?;
         // ensure that there is no trailing content
        if self.tokenizer.next().is_some() {
-            return Err(make_error(
+            Err(make_error(
                 self.val,
                 &format!("checking trailing content after parsing '{data_type}'"),
-            ));
+            ))
         } else {
             Ok(data_type)
         }
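
The rename is the bulk of the patch: `clippy::derive_hash_xor_eq` became `clippy::derived_hash_with_manual_eq` in the clippy release this patch targets, so the existing `#[allow]` attributes stopped matching and only their names change here. Below is a minimal, hypothetical sketch (the `ColumnRef` type is invented for illustration and is not from DataFusion) of the pattern the lint guards and that the `Extension` change above mirrors: `Hash` is derived while `PartialEq` is hand-written, so the author must vouch that equal values still hash equally and silence the lint explicitly.

use std::collections::HashMap;

// Hypothetical type, for illustration only.
#[allow(clippy::derived_hash_with_manual_eq)] // was `clippy::derive_hash_xor_eq`
#[derive(Debug, Clone, Eq, Hash)]
struct ColumnRef {
    relation: Option<String>,
    name: String,
}

impl PartialEq for ColumnRef {
    fn eq(&self, other: &Self) -> bool {
        // Compares the same fields the derived `Hash` uses, so the
        // `a == b => hash(a) == hash(b)` invariant holds; clippy still
        // warns because it only sees "derived Hash + manual PartialEq".
        self.relation == other.relation && self.name == other.name
    }
}

fn main() {
    let mut seen: HashMap<ColumnRef, usize> = HashMap::new();
    seen.insert(ColumnRef { relation: Some("t".into()), name: "a".into() }, 1);
    assert_eq!(seen.get(&ColumnRef { relation: Some("t".into()), name: "a".into() }), Some(&1));
}

The other two hunks are equally mechanical. In count_distinct.rs, `std::mem::size_of_val(&vals)` measured the size of the reference `vals` itself (one pointer), not the `ScalarValue` it points to; `size_of_val(vals)` measures the pointee, which is what the accumulator's size estimate intends. In arrow_cast.rs, the `return Err(...)` inside the tail `if`/`else` becomes the plain expression form, which satisfies clippy's return-style check (needless_return, as far as I can tell).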