Skip to content

Commit c0c9e88

Browse files
authored
fix clippy warnings (#8455)
* change get(0) to first() Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* waker clone to wake_by_ref Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* more first() Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
* try_from() to from() Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
---------
Signed-off-by: Ruihang Xia <waynestxia@gmail.com>
1 parent 9be9073 commit c0c9e88

File tree

28 files changed

+51
-59
lines changed

28 files changed

+51
-59
lines changed

datafusion-examples/examples/custom_datasource.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ async fn search_accounts(
8080

8181
timeout(Duration::from_secs(10), async move {
8282
let result = dataframe.collect().await.unwrap();
83-
let record_batch = result.get(0).unwrap();
83+
let record_batch = result.first().unwrap();
8484

8585
assert_eq!(expected_result_length, record_batch.column(1).len());
8686
dbg!(record_batch.columns());

datafusion-examples/examples/memtable.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@ async fn main() -> Result<()> {
4040

4141
timeout(Duration::from_secs(10), async move {
4242
let result = dataframe.collect().await.unwrap();
43-
let record_batch = result.get(0).unwrap();
43+
let record_batch = result.first().unwrap();
4444

4545
assert_eq!(1, record_batch.column(0).len());
4646
dbg!(record_batch.columns());

datafusion-examples/examples/simple_udtf.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -129,7 +129,7 @@ struct LocalCsvTableFunc {}
129129

130130
impl TableFunctionImpl for LocalCsvTableFunc {
131131
fn call(&self, exprs: &[Expr]) -> Result<Arc<dyn TableProvider>> {
132-
let Some(Expr::Literal(ScalarValue::Utf8(Some(ref path)))) = exprs.get(0) else {
132+
let Some(Expr::Literal(ScalarValue::Utf8(Some(ref path)))) = exprs.first() else {
133133
return plan_err!("read_csv requires at least one string argument");
134134
};
135135

datafusion/common/src/dfschema.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1476,8 +1476,8 @@ mod tests {
14761476
DFSchema::new_with_metadata([a, b].to_vec(), HashMap::new()).unwrap(),
14771477
);
14781478
let schema: Schema = df_schema.as_ref().clone().into();
1479-
let a_df = df_schema.fields.get(0).unwrap().field();
1480-
let a_arrow = schema.fields.get(0).unwrap();
1479+
let a_df = df_schema.fields.first().unwrap().field();
1480+
let a_arrow = schema.fields.first().unwrap();
14811481
assert_eq!(a_df.metadata(), a_arrow.metadata())
14821482
}
14831483

datafusion/common/src/error.rs

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -564,18 +564,16 @@ mod test {
564564
assert_eq!(
565565
err.split(DataFusionError::BACK_TRACE_SEP)
566566
.collect::<Vec<&str>>()
567-
.get(0)
567+
.first()
568568
.unwrap(),
569569
&"Error during planning: Err"
570570
);
571-
assert!(
572-
err.split(DataFusionError::BACK_TRACE_SEP)
573-
.collect::<Vec<&str>>()
574-
.get(1)
575-
.unwrap()
576-
.len()
577-
> 0
578-
);
571+
assert!(!err
572+
.split(DataFusionError::BACK_TRACE_SEP)
573+
.collect::<Vec<&str>>()
574+
.get(1)
575+
.unwrap()
576+
.is_empty());
579577
}
580578

581579
#[cfg(not(feature = "backtrace"))]

datafusion/common/src/utils.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -135,7 +135,7 @@ pub fn bisect<const SIDE: bool>(
135135
) -> Result<usize> {
136136
let low: usize = 0;
137137
let high: usize = item_columns
138-
.get(0)
138+
.first()
139139
.ok_or_else(|| {
140140
DataFusionError::Internal("Column array shouldn't be empty".to_string())
141141
})?
@@ -186,7 +186,7 @@ pub fn linear_search<const SIDE: bool>(
186186
) -> Result<usize> {
187187
let low: usize = 0;
188188
let high: usize = item_columns
189-
.get(0)
189+
.first()
190190
.ok_or_else(|| {
191191
DataFusionError::Internal("Column array shouldn't be empty".to_string())
192192
})?

datafusion/core/benches/sort_limit_query_sql.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ fn create_context() -> Arc<Mutex<SessionContext>> {
9999
ctx_holder.lock().push(Arc::new(Mutex::new(ctx)))
100100
});
101101

102-
let ctx = ctx_holder.lock().get(0).unwrap().clone();
102+
let ctx = ctx_holder.lock().first().unwrap().clone();
103103
ctx
104104
}
105105

datafusion/core/benches/sql_query_with_io.rs

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -93,10 +93,9 @@ async fn setup_files(store: Arc<dyn ObjectStore>) {
9393
for partition in 0..TABLE_PARTITIONS {
9494
for file in 0..PARTITION_FILES {
9595
let data = create_parquet_file(&mut rng, file * FILE_ROWS);
96-
let location = Path::try_from(format!(
96+
let location = Path::from(format!(
9797
"{table_name}/partition={partition}/{file}.parquet"
98-
))
99-
.unwrap();
98+
));
10099
store.put(&location, data).await.unwrap();
101100
}
102101
}

datafusion/core/src/datasource/file_format/parquet.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1803,8 +1803,8 @@ mod tests {
18031803
// there is only one row group in one file.
18041804
assert_eq!(page_index.len(), 1);
18051805
assert_eq!(offset_index.len(), 1);
1806-
let page_index = page_index.get(0).unwrap();
1807-
let offset_index = offset_index.get(0).unwrap();
1806+
let page_index = page_index.first().unwrap();
1807+
let offset_index = offset_index.first().unwrap();
18081808

18091809
// 13 col in one row group
18101810
assert_eq!(page_index.len(), 13);

datafusion/core/src/datasource/file_format/write/demux.rs

Lines changed: 2 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -264,12 +264,9 @@ async fn hive_style_partitions_demuxer(
264264
// TODO: upstream RecordBatch::take to arrow-rs
265265
let take_indices = builder.finish();
266266
let struct_array: StructArray = rb.clone().into();
267-
let parted_batch = RecordBatch::try_from(
267+
let parted_batch = RecordBatch::from(
268268
arrow::compute::take(&struct_array, &take_indices, None)?.as_struct(),
269-
)
270-
.map_err(|_| {
271-
DataFusionError::Internal("Unexpected error partitioning batch!".into())
272-
})?;
269+
);
273270

274271
// Get or create channel for this batch
275272
let part_tx = match value_map.get_mut(&part_key) {

0 commit comments

Comments
 (0)