Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 34 additions & 1 deletion datafusion/datasource/src/source.rs
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ use std::fmt;
use std::fmt::{Debug, Formatter};
use std::sync::Arc;

use datafusion_physical_expr::equivalence::ProjectionMapping;
use datafusion_physical_plan::execution_plan::{
Boundedness, EmissionType, SchedulingType,
};
Expand Down Expand Up @@ -320,7 +321,39 @@ impl ExecutionPlan for DataSourceExec {
&self,
projection: &ProjectionExec,
) -> Result<Option<Arc<dyn ExecutionPlan>>> {
self.data_source.try_swapping_with_projection(projection)
match self.data_source.try_swapping_with_projection(projection)? {
Some(new_plan) => {
if let Some(new_data_source_exec) =
new_plan.as_any().downcast_ref::<DataSourceExec>()
Comment on lines +326 to +327
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I cry every time I see a downcast of a trait object to a specific type. I understand why it's necessary in this case — I think the existing API is to blame — and it's worth moving forward with this to fix the (pretty bad, IMO) bug, BUT this is the sort of thing I hope a future refactor will make unnecessary.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Maybe it is worth filing a ticket to track the issue and the proposed solution, if one does not exist already.

Copy link
Contributor

@adriangb adriangb Aug 12, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good point — it's #15952; I should have put that in the code comment.

{
let projection_mapping = ProjectionMapping::try_new(
projection.expr().iter().cloned(),
&self.schema(),
)?;

// Project the equivalence properties to the new schema
let projected_eq_properties = self
.cache
.eq_properties
.project(&projection_mapping, new_data_source_exec.schema());

let preserved_exec = DataSourceExec {
data_source: Arc::clone(&new_data_source_exec.data_source),
cache: PlanProperties::new(
projected_eq_properties,
new_data_source_exec.cache.partitioning.clone(),
new_data_source_exec.cache.emission_type,
new_data_source_exec.cache.boundedness,
)
.with_scheduling_type(new_data_source_exec.cache.scheduling_type),
};
Ok(Some(Arc::new(preserved_exec)))
} else {
Ok(Some(new_plan))
}
}
None => Ok(None),
}
}

fn handle_child_pushdown_result(
Expand Down
Binary file added datafusion/sqllogictest/data/1.parquet
Binary file not shown.
Binary file added datafusion/sqllogictest/data/2.parquet
Binary file not shown.
32 changes: 32 additions & 0 deletions datafusion/sqllogictest/test_files/parquet_filter_pushdown.slt
Original file line number Diff line number Diff line change
Expand Up @@ -543,3 +543,35 @@ query TT
select val, part from t_pushdown where part = val AND part = 'a';
----
a a

# Write a one-row parquet file (trace_id ...0001, environment 'prod') that the
# filter-pushdown regression test reads back.
# NOTE(review): the target path is relative to the test working directory and a
# file named data/1.parquet is also checked into the repo — confirm this does
# not clobber / diverge from the committed fixture.
statement ok
COPY (
SELECT
'00000000000000000000000000000001' AS trace_id,
'2023-10-01 00:00:00'::timestamptz AS start_timestamp,
'prod' as deployment_environment
)
TO 'data/1.parquet';
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

By the way, this writes to datafusion/sqllogictest/data/1.parquet, which is also checked into the repository. I hit issues with this during an upgrade, because the generated data differs and so the files being written come out different from the committed ones:

#17175 (comment)

I have a fix and will forward port it to main, but wanted to drop a note here in case anyone else saw it


# Write a second one-row parquet file (trace_id ...0002, environment 'staging')
# with a different start_timestamp so the ORDER BY below is meaningful.
# NOTE(review): same relative-path concern as data/1.parquet — a file with this
# name is also checked in; verify the on-disk fixture matches what COPY writes.
statement ok
COPY (
SELECT
'00000000000000000000000000000002' AS trace_id,
'2024-10-01 00:00:00'::timestamptz AS start_timestamp,
'staging' as deployment_environment
)
TO 'data/2.parquet';

# Register the directory containing both generated parquet files as a single
# external table, so the scan spans two row groups / files.
statement ok
CREATE EXTERNAL TABLE t1 STORED AS PARQUET LOCATION 'data/';

# Enable pushing filter predicates down into the parquet decoder — the setting
# under which the bug this test guards against reproduced.
statement ok
SET datafusion.execution.parquet.pushdown_filters = true;

# With filter pushdown enabled, select a column NOT referenced by the predicate
# or the ORDER BY; the matching row must come from data/2.parquet.
# NOTE(review): this appears to be regression coverage for the projection-swap
# change to DataSourceExec in this PR — confirm against the linked issue.
query T
SELECT deployment_environment
FROM t1
WHERE trace_id = '00000000000000000000000000000002'
ORDER BY start_timestamp, trace_id;
----
staging