@@ -98,18 +98,20 @@ use object_store::ObjectStore;
 /// # use datafusion_physical_expr::expressions::lit;
 /// # use datafusion_datasource::source::DataSourceExec;
 /// # use datafusion_common::config::TableParquetOptions;
+/// use datafusion_datasource::file::FileSource;
 ///
 /// # let file_schema = Arc::new(Schema::empty());
 /// # let object_store_url = ObjectStoreUrl::local_filesystem();
 /// # let predicate = lit(true);
-/// let source = Arc::new(
-/// ParquetSource::new(TableParquetOptions::default(), config)
-/// .with_predicate(predicate)
-/// );
 /// // Create a DataSourceExec for reading `file1.parquet` with a file size of 100MB
-/// let config = FileScanConfigBuilder::new(object_store_url, file_schema, source )
+/// let config = FileScanConfigBuilder::new(object_store_url, file_schema)
 /// .with_file(PartitionedFile::new("file1.parquet", 100*1024*1024)).build();
-/// let exec =DataSourceExec::from_data_source(source);
+///
+/// let source = ParquetSource::new(TableParquetOptions::default(), config)
+/// .with_predicate(predicate)
+/// .as_data_source();
+///
+/// let exec = Arc::new(DataSourceExec::new(source));
 /// ```
 ///
 /// # Features
@@ -176,7 +178,7 @@ use object_store::ObjectStore;
 /// # use datafusion_datasource::file::FileSource;
 /// # use datafusion_datasource::file_scan_config::{FileScanConfig, FileScanConfigBuilder};
 /// # use datafusion_datasource::PartitionedFile;
-/// # use datafusion_datasource::source::DataSourceExec;///
+/// # use datafusion_datasource::source::DataSourceExec;
 /// # use datafusion_datasource_parquet::source::ParquetSource;
 ///
 /// fn parquet_exec() -> DataSourceExec { unimplemented!() }
@@ -194,7 +196,9 @@ use object_store::ObjectStore;
 /// .with_file_groups(vec![file_group.clone()])
 /// .build();
 ///
-/// Arc::new(DataSourceExec::new(parquet_source.with_config(new_config)))
+/// let source = parquet_source.with_config(new_config).as_data_source();
+///
+/// Arc::new(DataSourceExec::new(source))
 /// })
 /// .collect::<Vec<_>>();
 /// ```
@@ -215,6 +219,7 @@ use object_store::ObjectStore;
 /// ```
 /// # use std::sync::Arc;
 /// # use arrow::datatypes::{Schema, SchemaRef};
+/// # use datafusion_common::config::TableParquetOptions;
 /// # use datafusion_datasource::PartitionedFile;
 /// # use datafusion_datasource_parquet::ParquetAccessPlan;
 /// # use datafusion_datasource::file_scan_config::{FileScanConfig, FileScanConfigBuilder};
@@ -233,11 +238,14 @@ use object_store::ObjectStore;
 /// let partitioned_file = PartitionedFile::new("my_file.parquet", 1234)
 /// .with_extensions(Arc::new(access_plan));
 /// // create a FileScanConfig to scan this file
-/// let config = FileScanConfigBuilder::new(ObjectStoreUrl::local_filesystem(), schema(), Arc::new(ParquetSource::new(TableParquetOptions::default(), config)) )
+/// let config = FileScanConfigBuilder::new(ObjectStoreUrl::local_filesystem(), schema())
 /// .with_file(partitioned_file).build();
+///
+/// let source = ParquetSource::new(TableParquetOptions::default(), config);
+///
 /// // this parquet DataSourceExec will not even try to read row groups 2 and 4. Additional
 /// // pruning based on predicates may also happen
-/// let exec =DataSourceExec::from_data_source(source);
+/// let exec = DataSourceExec::from_data_source(source);
 /// ```
 ///
 /// For a complete example, see the [`advanced_parquet_index` example]).