6 changes: 6 additions & 0 deletions rust/sedona-geoparquet/src/file_opener.rs
@@ -23,6 +23,7 @@ use datafusion::datasource::{
};
use datafusion_common::Result;
use datafusion_datasource_parquet::metadata::DFParquetMetadata;
use datafusion_execution::cache::cache_manager::FileMetadataCache;
use datafusion_physical_expr::PhysicalExpr;
use datafusion_physical_plan::metrics::{Count, ExecutionPlanMetricsSet, MetricBuilder};
use object_store::ObjectStore;
@@ -86,6 +87,7 @@ pub struct GeoParquetFileOpener {
file_schema: SchemaRef,
enable_pruning: bool,
metrics: GeoParquetFileOpenerMetrics,
file_metadata_cache: Option<Arc<dyn FileMetadataCache>>,
}

impl GeoParquetFileOpener {
@@ -98,6 +100,7 @@ impl GeoParquetFileOpener {
file_schema: SchemaRef,
enable_pruning: bool,
execution_plan_global_metrics: &ExecutionPlanMetricsSet,
file_metadata_cache: Option<Arc<dyn FileMetadataCache>>,
) -> Self {
Self {
inner,
@@ -107,6 +110,7 @@
file_schema,
enable_pruning,
metrics: GeoParquetFileOpenerMetrics::new(execution_plan_global_metrics),
file_metadata_cache,
}
}
}
@@ -116,9 +120,11 @@ impl FileOpener for GeoParquetFileOpener {
let self_clone = self.clone();

Ok(Box::pin(async move {
let file_metadata_cache = self_clone.file_metadata_cache.clone();
let parquet_metadata =
DFParquetMetadata::new(&self_clone.object_store, &file_meta.object_meta)
.with_metadata_size_hint(self_clone.metadata_size_hint)
.with_file_metadata_cache(file_metadata_cache)
.fetch_metadata()
.await?;

42 changes: 35 additions & 7 deletions rust/sedona-geoparquet/src/format.rs
@@ -35,6 +35,7 @@ use datafusion::{
use datafusion_catalog::{memory::DataSourceExec, Session};
use datafusion_common::{plan_err, GetExt, Result, Statistics};
use datafusion_datasource_parquet::metadata::DFParquetMetadata;
use datafusion_execution::cache::cache_manager::FileMetadataCache;
use datafusion_physical_expr::{LexRequirement, PhysicalExpr};
use datafusion_physical_plan::{
filter_pushdown::FilterPushdownPropagation, metrics::ExecutionPlanMetricsSet, ExecutionPlan,
@@ -189,11 +190,16 @@ impl FileFormat for GeoParquetFormat {
// copy more ParquetFormat code. It may be that caching at the object
// store level is the way to go here.
let metadatas: Vec<_> = futures::stream::iter(objects)
.map(|object| async move {
DFParquetMetadata::new(store.as_ref(), object)
.with_metadata_size_hint(self.inner().metadata_size_hint())
.fetch_metadata()
.await
.map(|object| {
let file_metadata_cache =
state.runtime_env().cache_manager.get_file_metadata_cache();
Comment on lines +194 to +195 (Collaborator Author):
with_file_metadata_cache() is called for each iteration of the loop (.map()), so we need a separate clone for each iteration. get_file_metadata_cache() already returns a cloned Arc, so there is no need to call .clone() again.

https://github.com/apache/datafusion/blob/28755b1d7eb5222a8f5fb5417134dd6865ac1311/datafusion/execution/src/cache/cache_manager.rs#L174-L176
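For illustration, here is a minimal, self-contained sketch of the same pattern (hypothetical data and names, assuming the `futures` and `tokio` crates are available): each `.map()` iteration captures its own `Arc` clone in its `async move` block, so the concurrently buffered tasks never contend over a single handle.

```rust
use std::sync::Arc;

use futures::stream::{self, StreamExt};

#[tokio::main]
async fn main() {
    // Stands in for the Arc<dyn FileMetadataCache> handle returned by
    // get_file_metadata_cache(); the contents here are made up.
    let cache: Arc<Vec<&str>> = Arc::new(vec!["a.parquet", "b.parquet"]);

    let sizes: Vec<usize> = stream::iter(0..cache.len())
        .map(|i| {
            // One clone per iteration: the async move block takes ownership,
            // so each concurrent task needs its own Arc.
            let cache = Arc::clone(&cache);
            async move { cache[i].len() }
        })
        .buffered(2) // fetch concurrently, as meta_fetch_concurrency does above
        .collect()
        .await;

    assert_eq!(sizes, vec![9, 9]);
}
```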

async move {
DFParquetMetadata::new(store.as_ref(), object)
.with_metadata_size_hint(self.inner().metadata_size_hint())
.with_file_metadata_cache(Some(file_metadata_cache))
.fetch_metadata()
.await
}
})
.boxed() // Workaround https://github.com/rust-lang/rust/issues/64552
.buffered(state.config_options().execution.meta_fetch_concurrency)
@@ -270,7 +276,7 @@

async fn create_physical_plan(
&self,
_state: &dyn Session,
state: &dyn Session,
config: FileScanConfig,
) -> Result<Arc<dyn ExecutionPlan>> {
// A copy of ParquetSource::create_physical_plan() that ensures the underlying
@@ -287,6 +293,9 @@
source = source.with_metadata_size_hint(metadata_size_hint)
}

source = source
.with_file_metadata_cache(state.runtime_env().cache_manager.get_file_metadata_cache());

let conf = FileScanConfigBuilder::from(config)
.with_source(Arc::new(source))
.build();
@@ -327,6 +336,8 @@ pub struct GeoParquetFileSource {
inner: ParquetSource,
metadata_size_hint: Option<usize>,
predicate: Option<Arc<dyn PhysicalExpr>>,
// pub(crate) parquet_file_reader_factory: Option<Arc<dyn ParquetFileReaderFactory>>,
file_metadata_cache: Option<Arc<dyn FileMetadataCache>>,
}

impl GeoParquetFileSource {
@@ -336,6 +347,7 @@ impl GeoParquetFileSource {
inner: ParquetSource::new(options.inner.clone()),
metadata_size_hint: None,
predicate: None,
file_metadata_cache: None,
}
}

@@ -383,6 +395,7 @@ impl GeoParquetFileSource {
inner: parquet_source.clone(),
metadata_size_hint,
predicate: new_predicate,
file_metadata_cache: None,
})
} else {
sedona_internal_err!("GeoParquetFileSource constructed from non-ParquetSource")
@@ -395,9 +408,18 @@
inner: self.inner.with_predicate(predicate.clone()),
metadata_size_hint: self.metadata_size_hint,
predicate: Some(predicate),
file_metadata_cache: self.file_metadata_cache.clone(),
}
}

pub fn with_file_metadata_cache(
mut self,
file_metadata_cache: Arc<dyn FileMetadataCache>,
) -> Self {
self.file_metadata_cache = Some(file_metadata_cache);
self
}

/// Apply a [SchemaAdapterFactory] to the inner [ParquetSource]
pub fn with_schema_adapter_factory(
&self,
Expand All @@ -419,6 +441,7 @@ impl GeoParquetFileSource {
inner: parquet_source,
metadata_size_hint: self.metadata_size_hint,
predicate: self.predicate.clone(),
file_metadata_cache: self.file_metadata_cache.clone(),
}
}

Expand All @@ -428,6 +451,7 @@ impl GeoParquetFileSource {
inner: self.inner.clone().with_metadata_size_hint(hint),
metadata_size_hint: Some(hint),
predicate: self.predicate.clone(),
file_metadata_cache: self.file_metadata_cache.clone(),
}
}
}
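Aside (not part of the diff): the new `with_file_metadata_cache` follows the same consuming-builder shape as the existing `with_*` methods. Below is a minimal standalone sketch of that shape, using hypothetical stand-in types (`Source`, `InMemoryCache`) rather than the real `GeoParquetFileSource` and `FileMetadataCache`:

```rust
use std::sync::Arc;

// Hypothetical stand-ins; only the builder shape mirrors the diff.
trait FileMetadataCache: Send + Sync {}
struct InMemoryCache;
impl FileMetadataCache for InMemoryCache {}

#[derive(Default)]
struct Source {
    metadata_size_hint: Option<usize>,
    file_metadata_cache: Option<Arc<dyn FileMetadataCache>>,
}

impl Source {
    // Same shape as GeoParquetFileSource::with_file_metadata_cache above.
    fn with_file_metadata_cache(mut self, cache: Arc<dyn FileMetadataCache>) -> Self {
        self.file_metadata_cache = Some(cache);
        self
    }

    fn with_metadata_size_hint(mut self, hint: usize) -> Self {
        self.metadata_size_hint = Some(hint);
        self
    }
}

fn main() {
    let source = Source::default()
        .with_metadata_size_hint(64 * 1024)
        .with_file_metadata_cache(Arc::new(InMemoryCache));
    assert!(source.file_metadata_cache.is_some());
    assert_eq!(source.metadata_size_hint, Some(64 * 1024));
}
```

Storing the cache as `Option<Arc<dyn FileMetadataCache>>` keeps it strictly opt-in: when it is `None`, metadata is fetched from the object store on every open, as before this change.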
@@ -458,6 +482,7 @@ impl FileSource for GeoParquetFileSource {
// HACK: Since there is no public API to set inner's metrics, we use
// inner's metrics as the ExecutionPlan-global metrics
self.inner.metrics(),
self.file_metadata_cache.clone(),
))
}

@@ -469,11 +494,14 @@
let inner_result = self.inner.try_pushdown_filters(filters.clone(), config)?;
match &inner_result.updated_node {
Some(updated_node) => {
let updated_inner = Self::try_from_file_source(
let mut updated_inner = Self::try_from_file_source(
updated_node.clone(),
self.metadata_size_hint,
None,
)?;
if let Some(file_metadata_cache) = self.file_metadata_cache.clone() {
updated_inner = updated_inner.with_file_metadata_cache(file_metadata_cache);
}
Ok(inner_result.with_updated_node(Arc::new(updated_inner)))
}
None => Ok(inner_result),