build: update to df 38 (#503)
tshauck authored May 13, 2024
1 parent fbc6d80 commit 4b2620d
Showing 6 changed files with 71 additions and 40 deletions.
96 changes: 63 additions & 33 deletions Cargo.lock

Some generated files are not rendered by default.

2 changes: 1 addition & 1 deletion Cargo.toml
@@ -36,7 +36,7 @@ resolver = "2"
[workspace.dependencies]
arrow = { version = "51.0.0" }
async-trait = "0.1.80"
- datafusion = { version = "37", features = ["compression", "parquet"] }
+ datafusion = { version = "38", features = ["compression", "parquet"] }
futures = "0.3"
noodles = { version = "0.71" }
object_store = { version = "0.9" }
2 changes: 1 addition & 1 deletion exon/exon-cli/Cargo.toml
@@ -10,7 +10,7 @@ version.workspace = true
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
- datafusion-cli = { version = "37" }
+ datafusion-cli = { version = "38" }
clap = { version = "4", features = ["derive", "cargo"] }
datafusion = { workspace = true }
exon = { path = "../exon-core", version = "0.20.0", features = ["default"] }
@@ -24,7 +24,7 @@ use arrow::{
};

use datafusion::{
- common::{DFField, DFSchema},
+ common::DFSchema,
datasource::listing::{ListingTableUrl, PartitionedFile},
error::{DataFusionError, Result},
execution::context::SessionState,
@@ -146,6 +146,7 @@ pub async fn pruned_partition_list<'a>(
partition_values: partition_values.clone(),
range: None,
extensions: None,
+ statistics: None,
})
}));
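Not part of the commit: a minimal sketch of the hunk above, showing that building a `PartitionedFile` by hand under DataFusion 38 now requires the extra `statistics` field (left as `None` when unknown, as the commit does). The file path and partition value below are made-up placeholders, and the sketch assumes `chrono` is available.

    use chrono::Utc;
    use datafusion::{common::ScalarValue, datasource::listing::PartitionedFile};
    use object_store::{path::Path, ObjectMeta};

    // Sketch only: a hand-built PartitionedFile under DataFusion 38.
    // Path and partition value are hypothetical.
    fn example_file() -> PartitionedFile {
        PartitionedFile {
            object_meta: ObjectMeta {
                location: Path::from("data/sample.fasta"),
                last_modified: Utc::now(),
                size: 0,
                e_tag: None,
                version: None,
            },
            partition_values: vec![ScalarValue::from("chr1")],
            range: None,
            extensions: None,
            statistics: None, // new field in DataFusion 38; None means "unknown"
        }
    }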

@@ -262,10 +263,10 @@ async fn prune_partitions(
// let fields: Fields = partition_cols.collect();
let schema = Arc::new(Schema::new(partition_cols.to_vec()));

- let df_schema = DFSchema::new_with_metadata(
+ let df_schema = DFSchema::from_unqualifed_fields(
partition_cols
.iter()
- .map(|f| DFField::new_unqualified(f.name(), f.data_type().clone(), f.is_nullable())) // TODO: use qualified name, remove clone
+ .map(|f| Field::new(f.name(), f.data_type().clone(), f.is_nullable())) // TODO: use qualified name, remove clone
.collect(),
Default::default(),
)?;
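Not from the commit: a short sketch, with hypothetical column names, of the DataFusion 38 pattern used above — plain Arrow `Field`s are collected into `Fields` and handed to `DFSchema::from_unqualifed_fields` (the spelling shipped in DataFusion 38), replacing the removed `DFField::new_unqualified` helper.

    use std::collections::HashMap;

    use arrow::datatypes::{DataType, Field, Fields};
    use datafusion::{common::DFSchema, error::Result};

    // Sketch of DataFusion 38 schema construction; "sample" and "region"
    // are hypothetical partition columns.
    fn partition_schema() -> Result<DFSchema> {
        let fields: Fields = vec![
            Field::new("sample", DataType::Utf8, false),
            Field::new("region", DataType::Utf8, true),
        ]
        .into();

        DFSchema::from_unqualifed_fields(fields, HashMap::new())
    }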
@@ -21,7 +21,7 @@ use datafusion::{
error::Result,
execution::context::SessionState,
logical_expr::{Expr, LogicalPlan},
- physical_plan::{insert::FileSinkExec, ExecutionPlan, PhysicalExpr},
+ physical_plan::{insert::DataSinkExec, ExecutionPlan, PhysicalExpr},
physical_planner::{DefaultPhysicalPlanner, PhysicalPlanner},
};

@@ -48,7 +48,7 @@ impl PhysicalPlanner for ExonPhysicalPlanner {
let runtime = session_state.runtime_env();

// try to downcast plan as FileSinkExec
- if let Some(file_sink) = plan.as_any().downcast_ref::<FileSinkExec>() {
+ if let Some(file_sink) = plan.as_any().downcast_ref::<DataSinkExec>() {
let sink = file_sink.sink();

// Try to downcast to a ParquetSink
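Not from the commit: a minimal sketch of the rename handled above — DataFusion 38 calls the sink wrapper `DataSinkExec` instead of `FileSinkExec`, so only the downcast target changes while the surrounding planner logic stays the same. The helper function name is hypothetical.

    use std::sync::Arc;

    use datafusion::physical_plan::{insert::DataSinkExec, ExecutionPlan};

    // Sketch: detect a sink node under DataFusion 38 and grab the wrapped
    // DataSink, mirroring the planner code above.
    fn is_sink_plan(plan: &Arc<dyn ExecutionPlan>) -> bool {
        if let Some(exec) = plan.as_any().downcast_ref::<DataSinkExec>() {
            let _sink = exec.sink(); // further downcasts (e.g. to a ParquetSink) go here
            return true;
        }
        false
    }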
Binary file modified exon/exon-core/test-data/datasources/fasta/test.parquet
