This repository has been archived by the owner on Feb 18, 2024. It is now read-only.

Fixed float to i128 cast #817

Merged 1 commit on Feb 6, 2022

13 changes: 6 additions & 7 deletions src/compute/cast/primitive_to.rs
@@ -1,6 +1,6 @@
use std::hash::Hash;

- use num_traits::{AsPrimitive, Float};
+ use num_traits::{AsPrimitive, Float, ToPrimitive};

use crate::error::Result;
use crate::{
@@ -203,10 +203,8 @@ pub fn float_to_decimal<T>(
to_scale: usize,
) -> PrimitiveArray<i128>
where
- T: NativeType + Float,
+ T: NativeType + Float + ToPrimitive,
f64: AsPrimitive<T>,
- i128: From<T>,
- T: AsPrimitive<f64>,
{
// 1.2 => 12
let multiplier: T = (10_f64).powi(to_scale as i32).as_();
@@ -218,7 +216,7 @@ where

let values = from.iter().map(|x| {
x.and_then(|x| {
- let x = i128::from(*x * multiplier);
+ let x = (*x * multiplier).to_i128().unwrap();
if x > max_for_precision || x < min_for_precision {
None
} else {
@@ -237,10 +235,11 @@ pub(super) fn float_to_decimal_dyn<T>(
scale: usize,
) -> Result<Box<dyn Array>>
where
- T: NativeType + AsPrimitive<i128>,
+ T: NativeType + Float + ToPrimitive,
+ f64: AsPrimitive<T>,
{
let from = from.as_any().downcast_ref().unwrap();
- Ok(Box::new(integer_to_decimal::<T>(from, precision, scale)))
+ Ok(Box::new(float_to_decimal::<T>(from, precision, scale)))
Collaborator (Author) commented:

this is the fix

}

/// Cast [`PrimitiveArray`] as a [`PrimitiveArray`]
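A note on the two changes above: the old bound `i128: From<T>` can never hold for `f32`/`f64` (std provides no lossy `From` impls from floats to integers), and `float_to_decimal_dyn` previously dispatched to `integer_to_decimal`. Below is a minimal standalone sketch, not arrow2 code, of the checked float-to-i128 conversion the patch switches to via `num_traits::ToPrimitive`; the helper name is made up for illustration.

```rust
use num_traits::ToPrimitive;

// Hypothetical helper mirroring the patched conversion: scale first, then
// convert to i128 with `ToPrimitive::to_i128`.
fn float_to_scaled_i128(value: f32, scale: u32) -> Option<i128> {
    // e.g. 2.4 with scale 2 becomes ~240.0, truncated toward zero to 240.
    // `to_i128` returns `None` for NaN, infinities, or out-of-range values,
    // where the diff's `.to_i128().unwrap()` would panic instead.
    (value * 10_f32.powi(scale as i32)).to_i128()
}

fn main() {
    assert_eq!(float_to_scaled_i128(2.4, 2), Some(240));
    assert_eq!(float_to_scaled_i128(1.123_456_8, 2), Some(112));
    assert_eq!(float_to_scaled_i128(f32::NAN, 2), None);
}
```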
19 changes: 14 additions & 5 deletions tests/it/compute/cast.rs
@@ -254,19 +254,28 @@ fn int32_to_decimal() {
#[test]
fn float32_to_decimal() {
let array = Float32Array::from(&[
- Some(2.0),
+ Some(2.4),
Some(10.0),
+ Some(1.123_456_8),
Some(-2.0),
Some(-10.0),
- Some(-100.0), // can't be represented in (1,0)
+ Some(-100.01), // can't be represented in (1,0)
None,
]);

- let b = cast(&array, &DataType::Decimal(1, 0), CastOptions::default()).unwrap();
+ let b = cast(&array, &DataType::Decimal(10, 2), CastOptions::default()).unwrap();
let c = b.as_any().downcast_ref::<PrimitiveArray<i128>>().unwrap();

- let expected = Int128Array::from(&[Some(2), Some(10), Some(-2), Some(-10), None, None])
- .to(DataType::Decimal(1, 0));
+ let expected = Int128Array::from(&[
+ Some(240),
+ Some(1000),
+ Some(112),
+ Some(-200),
+ Some(-1000),
+ Some(-10001),
+ None,
+ ])
+ .to(DataType::Decimal(10, 2));
assert_eq!(c, &expected)
}

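The new expected values follow from scaling each float by 10^scale and truncating toward zero, then checking that the result fits the target precision. A rough sketch of that rule, assuming the usual Decimal(precision, scale) convention of at most `precision` digits; the helper name is hypothetical, not arrow2 API.

```rust
// Rough sketch of how the expected test values are derived; not arrow2 code.
fn to_decimal_repr(x: f32, precision: u32, scale: i32) -> Option<i128> {
    // Scale and truncate toward zero: 2.4 -> 240, 1.123_456_8 -> 112, -100.01 -> -10001.
    let scaled = (x * 10_f32.powi(scale)) as i128;
    // Values needing more than `precision` digits become None, which is why
    // the old Decimal(1, 0) version of this test turned -100.0 into a null.
    let max_for_precision = 10_i128.pow(precision) - 1;
    if scaled > max_for_precision || scaled < -max_for_precision {
        None
    } else {
        Some(scaled)
    }
}

fn main() {
    assert_eq!(to_decimal_repr(-100.01, 10, 2), Some(-10_001)); // fits Decimal(10, 2)
    assert_eq!(to_decimal_repr(-100.0, 1, 0), None); // overflows Decimal(1, 0)
}
```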