diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs
index 9b1a384a0d06a..c7714afc4fa26 100644
--- a/src/libcore/num/mod.rs
+++ b/src/libcore/num/mod.rs
@@ -1219,6 +1219,66 @@ macro_rules! int_impl {
             }
         }
 
+        /// Wrapping (modular) division. Computes `self / other`,
+        /// wrapping around at the boundary of the type.
+        ///
+        /// The only case where such wrapping can occur is when one
+        /// divides `MIN / -1` on a signed type (where `MIN` is the
+        /// negative minimal value for the type); this is equivalent
+        /// to `-MIN`, a positive value that is too large to represent
+        /// in the type. In such a case, this function returns `MIN`
+        /// itself.
+        #[unstable(feature = "core", since = "1.0.0")]
+        #[inline(always)]
+        pub fn wrapping_div(self, rhs: $T) -> $T {
+            self.overflowing_div(rhs).0
+        }
+
+        /// Wrapping (modular) remainder. Computes `self % other`,
+        /// wrapping around at the boundary of the type.
+        ///
+        /// Such wrap-around never actually occurs mathematically;
+        /// implementation artifacts make `x % y` invalid for `MIN /
+        /// -1` on a signed type (where `MIN` is the negative
+        /// minimal value). In such a case, this function returns `0`.
+        #[unstable(feature = "core", since = "1.0.0")]
+        #[inline(always)]
+        pub fn wrapping_rem(self, rhs: $T) -> $T {
+            self.overflowing_rem(rhs).0
+        }
+
+        /// Wrapping (modular) negation. Computes `-self`,
+        /// wrapping around at the boundary of the type.
+        ///
+        /// The only case where such wrapping can occur is when one
+        /// negates `MIN` on a signed type (where `MIN` is the
+        /// negative minimal value for the type); this is a positive
+        /// value that is too large to represent in the type. In such
+        /// a case, this function returns `MIN` itself.
+        #[unstable(feature = "core", since = "1.0.0")]
+        #[inline(always)]
+        pub fn wrapping_neg(self) -> $T {
+            self.overflowing_neg().0
+        }
+
+        /// Panic-free bitwise shift-left; yields `self << mask(rhs)`,
+        /// where `mask` removes any high-order bits of `rhs` that
+        /// would cause the shift to exceed the bitwidth of the type.
+        #[unstable(feature = "core", since = "1.0.0")]
+        #[inline(always)]
+        pub fn wrapping_shl(self, rhs: u32) -> $T {
+            self.overflowing_shl(rhs).0
+        }
+
+        /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`,
+        /// where `mask` removes any high-order bits of `rhs` that
+        /// would cause the shift to exceed the bitwidth of the type.
+        #[unstable(feature = "core", since = "1.0.0")]
+        #[inline(always)]
+        pub fn wrapping_shr(self, rhs: u32) -> $T {
+            self.overflowing_shr(rhs).0
+        }
+
         /// Raises self to the power of `exp`, using exponentiation by squaring.
         ///
         /// # Examples
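The signed semantics documented in the hunk above are easiest to see at the `MIN` boundary. The snippet below is purely illustrative and not part of the patch; it assumes a current Rust toolchain where these `wrapping_*` methods and the `i8::MIN` associated constant are stable (at the time of this change they sat behind `#![feature(core)]`):

```rust
fn main() {
    // `MIN / -1` would be +128, which does not fit in i8, so it wraps back to MIN.
    assert_eq!(i8::MIN.wrapping_div(-1), i8::MIN);
    // The remainder of that same invalid division is defined to be 0.
    assert_eq!(i8::MIN.wrapping_rem(-1), 0);
    // `-MIN` is the same out-of-range value, so negation also returns MIN.
    assert_eq!(i8::MIN.wrapping_neg(), i8::MIN);
    // Shift amounts are masked to the bit width: 8 & 0b111 == 0, so nothing moves.
    assert_eq!(1i8.wrapping_shl(8), 1);
    assert_eq!(i8::MIN.wrapping_shr(8), i8::MIN);
}
```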
@@ -1739,6 +1799,66 @@ macro_rules! uint_impl {
             }
         }
 
+        /// Wrapping (modular) division. Computes `self / other`,
+        /// wrapping around at the boundary of the type.
+        ///
+        /// Wrapped division on unsigned types is just normal
+        /// division: there is no case in which the result can
+        /// exceed the range of the type, so no wrapping ever
+        /// actually occurs. The method exists so that every
+        /// arithmetic operation has a `wrapping_*` counterpart
+        /// on unsigned types as well as on signed ones.
+        #[unstable(feature = "core", since = "1.0.0")]
+        #[inline(always)]
+        pub fn wrapping_div(self, rhs: $T) -> $T {
+            self.overflowing_div(rhs).0
+        }
+
+        /// Wrapping (modular) remainder. Computes `self % other`,
+        /// wrapping around at the boundary of the type.
+        ///
+        /// Wrapped remainder on unsigned types is just the regular
+        /// remainder calculation: it can never wrap. The method
+        /// exists so that every arithmetic operation has a
+        /// `wrapping_*` counterpart on unsigned types as well.
+        #[unstable(feature = "core", since = "1.0.0")]
+        #[inline(always)]
+        pub fn wrapping_rem(self, rhs: $T) -> $T {
+            self.overflowing_rem(rhs).0
+        }
+
+        /// Wrapping (modular) negation. Computes `-self`,
+        /// wrapping around at the boundary of the type.
+        ///
+        /// Since unsigned types have no negative equivalents,
+        /// every application of this function to a non-zero
+        /// value wraps around; the result is the two's
+        /// complement of `self`, i.e. `!self + 1`. Negating
+        /// zero yields zero.
+        #[unstable(feature = "core", since = "1.0.0")]
+        #[inline(always)]
+        pub fn wrapping_neg(self) -> $T {
+            self.overflowing_neg().0
+        }
+
+        /// Panic-free bitwise shift-left; yields `self << mask(rhs)`,
+        /// where `mask` removes any high-order bits of `rhs` that
+        /// would cause the shift to exceed the bitwidth of the type.
+        #[unstable(feature = "core", since = "1.0.0")]
+        #[inline(always)]
+        pub fn wrapping_shl(self, rhs: u32) -> $T {
+            self.overflowing_shl(rhs).0
+        }
+
+        /// Panic-free bitwise shift-right; yields `self >> mask(rhs)`,
+        /// where `mask` removes any high-order bits of `rhs` that
+        /// would cause the shift to exceed the bitwidth of the type.
+        #[unstable(feature = "core", since = "1.0.0")]
+        #[inline(always)]
+        pub fn wrapping_shr(self, rhs: u32) -> $T {
+            self.overflowing_shr(rhs).0
+        }
+
         /// Raises self to the power of `exp`, using exponentiation by squaring.
         ///
         /// # Examples
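Every `wrapping_*` method added above is the value half of the corresponding `overflowing_*` operation, which is why `overflowing_neg` has to be added to the `OverflowingOps` trait in `wrapping.rs` below. A minimal sketch of that relationship, written against the inherent `overflowing_*` methods of current Rust rather than the unstable trait used in this patch:

```rust
fn main() {
    // `overflowing_*` returns (wrapped value, overflow flag); `wrapping_*` keeps only the value.
    let (val, overflowed) = i32::MIN.overflowing_div(-1);
    assert_eq!((val, overflowed), (i32::MIN, true));
    assert_eq!(i32::MIN.wrapping_div(-1), val);

    let (val, overflowed) = i32::MIN.overflowing_neg();
    assert_eq!((val, overflowed), (i32::MIN, true));
    assert_eq!(i32::MIN.wrapping_neg(), val);

    // When no overflow occurs, the wrapped value is just the ordinary result.
    assert_eq!(40i32.overflowing_neg(), (-40, false));
    assert_eq!(40i32.wrapping_neg(), -40);
}
```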
diff --git a/src/libcore/num/wrapping.rs b/src/libcore/num/wrapping.rs
index 28276d0bf0168..aa84708816b87 100644
--- a/src/libcore/num/wrapping.rs
+++ b/src/libcore/num/wrapping.rs
@@ -48,6 +48,7 @@ pub trait OverflowingOps {
 
     fn overflowing_div(self, rhs: Self) -> (Self, bool);
     fn overflowing_rem(self, rhs: Self) -> (Self, bool);
+    fn overflowing_neg(self) -> (Self, bool);
 
     fn overflowing_shl(self, rhs: u32) -> (Self, bool);
     fn overflowing_shr(self, rhs: u32) -> (Self, bool);
@@ -231,7 +232,7 @@ macro_rules! signed_overflowing_impl {
             #[inline(always)]
             fn overflowing_div(self, rhs: $t) -> ($t, bool) {
                 if self == $t::MIN && rhs == -1 {
-                    (1, true)
+                    (self, true)
                 } else {
                     (self/rhs, false)
                 }
@@ -255,6 +256,15 @@
                 (self >> (rhs & self::shift_max::$t),
                  (rhs > self::shift_max::$t))
             }
+
+            #[inline(always)]
+            fn overflowing_neg(self) -> ($t, bool) {
+                if self == $t::MIN {
+                    ($t::MIN, true)
+                } else {
+                    (-self, false)
+                }
+            }
         }
     )*)
 }
@@ -300,6 +310,11 @@ macro_rules! unsigned_overflowing_impl {
                 (self >> (rhs & self::shift_max::$t),
                  (rhs > self::shift_max::$t))
             }
+
+            #[inline(always)]
+            fn overflowing_neg(self) -> ($t, bool) {
+                ((!self).wrapping_add(1), true)
+            }
         }
     )*)
 }
@@ -341,6 +356,11 @@ impl OverflowingOps for usize {
         (r as usize, f)
     }
     #[inline(always)]
+    fn overflowing_neg(self) -> (usize, bool) {
+        let (r, f) = (self as u64).overflowing_neg();
+        (r as usize, f)
+    }
+    #[inline(always)]
     fn overflowing_shl(self, rhs: u32) -> (usize, bool) {
         let (r, f) = (self as u64).overflowing_shl(rhs);
         (r as usize, f)
@@ -386,6 +406,11 @@ impl OverflowingOps for usize {
         (r as usize, f)
     }
     #[inline(always)]
+    fn overflowing_neg(self) -> (usize, bool) {
+        let (r, f) = (self as u32).overflowing_neg();
+        (r as usize, f)
+    }
+    #[inline(always)]
     fn overflowing_shl(self, rhs: u32) -> (usize, bool) {
         let (r, f) = (self as u32).overflowing_shl(rhs);
         (r as usize, f)
@@ -431,6 +456,11 @@ impl OverflowingOps for isize {
         (r as isize, f)
     }
     #[inline(always)]
+    fn overflowing_neg(self) -> (isize, bool) {
+        let (r, f) = (self as i64).overflowing_neg();
+        (r as isize, f)
+    }
+    #[inline(always)]
     fn overflowing_shl(self, rhs: u32) -> (isize, bool) {
         let (r, f) = (self as i64).overflowing_shl(rhs);
         (r as isize, f)
@@ -476,6 +506,11 @@ impl OverflowingOps for isize {
         (r as isize, f)
     }
     #[inline(always)]
+    fn overflowing_neg(self) -> (isize, bool) {
+        let (r, f) = (self as i32).overflowing_neg();
+        (r as isize, f)
+    }
+    #[inline(always)]
     fn overflowing_shl(self, rhs: u32) -> (isize, bool) {
         let (r, f) = (self as i32).overflowing_shl(rhs);
         (r as isize, f)
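For unsigned types the `overflowing_neg` added above computes the two's complement directly: `!self + 1` with a wrapping add. A short sketch of what that means in practice, again using the stable inherent methods rather than the unstable `OverflowingOps` trait:

```rust
fn main() {
    // Unsigned wrapping negation is the two's complement of the value.
    for &x in &[0u32, 1, 2, 0x8000_0000, u32::MAX] {
        assert_eq!(x.wrapping_neg(), (!x).wrapping_add(1));
    }
    // Concretely: negating 1 wraps to the maximum value, and 0 stays 0.
    assert_eq!(1u32.wrapping_neg(), u32::MAX);
    assert_eq!(0u32.wrapping_neg(), 0);
}
```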
diff --git a/src/test/run-pass/wrapping-int-api.rs b/src/test/run-pass/wrapping-int-api.rs
new file mode 100644
index 0000000000000..e195d624fe529
--- /dev/null
+++ b/src/test/run-pass/wrapping-int-api.rs
@@ -0,0 +1,236 @@
+// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#![feature(core)]
+
+// Test inherent wrapping_* methods for {i,u}{size,8,16,32,64}.
+
+use std::{i8, i16, i32, i64, isize};
+use std::{u8, u16, u32, u64, usize};
+
+fn main() {
+    assert_eq!(   i8::MAX.wrapping_add(1),   i8::MIN);
+    assert_eq!(  i16::MAX.wrapping_add(1),  i16::MIN);
+    assert_eq!(  i32::MAX.wrapping_add(1),  i32::MIN);
+    assert_eq!(  i64::MAX.wrapping_add(1),  i64::MIN);
+    assert_eq!(isize::MAX.wrapping_add(1), isize::MIN);
+
+    assert_eq!(   i8::MIN.wrapping_sub(1),   i8::MAX);
+    assert_eq!(  i16::MIN.wrapping_sub(1),  i16::MAX);
+    assert_eq!(  i32::MIN.wrapping_sub(1),  i32::MAX);
+    assert_eq!(  i64::MIN.wrapping_sub(1),  i64::MAX);
+    assert_eq!(isize::MIN.wrapping_sub(1), isize::MAX);
+
+    assert_eq!(   u8::MAX.wrapping_add(1),   u8::MIN);
+    assert_eq!(  u16::MAX.wrapping_add(1),  u16::MIN);
+    assert_eq!(  u32::MAX.wrapping_add(1),  u32::MIN);
+    assert_eq!(  u64::MAX.wrapping_add(1),  u64::MIN);
+    assert_eq!(usize::MAX.wrapping_add(1), usize::MIN);
+
+    assert_eq!(   u8::MIN.wrapping_sub(1),   u8::MAX);
+    assert_eq!(  u16::MIN.wrapping_sub(1),  u16::MAX);
+    assert_eq!(  u32::MIN.wrapping_sub(1),  u32::MAX);
+    assert_eq!(  u64::MIN.wrapping_sub(1),  u64::MAX);
+    assert_eq!(usize::MIN.wrapping_sub(1), usize::MAX);
+
+    assert_eq!((0xfe_u8 as i8).wrapping_mul(16),
+               (0xe0_u8 as i8));
+    assert_eq!((0xfedc_u16 as i16).wrapping_mul(16),
+               (0xedc0_u16 as i16));
+    assert_eq!((0xfedc_ba98_u32 as i32).wrapping_mul(16),
+               (0xedcb_a980_u32 as i32));
+    assert_eq!((0xfedc_ba98_7654_3217_u64 as i64).wrapping_mul(16),
+               (0xedcb_a987_6543_2170_u64 as i64));
+
+    match () {
+        #[cfg(target_pointer_width = "32")]
+        () => {
+            assert_eq!((0xfedc_ba98_u32 as isize).wrapping_mul(16),
+                       (0xedcb_a980_u32 as isize));
+        }
+        #[cfg(target_pointer_width = "64")]
+        () => {
+            assert_eq!((0xfedc_ba98_7654_3217_u64 as isize).wrapping_mul(16),
+                       (0xedcb_a987_6543_2170_u64 as isize));
+        }
+    }
+
+    assert_eq!((0xfe as u8).wrapping_mul(16),
+               (0xe0 as u8));
+    assert_eq!((0xfedc as u16).wrapping_mul(16),
+               (0xedc0 as u16));
+    assert_eq!((0xfedc_ba98 as u32).wrapping_mul(16),
+               (0xedcb_a980 as u32));
+    assert_eq!((0xfedc_ba98_7654_3217 as u64).wrapping_mul(16),
+               (0xedcb_a987_6543_2170 as u64));
+
+    match () {
+        #[cfg(target_pointer_width = "32")]
+        () => {
+            assert_eq!((0xfedc_ba98 as usize).wrapping_mul(16),
+                       (0xedcb_a980 as usize));
+        }
+        #[cfg(target_pointer_width = "64")]
+        () => {
+            assert_eq!((0xfedc_ba98_7654_3217 as usize).wrapping_mul(16),
+                       (0xedcb_a987_6543_2170 as usize));
+        }
+    }
+
+    macro_rules! check_mul_no_wrap {
+        ($e:expr, $f:expr) => { assert_eq!(($e).wrapping_mul($f), ($e) * $f); }
+    }
+    macro_rules! check_mul_wraps {
+        ($e:expr, $f:expr) => { assert_eq!(($e).wrapping_mul($f), $e); }
+    }
+
+    check_mul_no_wrap!(0xfe_u8 as i8, -1);
+    check_mul_no_wrap!(0xfedc_u16 as i16, -1);
+    check_mul_no_wrap!(0xfedc_ba98_u32 as i32, -1);
+    check_mul_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -1);
+    check_mul_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, -1);
+
+    check_mul_no_wrap!(0xfe_u8 as i8, -2);
+    check_mul_no_wrap!(0xfedc_u16 as i16, -2);
+    check_mul_no_wrap!(0xfedc_ba98_u32 as i32, -2);
+    check_mul_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -2);
+    check_mul_no_wrap!(0xfedc_ba98_fedc_ba98_u64 as u64 as isize, -2);
+
+    check_mul_no_wrap!(0xfe_u8 as i8, 2);
+    check_mul_no_wrap!(0xfedc_u16 as i16, 2);
+    check_mul_no_wrap!(0xfedc_ba98_u32 as i32, 2);
+    check_mul_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, 2);
+    check_mul_no_wrap!(0xfedc_ba98_fedc_ba98_u64 as u64 as isize, 2);
+
+    check_mul_wraps!(0x80_u8 as i8, -1);
+    check_mul_wraps!(0x8000_u16 as i16, -1);
+    check_mul_wraps!(0x8000_0000_u32 as i32, -1);
+    check_mul_wraps!(0x8000_0000_0000_0000_u64 as i64, -1);
+    match () {
+        #[cfg(target_pointer_width = "32")]
+        () => {
+            check_mul_wraps!(0x8000_0000_u32 as isize, -1);
+        }
+        #[cfg(target_pointer_width = "64")]
+        () => {
+            check_mul_wraps!(0x8000_0000_0000_0000_u64 as isize, -1);
+        }
+    }
+
+    macro_rules! check_div_no_wrap {
+        ($e:expr, $f:expr) => { assert_eq!(($e).wrapping_div($f), ($e) / $f); }
+    }
+    macro_rules! check_div_wraps {
+        ($e:expr, $f:expr) => { assert_eq!(($e).wrapping_div($f), $e); }
+    }
+
+    check_div_no_wrap!(0xfe_u8 as i8, -1);
+    check_div_no_wrap!(0xfedc_u16 as i16, -1);
+    check_div_no_wrap!(0xfedc_ba98_u32 as i32, -1);
+    check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -1);
+    check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, -1);
+
+    check_div_no_wrap!(0xfe_u8 as i8, -2);
+    check_div_no_wrap!(0xfedc_u16 as i16, -2);
+    check_div_no_wrap!(0xfedc_ba98_u32 as i32, -2);
+    check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -2);
+    check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, -2);
+
+    check_div_no_wrap!(0xfe_u8 as i8, 2);
+    check_div_no_wrap!(0xfedc_u16 as i16, 2);
+    check_div_no_wrap!(0xfedc_ba98_u32 as i32, 2);
+    check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, 2);
+    check_div_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, 2);
+
+    check_div_wraps!(-128 as i8, -1);
+    check_div_wraps!(0x8000_u16 as i16, -1);
+    check_div_wraps!(0x8000_0000_u32 as i32, -1);
+    check_div_wraps!(0x8000_0000_0000_0000_u64 as i64, -1);
+    match () {
+        #[cfg(target_pointer_width = "32")]
+        () => {
+            check_div_wraps!(0x8000_0000_u32 as isize, -1);
+        }
+        #[cfg(target_pointer_width = "64")]
+        () => {
+            check_div_wraps!(0x8000_0000_0000_0000_u64 as isize, -1);
+        }
+    }
+
+
+    macro_rules! check_rem_no_wrap {
+        ($e:expr, $f:expr) => { assert_eq!(($e).wrapping_rem($f), ($e) % $f); }
+    }
+    macro_rules! check_rem_wraps {
+        ($e:expr, $f:expr) => { assert_eq!(($e).wrapping_rem($f), 0); }
+    }
+
+    check_rem_no_wrap!(0xfe_u8 as i8, -1);
+    check_rem_no_wrap!(0xfedc_u16 as i16, -1);
+    check_rem_no_wrap!(0xfedc_ba98_u32 as i32, -1);
+    check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -1);
+    check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, -1);
+
+    check_rem_no_wrap!(0xfe_u8 as i8, -2);
+    check_rem_no_wrap!(0xfedc_u16 as i16, -2);
+    check_rem_no_wrap!(0xfedc_ba98_u32 as i32, -2);
+    check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, -2);
+    check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, -2);
+
+    check_rem_no_wrap!(0xfe_u8 as i8, 2);
+    check_rem_no_wrap!(0xfedc_u16 as i16, 2);
+    check_rem_no_wrap!(0xfedc_ba98_u32 as i32, 2);
+    check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64, 2);
+    check_rem_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize, 2);
+
+    check_rem_wraps!(0x80_u8 as i8, -1);
+    check_rem_wraps!(0x8000_u16 as i16, -1);
+    check_rem_wraps!(0x8000_0000_u32 as i32, -1);
+    check_rem_wraps!(0x8000_0000_0000_0000_u64 as i64, -1);
+    match () {
+        #[cfg(target_pointer_width = "32")]
+        () => {
+            check_rem_wraps!(0x8000_0000_u32 as isize, -1);
+        }
+        #[cfg(target_pointer_width = "64")]
+        () => {
+            check_rem_wraps!(0x8000_0000_0000_0000_u64 as isize, -1);
+        }
+    }
+
+    macro_rules! check_neg_no_wrap {
+        ($e:expr) => { assert_eq!(($e).wrapping_neg(), -($e)); }
+    }
+    macro_rules! check_neg_wraps {
+        ($e:expr) => { assert_eq!(($e).wrapping_neg(), ($e)); }
+    }
+
+    check_neg_no_wrap!(0xfe_u8 as i8);
+    check_neg_no_wrap!(0xfedc_u16 as i16);
+    check_neg_no_wrap!(0xfedc_ba98_u32 as i32);
+    check_neg_no_wrap!(0xfedc_ba98_7654_3217_u64 as i64);
+    check_neg_no_wrap!(0xfedc_ba98_7654_3217_u64 as u64 as isize);
+
+    check_neg_wraps!(0x80_u8 as i8);
+    check_neg_wraps!(0x8000_u16 as i16);
+    check_neg_wraps!(0x8000_0000_u32 as i32);
+    check_neg_wraps!(0x8000_0000_0000_0000_u64 as i64);
+    match () {
+        #[cfg(target_pointer_width = "32")]
+        () => {
+            check_neg_wraps!(0x8000_0000_u32 as isize);
+        }
+        #[cfg(target_pointer_width = "64")]
+        () => {
+            check_neg_wraps!(0x8000_0000_0000_0000_u64 as isize);
+        }
+    }
+
+}
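The test above exercises `wrapping_add`, `wrapping_sub`, `wrapping_mul`, `wrapping_div`, `wrapping_rem` and `wrapping_neg`; the shift variants follow the masking rule described in the `mod.rs` doc comments. A small standalone illustration of that rule (not part of the patch, and assuming a toolchain where `wrapping_shl`/`wrapping_shr` are available):

```rust
fn main() {
    // The shift amount is reduced modulo the bit width of the type,
    // so shifting a u8 by 8 is a shift by 0, and by 9 is a shift by 1.
    assert_eq!(0x41_u8.wrapping_shl(8), 0x41);
    assert_eq!(0x41_u8.wrapping_shl(9), 0x82);
    assert_eq!(0x82_u8.wrapping_shr(9), 0x41);
    // The same masking applies to the signed variants (shift counts are u32).
    assert_eq!(0x40_i8.wrapping_shl(1), -128);
    assert_eq!((-128_i8).wrapping_shr(1), -64);
}
```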