@@ -177,7 +177,7 @@ macro_rules! checked_op {
// `Int` + `SignedInt` implemented for signed integers
macro_rules! int_impl {
-    ($ActualT:ident, $UnsignedT:ty, $BITS:expr,
+    ($SelfT:ty, $ActualT:ident, $UnsignedT:ty, $BITS:expr,
     $add_with_overflow:path,
     $sub_with_overflow:path,
     $mul_with_overflow:path) => {
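The macro now takes the surface type `$SelfT` (`i8` through `isize`) in addition to the underlying `$ActualT`. The reason, as the bodies below suggest, is that the `unchecked_shl`/`unchecked_shr` intrinsics take the shift amount with the same type as `self`, so the masked count has to be cast with `as $SelfT`; for `isize` and `usize` that type differs from `$ActualT`.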
@@ -850,6 +850,16 @@ macro_rules! int_impl {
        /// ```
        #[stable(feature = "num_wrapping", since = "1.2.0")]
        #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shl(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[cfg(stage0)]
        pub fn wrapping_shl(self, rhs: u32) -> Self {
            self.overflowing_shl(rhs).0
        }
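Two things to note in this hunk. The `#[cfg(stage0)]` twin keeps the old `overflowing_shl`-based body for the stage 0 bootstrap compiler, which predates the `unchecked_shl` intrinsic; the duplicate can typically be dropped after the next snapshot. Either way the observable behavior is unchanged: `wrapping_shl` reduces the shift count modulo the bit width, so the masked shift in the `unsafe` block is never out of range. A quick sanity check in ordinary stable Rust (illustration only, not part of the patch):

```rust
fn main() {
    // The count is masked to the type's bit width, so shifting an i8
    // by 8 behaves like shifting by 8 & 7 == 0 -- no panic, no UB.
    assert_eq!(1i8.wrapping_shl(7), i8::min_value()); // 0b1000_0000
    assert_eq!(1i8.wrapping_shl(8), 1);
    assert_eq!((-1i8).wrapping_shl(4), -16);          // 0xFF << 4 == 0xF0
}
```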
@@ -875,6 +885,16 @@ macro_rules! int_impl {
        /// ```
        #[stable(feature = "num_wrapping", since = "1.2.0")]
        #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shr(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[cfg(stage0)]
        pub fn wrapping_shr(self, rhs: u32) -> Self {
            self.overflowing_shr(rhs).0
        }
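`wrapping_shr` gets the same treatment. On signed types the underlying shift is arithmetic, i.e. the sign bit is replicated into the vacated bits, which is what `>>` already did; only the codegen path changes. For example (plain stable Rust, not part of the patch):

```rust
fn main() {
    // Arithmetic right shift: negative values stay negative.
    assert_eq!((-16i8).wrapping_shr(2), -4);  // 0xF0 >> 2 == 0xFC
    // The count is masked exactly as in wrapping_shl.
    assert_eq!((-16i8).wrapping_shr(10), -4); // 10 & 7 == 2
}
```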
@@ -1089,6 +1109,15 @@ macro_rules! int_impl {
        /// ```
        #[inline]
        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shl(rhs), (rhs > ($BITS - 1)))
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
        pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
            (self << (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
        }
@@ -1111,6 +1140,15 @@ macro_rules! int_impl {
        /// ```
        #[inline]
        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shr(rhs), (rhs > ($BITS - 1)))
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
        pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
            (self >> (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
        }
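The `overflowing_*` variants are now expressed in terms of the wrapping ones: the value is always the masked shift, and the boolean reports whether the requested count exceeded the bit width and was therefore masked. For example (documented stable behavior, not part of the patch):

```rust
fn main() {
    assert_eq!(1i32.overflowing_shl(4), (16, false));
    assert_eq!(1i32.overflowing_shl(36), (16, true)); // 36 & 31 == 4
    assert_eq!(16i32.overflowing_shr(36), (1, true));
}
```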
@@ -1268,39 +1306,39 @@ macro_rules! int_impl {

#[lang = "i8"]
impl i8 {
-    int_impl! { i8, u8, 8,
+    int_impl! { i8, i8, u8, 8,
        intrinsics::add_with_overflow,
        intrinsics::sub_with_overflow,
        intrinsics::mul_with_overflow }
}

#[lang = "i16"]
impl i16 {
-    int_impl! { i16, u16, 16,
+    int_impl! { i16, i16, u16, 16,
        intrinsics::add_with_overflow,
        intrinsics::sub_with_overflow,
        intrinsics::mul_with_overflow }
}

#[lang = "i32"]
impl i32 {
-    int_impl! { i32, u32, 32,
+    int_impl! { i32, i32, u32, 32,
        intrinsics::add_with_overflow,
        intrinsics::sub_with_overflow,
        intrinsics::mul_with_overflow }
}

#[lang = "i64"]
impl i64 {
-    int_impl! { i64, u64, 64,
+    int_impl! { i64, i64, u64, 64,
        intrinsics::add_with_overflow,
        intrinsics::sub_with_overflow,
        intrinsics::mul_with_overflow }
}

#[lang = "i128"]
impl i128 {
-    int_impl! { i128, u128, 128,
+    int_impl! { i128, i128, u128, 128,
        intrinsics::add_with_overflow,
        intrinsics::sub_with_overflow,
        intrinsics::mul_with_overflow }
@@ -1309,7 +1347,7 @@ impl i128 {
#[cfg(target_pointer_width = "16")]
#[lang = "isize"]
impl isize {
-    int_impl! { i16, u16, 16,
+    int_impl! { isize, i16, u16, 16,
        intrinsics::add_with_overflow,
        intrinsics::sub_with_overflow,
        intrinsics::mul_with_overflow }
@@ -1318,7 +1356,7 @@ impl isize {
#[cfg(target_pointer_width = "32")]
#[lang = "isize"]
impl isize {
-    int_impl! { i32, u32, 32,
+    int_impl! { isize, i32, u32, 32,
        intrinsics::add_with_overflow,
        intrinsics::sub_with_overflow,
        intrinsics::mul_with_overflow }
@@ -1327,15 +1365,15 @@ impl isize {
#[cfg(target_pointer_width = "64")]
#[lang = "isize"]
impl isize {
-    int_impl! { i64, u64, 64,
+    int_impl! { isize, i64, u64, 64,
        intrinsics::add_with_overflow,
        intrinsics::sub_with_overflow,
        intrinsics::mul_with_overflow }
}

// `Int` + `UnsignedInt` implemented for unsigned integers
macro_rules! uint_impl {
-    ($ActualT:ty, $BITS:expr,
+    ($SelfT:ty, $ActualT:ty, $BITS:expr,
     $ctpop:path,
     $ctlz:path,
     $cttz:path,
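`uint_impl!` gains the same leading `$SelfT` parameter for the same reason: its bodies cast the masked shift count with `as $SelfT`, and for `usize` that type is not the same as `$ActualT`.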
@@ -1978,6 +2016,16 @@ macro_rules! uint_impl {
        /// ```
        #[stable(feature = "num_wrapping", since = "1.2.0")]
        #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shl(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[cfg(stage0)]
        pub fn wrapping_shl(self, rhs: u32) -> Self {
            self.overflowing_shl(rhs).0
        }
@@ -2003,6 +2051,16 @@ macro_rules! uint_impl {
        /// ```
        #[stable(feature = "num_wrapping", since = "1.2.0")]
        #[inline(always)]
+        #[cfg(not(stage0))]
+        pub fn wrapping_shr(self, rhs: u32) -> Self {
+            unsafe {
+                intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT)
+            }
+        }
+
+        /// Stage 0
+        #[stable(feature = "num_wrapping", since = "1.2.0")]
+        #[cfg(stage0)]
        pub fn wrapping_shr(self, rhs: u32) -> Self {
            self.overflowing_shr(rhs).0
        }
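The unsigned hunks mirror the signed ones; the only semantic difference is that right shifts on unsigned types are logical, filling with zeros from the top. For instance (plain stable Rust, not part of the patch):

```rust
fn main() {
    // Logical right shift: zeros enter from the high end.
    assert_eq!(0xF0u8.wrapping_shr(2), 0x3C);
    assert_eq!(0xF0u8.wrapping_shr(10), 0x3C); // 10 & 7 == 2
}
```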
@@ -2170,6 +2228,15 @@ macro_rules! uint_impl {
        /// ```
        #[inline]
        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shl(rhs), (rhs > ($BITS - 1)))
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
        pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) {
            (self << (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
        }
@@ -2192,6 +2259,15 @@ macro_rules! uint_impl {
        /// ```
        #[inline]
        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(not(stage0))]
+        pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
+            (self.wrapping_shr(rhs), (rhs > ($BITS - 1)))
+        }
+
+        /// Stage 0
+        #[inline]
+        #[stable(feature = "wrapping", since = "1.7.0")]
+        #[cfg(stage0)]
        pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) {
            (self >> (rhs & ($BITS - 1)), (rhs > ($BITS - 1)))
        }
@@ -2292,7 +2369,7 @@ macro_rules! uint_impl {

#[lang = "u8"]
impl u8 {
-    uint_impl! { u8, 8,
+    uint_impl! { u8, u8, 8,
        intrinsics::ctpop,
        intrinsics::ctlz,
        intrinsics::cttz,
@@ -2304,7 +2381,7 @@ impl u8 {

#[lang = "u16"]
impl u16 {
-    uint_impl! { u16, 16,
+    uint_impl! { u16, u16, 16,
        intrinsics::ctpop,
        intrinsics::ctlz,
        intrinsics::cttz,
@@ -2316,7 +2393,7 @@ impl u16 {

#[lang = "u32"]
impl u32 {
-    uint_impl! { u32, 32,
+    uint_impl! { u32, u32, 32,
        intrinsics::ctpop,
        intrinsics::ctlz,
        intrinsics::cttz,
@@ -2328,7 +2405,7 @@ impl u32 {

#[lang = "u64"]
impl u64 {
-    uint_impl! { u64, 64,
+    uint_impl! { u64, u64, 64,
        intrinsics::ctpop,
        intrinsics::ctlz,
        intrinsics::cttz,
@@ -2340,7 +2417,7 @@ impl u64 {

#[lang = "u128"]
impl u128 {
-    uint_impl! { u128, 128,
+    uint_impl! { u128, u128, 128,
        intrinsics::ctpop,
        intrinsics::ctlz,
        intrinsics::cttz,
@@ -2353,7 +2430,7 @@ impl u128 {
#[cfg(target_pointer_width = "16")]
#[lang = "usize"]
impl usize {
-    uint_impl! { u16, 16,
+    uint_impl! { usize, u16, 16,
        intrinsics::ctpop,
        intrinsics::ctlz,
        intrinsics::cttz,
@@ -2365,7 +2442,7 @@ impl usize {
#[cfg(target_pointer_width = "32")]
#[lang = "usize"]
impl usize {
-    uint_impl! { u32, 32,
+    uint_impl! { usize, u32, 32,
        intrinsics::ctpop,
        intrinsics::ctlz,
        intrinsics::cttz,
@@ -2378,7 +2455,7 @@ impl usize {
#[cfg(target_pointer_width = "64")]
#[lang = "usize"]
impl usize {
-    uint_impl! { u64, 64,
+    uint_impl! { usize, u64, 64,
        intrinsics::ctpop,
        intrinsics::ctlz,
        intrinsics::cttz,