@@ -67,7 +67,7 @@
 use crate::cmp::Ordering;
 use crate::fmt;
 use crate::hash;
-use crate::intrinsics::{self, abort, is_aligned_and_not_null, is_nonoverlapping};
+use crate::intrinsics::{self, abort, is_aligned_and_not_null};
 use crate::mem::{self, MaybeUninit};
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -394,7 +394,8 @@ pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
-pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
     // Give ourselves some scratch space to work with.
     // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
     let mut tmp = MaybeUninit::<T>::uninit();
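The scratch-space technique this comment describes can be shown standalone; a minimal sketch of how the rest of the body (outside this hunk) proceeds, where the helper name swap_sketch is illustrative rather than the library's:

use core::mem::MaybeUninit;
use core::ptr;

// Sketch: swap two possibly-overlapping values through uninitialized
// scratch space. `MaybeUninit` does nothing when dropped, so no drop
// glue runs on the temporary.
unsafe fn swap_sketch<T>(x: *mut T, y: *mut T) {
    let mut tmp = MaybeUninit::<T>::uninit();
    ptr::copy_nonoverlapping(x, tmp.as_mut_ptr(), 1); // x -> tmp
    ptr::copy(y, x, 1); // y -> x; `copy`, since x and y may overlap
    ptr::copy_nonoverlapping(tmp.as_ptr(), y, 1); // tmp -> y
}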
@@ -451,16 +452,8 @@ pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
 /// ```
 #[inline]
 #[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
-pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
-    if cfg!(debug_assertions)
-        && !(is_aligned_and_not_null(x)
-            && is_aligned_and_not_null(y)
-            && is_nonoverlapping(x, y, count))
-    {
-        // Not panicking to keep codegen impact smaller.
-        abort();
-    }
-
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
     let x = x as *mut u8;
     let y = y as *mut u8;
     let len = mem::size_of::<T>() * count;
@@ -470,7 +463,8 @@ pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
 }
 
 #[inline]
-pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub(crate) const unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
     // For types smaller than the block optimization below,
     // just swap directly to avoid pessimizing codegen.
     if mem::size_of::<T>() < 32 {
@@ -488,7 +482,8 @@ pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
 }
 
 #[inline]
-unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+const unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
     // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
     // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
     // Haswell E processors. LLVM is more able to optimize if we give a struct a
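The block-swap idea that comment describes can be illustrated standalone; a simplified, non-const sketch, where the helper name swap_bytes_blockwise and the 32-byte BLOCK size are illustrative choices, not the library's actual tuning:

use core::mem::MaybeUninit;
use core::ptr;

const BLOCK: usize = 32;

// Sketch: swap byte ranges a fixed-size block at a time through a stack
// temporary, so LLVM can vectorize the fixed-size copies, then handle
// the remaining tail one byte at a time.
unsafe fn swap_bytes_blockwise(x: *mut u8, y: *mut u8, len: usize) {
    let mut i = 0;
    while i + BLOCK <= len {
        let mut t = MaybeUninit::<[u8; BLOCK]>::uninit();
        ptr::copy_nonoverlapping(x.add(i), t.as_mut_ptr() as *mut u8, BLOCK);
        ptr::copy_nonoverlapping(y.add(i), x.add(i), BLOCK);
        ptr::copy_nonoverlapping(t.as_ptr() as *const u8, y.add(i), BLOCK);
        i += BLOCK;
    }
    while i < len {
        let a = x.add(i).read();
        x.add(i).write(y.add(i).read());
        y.add(i).write(a);
        i += 1;
    }
}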
@@ -589,7 +584,8 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
-pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
+#[rustc_const_unstable(feature = "const_replace", issue = "83164")]
+pub const unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
     // SAFETY: the caller must guarantee that `dst` is valid to be
     // cast to a mutable reference (valid for writes, aligned, initialized),
     // and cannot overlap `src` since `dst` must point to a distinct
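Taken together, these changes make the pointer-swap family callable in const contexts on nightly. A minimal usage sketch, assuming a nightly toolchain from around this change with the unstable const_swap feature enabled (plus const_mut_refs, which gated mutable references in const fns at the time); the rotate function is a hypothetical caller:

#![feature(const_swap, const_mut_refs)]

use core::ptr;

// Hypothetical const caller: rotates three values left using ptr::swap.
const fn rotate(mut a: u8, mut b: u8, mut c: u8) -> (u8, u8, u8) {
    // SAFETY: the pointers come from three distinct locals, so they are
    // valid, aligned, and non-overlapping.
    unsafe {
        ptr::swap(&mut a, &mut b); // (2, 1, 3)
        ptr::swap(&mut b, &mut c); // (2, 3, 1)
    }
    (a, b, c)
}

const ROTATED: (u8, u8, u8) = rotate(1, 2, 3);

fn main() {
    assert_eq!(ROTATED, (2, 3, 1));
}

ptr::replace is gated the same way behind const_replace (tracking issue #83164).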