@@ -52,39 +52,44 @@ pub fn fill_bytes_via_next<R: RngCore + ?Sized>(rng: &mut R, dest: &mut [u8]) {
 }
 }
 
-/// Contract: implementing type must be memory-safe to observe as a byte array
-/// (implies no uninitialised padding).
-unsafe trait ToLe: Copy {
+trait Observable: Copy {
     type Bytes: AsRef<[u8]>;
     fn to_le_bytes(self) -> Self::Bytes;
+
+    // Contract: observing self is memory-safe (implies no uninitialised padding)
+    fn as_byte_slice(x: &[Self]) -> &[u8];
 }
-unsafe impl ToLe for u32 {
+impl Observable for u32 {
     type Bytes = [u8; 4];
     fn to_le_bytes(self) -> Self::Bytes {
         self.to_le_bytes()
     }
+    fn as_byte_slice(x: &[Self]) -> &[u8] {
+        let ptr = x.as_ptr() as *const u8;
+        let len = x.len() * core::mem::size_of::<Self>();
+        unsafe { core::slice::from_raw_parts(ptr, len) }
+    }
 }
-unsafe impl ToLe for u64 {
+impl Observable for u64 {
     type Bytes = [u8; 8];
     fn to_le_bytes(self) -> Self::Bytes {
         self.to_le_bytes()
     }
+    fn as_byte_slice(x: &[Self]) -> &[u8] {
+        let ptr = x.as_ptr() as *const u8;
+        let len = x.len() * core::mem::size_of::<Self>();
+        unsafe { core::slice::from_raw_parts(ptr, len) }
+    }
 }
 
-fn fill_via_chunks<T: ToLe>(src: &[T], dest: &mut [u8]) -> (usize, usize) {
+fn fill_via_chunks<T: Observable>(src: &[T], dest: &mut [u8]) -> (usize, usize) {
     let size = core::mem::size_of::<T>();
     let byte_len = min(src.len() * size, dest.len());
     let num_chunks = (byte_len + size - 1) / size;
 
     if cfg!(target_endian = "little") {
         // On LE we can do a simple copy, which is 25-50% faster:
-        unsafe {
-            core::ptr::copy_nonoverlapping(
-                src.as_ptr() as *const u8,
-                dest.as_mut_ptr(),
-                byte_len,
-            );
-        }
+        dest[..byte_len].copy_from_slice(&T::as_byte_slice(&src[..num_chunks])[..byte_len]);
     } else {
         // This code is valid on all arches, but slower than the above:
         let mut i = 0;
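For reference, below is a minimal, self-contained sketch of the little-endian copy path this diff introduces: a slice-to-bytes view via `Observable::as_byte_slice` feeding a length-checked `copy_from_slice`, instead of the old raw `core::ptr::copy_nonoverlapping` call. It is not the crate's actual module layout; the real trait also carries `type Bytes` / `fn to_le_bytes` for the big-endian fallback (omitted here), and the `main()` harness with its test values is purely illustrative.

```rust
use core::cmp::min;

trait Observable: Copy {
    // Contract: observing self is memory-safe (implies no uninitialised padding)
    fn as_byte_slice(x: &[Self]) -> &[u8];
}

impl Observable for u32 {
    fn as_byte_slice(x: &[Self]) -> &[u8] {
        // u32 has no padding bytes, so reinterpreting the slice as raw bytes is sound.
        let ptr = x.as_ptr() as *const u8;
        let len = x.len() * core::mem::size_of::<Self>();
        unsafe { core::slice::from_raw_parts(ptr, len) }
    }
}

// Returns (number of chunks consumed, number of bytes written), as in the diff;
// only the little-endian branch is reproduced here.
fn fill_via_chunks<T: Observable>(src: &[T], dest: &mut [u8]) -> (usize, usize) {
    let size = core::mem::size_of::<T>();
    let byte_len = min(src.len() * size, dest.len());
    let num_chunks = (byte_len + size - 1) / size;
    // The unsafe pointer copy now lives inside as_byte_slice; the call site is a
    // plain, bounds-checked slice copy.
    dest[..byte_len].copy_from_slice(&T::as_byte_slice(&src[..num_chunks])[..byte_len]);
    (num_chunks, byte_len)
}

fn main() {
    let src: [u32; 2] = [0x0403_0201, 0x0807_0605];
    let mut dest = [0u8; 6]; // shorter than src: the second chunk is copied only partially
    let (chunks, bytes) = fill_via_chunks(&src, &mut dest);
    assert_eq!((chunks, bytes), (2, 6));
    if cfg!(target_endian = "little") {
        // On a little-endian host the bytes come out in memory order.
        assert_eq!(dest, [1, 2, 3, 4, 5, 6]);
    }
    println!("copied {} bytes from {} chunks: {:?}", bytes, chunks, dest);
}
```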