@@ -264,9 +264,18 @@ impl<Tag, Extra> Allocation<Tag, Extra> {

/// Byte accessors.
impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
-    /// The last argument controls whether we error out when there are uninitialized
-    /// or pointer bytes. You should never call this, call `get_bytes` or
-    /// `get_bytes_with_uninit_and_ptr` instead,
+    /// This is the entirely abstraction-violating way to just grab the raw bytes without
+    /// caring about relocations. It just deduplicates some code between `read_scalar`
+    /// and `get_bytes_internal`.
+    fn get_bytes_even_more_internal(&self, range: AllocRange) -> &[u8] {
+        &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
+    }
+
+    /// The last argument controls whether we error out when there are uninitialized or pointer
+    /// bytes. However, we *always* error when there are relocations overlapping the edges of the
+    /// range.
+    ///
+    /// You should never call this, call `get_bytes` or `get_bytes_with_uninit_and_ptr` instead,
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
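The new `get_bytes_even_more_internal` is nothing more than a range-to-subslice conversion. A minimal standalone sketch of that pattern, using simplified stand-ins for `Size`, `AllocRange`, and the allocation type (not the interpreter's real definitions):

#[derive(Clone, Copy)]
struct Size(u64);

impl Size {
    fn bytes_usize(self) -> usize {
        usize::try_from(self.0).unwrap()
    }
}

#[derive(Clone, Copy)]
struct AllocRange {
    start: Size,
    size: Size,
}

impl AllocRange {
    fn end(self) -> Size {
        Size(self.start.0 + self.size.0)
    }
}

struct Allocation {
    bytes: Vec<u8>,
}

impl Allocation {
    // Mirrors the helper: no init or relocation checks, just the raw bytes.
    fn get_bytes_even_more_internal(&self, range: AllocRange) -> &[u8] {
        &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
    }
}

fn main() {
    let alloc = Allocation { bytes: vec![0xde, 0xad, 0xbe, 0xef] };
    let range = AllocRange { start: Size(1), size: Size(2) };
    assert_eq!(alloc.get_bytes_even_more_internal(range), &[0xad, 0xbe]);
}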
@@ -287,7 +296,7 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
            self.check_relocation_edges(cx, range)?;
        }

-        Ok(&self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
+        Ok(self.get_bytes_even_more_internal(range))
    }

    /// Checks that these bytes are initialized and not pointer bytes, and then return them
@@ -373,6 +382,9 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {

    /// Reads a *non-ZST* scalar.
    ///
+    /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
+    /// supports that) provenance is entirely ignored.
+    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
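The rewritten `read_scalar` in the next hunk decodes raw bytes with `read_target_uint` in each of its branches. As a rough sketch of what such a target-endian decoder does (the real rustc helper returns a `Result` and takes rustc's own `Endian` type, so treat this as an approximation, not its actual signature):

#[derive(Clone, Copy)]
enum Endian {
    Little,
    Big,
}

// Decode an unsigned integer of up to 16 bytes from a target-endian slice.
fn read_target_uint(endian: Endian, bytes: &[u8]) -> u128 {
    assert!(bytes.len() <= 16);
    let mut buf = [0u8; 16];
    match endian {
        Endian::Little => {
            buf[..bytes.len()].copy_from_slice(bytes);
            u128::from_le_bytes(buf)
        }
        Endian::Big => {
            buf[16 - bytes.len()..].copy_from_slice(bytes);
            u128::from_be_bytes(buf)
        }
    }
}

fn main() {
    assert_eq!(read_target_uint(Endian::Little, &[0x34, 0x12]), 0x1234);
    assert_eq!(read_target_uint(Endian::Big, &[0x12, 0x34]), 0x1234);
}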
@@ -382,35 +394,47 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
+        read_provenance: bool,
    ) -> AllocResult<ScalarMaybeUninit<Tag>> {
-        // `get_bytes_with_uninit_and_ptr` tests relocation edges.
-        // We deliberately error when loading data that partially has provenance, or partially
-        // initialized data (that's the check below), into a scalar. The LLVM semantics of this are
-        // unclear so we are conservative. See <https://github.com/rust-lang/rust/issues/69488> for
-        // further discussion.
-        let bytes = self.get_bytes_with_uninit_and_ptr(cx, range)?;
-        // Uninit check happens *after* we established that the alignment is correct.
-        // We must not return `Ok()` for unaligned pointers!
+        if read_provenance {
+            assert_eq!(range.size, cx.data_layout().pointer_size);
+        }
+
+        // First and foremost, if anything is uninit, bail.
        if self.is_init(range).is_err() {
            // This inflates uninitialized bytes to the entire scalar, even if only a few
            // bytes are uninitialized.
            return Ok(ScalarMaybeUninit::Uninit);
        }
-        // Now we do the actual reading.
-        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
-        // See if we got a pointer.
-        if range.size != cx.data_layout().pointer_size {
-            // Not a pointer.
-            // *Now*, we better make sure that the inside is free of relocations too.
-            self.check_relocations(cx, range)?;
-        } else {
-            // Maybe a pointer.
-            if let Some(&prov) = self.relocations.get(&range.start) {
-                let ptr = Pointer::new(prov, Size::from_bytes(bits));
-                return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
-            }
+
+        // If we are doing a pointer read, and there is a relocation exactly where we
+        // are reading, then we can put data and relocation back together and return that.
+        if read_provenance && let Some(&prov) = self.relocations.get(&range.start) {
+            // We already checked init and relocations, so we can use this function.
+            let bytes = self.get_bytes_even_more_internal(range);
+            let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
+            let ptr = Pointer::new(prov, Size::from_bytes(bits));
+            return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
        }
-        // We don't. Just return the bits.
+
+        // If we are *not* reading a pointer, and we can just ignore relocations,
+        // then do exactly that.
+        if !read_provenance && Tag::OFFSET_IS_ADDR {
+            // We just strip provenance.
+            let bytes = self.get_bytes_even_more_internal(range);
+            let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
+            return Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)));
+        }
+
+        // It's complicated. Better make sure there is no provenance anywhere.
+        // FIXME: If !OFFSET_IS_ADDR, this is the best we can do. But if OFFSET_IS_ADDR, then
+        // `read_provenance` is true and we ideally would distinguish the following two cases:
+        // - The entire `range` is covered by 2 relocations for the same provenance.
+        //   Then we should return a pointer with that provenance.
+        // - The range has inhomogeneous provenance. Then we should return just the
+        //   underlying bits.
+        let bytes = self.get_bytes(cx, range)?;
+        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
        Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
    }
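Summarizing the new control flow outside rustc's types: bail if anything is uninit, recombine bits and relocation for a provenance read, strip provenance for an integer read when offsets are addresses, and otherwise insist the range is relocation-free. A self-contained sketch under simplifying assumptions (integer provenance ids, a per-byte init mask, little-endian decoding, and a const stand-in for `Tag::OFFSET_IS_ADDR`):

use std::collections::BTreeMap;

const OFFSET_IS_ADDR: bool = false; // stand-in for `Tag::OFFSET_IS_ADDR`

#[derive(Debug, PartialEq)]
enum ScalarRead {
    Uninit,
    Int(u128),
    Ptr { prov: u32, offset: u128 },
}

struct Alloc {
    bytes: Vec<u8>,
    init: Vec<bool>,                   // per-byte init mask (simplified)
    relocations: BTreeMap<usize, u32>, // start offset -> provenance id
}

impl Alloc {
    fn read_scalar(
        &self,
        start: usize,
        size: usize,
        pointer_size: usize,
        read_provenance: bool,
    ) -> Result<ScalarRead, &'static str> {
        if read_provenance {
            assert_eq!(size, pointer_size);
        }
        // 1. If anything is uninit, the whole read is uninit.
        if self.init[start..start + size].iter().any(|&b| !b) {
            return Ok(ScalarRead::Uninit);
        }
        let bits = || {
            let mut buf = [0u8; 16];
            buf[..size].copy_from_slice(&self.bytes[start..start + size]);
            u128::from_le_bytes(buf) // little-endian for the sketch
        };
        // 2. Pointer read with a relocation exactly at the start:
        //    recombine provenance and offset bits into a pointer.
        if read_provenance {
            if let Some(&prov) = self.relocations.get(&start) {
                return Ok(ScalarRead::Ptr { prov, offset: bits() });
            }
        }
        // 3. Integer read where offsets are addresses anyway: just strip
        //    any provenance and return the bits.
        if !read_provenance && OFFSET_IS_ADDR {
            return Ok(ScalarRead::Int(bits()));
        }
        // 4. Fallback: error out unless the range is free of relocations.
        let overlaps = self
            .relocations
            .range(..start + size)
            .next_back()
            .map_or(false, |(&o, _)| o + pointer_size > start);
        if overlaps {
            return Err("unsupported partial/inhomogeneous provenance");
        }
        Ok(ScalarRead::Int(bits()))
    }
}

fn main() {
    let mut relocations = BTreeMap::new();
    relocations.insert(0, 7); // provenance id 7 at offset 0
    let alloc = Alloc {
        bytes: vec![0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],
        init: vec![true; 8],
        relocations,
    };
    // Pointer read recovers provenance + offset bits.
    assert_eq!(
        alloc.read_scalar(0, 8, 8, true).unwrap(),
        ScalarRead::Ptr { prov: 7, offset: 0x10 }
    );
    // Integer read over the same bytes errors (OFFSET_IS_ADDR is false here).
    assert!(alloc.read_scalar(0, 8, 8, false).is_err());
}

The final branch mirrors the FIXME in the hunk: once a relocation merely overlaps the range, this model cannot reconstruct a pointer, so it conservatively errors.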
@@ -513,8 +537,9 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
        let start = range.start;
        let end = range.end();

-        // We need to handle clearing the relocations from parts of a pointer. See
-        // <https://github.com/rust-lang/rust/issues/87184> for details.
+        // We need to handle clearing the relocations from parts of a pointer.
+        // FIXME: Miri should preserve partial relocations; see
+        // https://github.com/rust-lang/miri/issues/2181.
        if first < start {
            if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
                return Err(AllocError::PartialPointerOverwrite(first));
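For context on the edge case this hunk touches: when a write begins in the middle of a stored pointer, the machine either rejects the write or must drop the relocation. A rough standalone sketch of the leading-edge bookkeeping, assuming relocations keyed by start offset in a `BTreeMap` and a const stand-in for `Tag::ERR_ON_PARTIAL_PTR_OVERWRITE` (the real code also handles the trailing edge and de-initializes the surviving pointer bytes):

use std::collections::BTreeMap;

const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = true;

fn clear_relocations(
    relocations: &mut BTreeMap<usize, u32>,
    start: usize,
    end: usize,
    ptr_size: usize,
) -> Result<(), String> {
    // Find the start of the first relocation overlapping `start..end`.
    let first = relocations
        .range(start.saturating_sub(ptr_size - 1)..end)
        .next()
        .map(|(&o, _)| o);
    let Some(first) = first else { return Ok(()) };
    if first < start && ERR_ON_PARTIAL_PTR_OVERWRITE {
        // Overwriting only part of a pointer: not supported in this mode.
        return Err(format!("partial pointer overwrite at offset {first}"));
    }
    // Drop every relocation whose start lies in the overlapping window.
    let keys: Vec<usize> = relocations.range(first..end).map(|(&o, _)| o).collect();
    for k in keys {
        relocations.remove(&k);
    }
    Ok(())
}

fn main() {
    let mut relocs = BTreeMap::new();
    relocs.insert(0, 1); // an 8-byte pointer stored at offset 0
    // Overwriting bytes 4..6 would cut that pointer in half.
    assert!(clear_relocations(&mut relocs, 4, 6, 8).is_err());
    // Overwriting the whole pointer is fine and removes the relocation.
    assert!(clear_relocations(&mut relocs, 0, 8, 8).is_ok());
    assert!(relocs.is_empty());
}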