@@ -396,38 +396,99 @@ fn slice_write_vectored(
     Ok(nwritten)
 }
 
-// Resizing write implementation
-fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
-where
-    A: Allocator,
-{
+/// Reserves the required space, and pads the vec with 0s if necessary.
+fn reserve_and_pad<A: Allocator>(
+    pos_mut: &mut u64,
+    vec: &mut Vec<u8, A>,
+    buf_len: usize,
+) -> io::Result<usize> {
     let pos: usize = (*pos_mut).try_into().map_err(|_| {
         io::const_io_error!(
             ErrorKind::InvalidInput,
             "cursor position exceeds maximum possible vector length",
         )
     })?;
-    // Make sure the internal buffer is as least as big as where we
-    // currently are
-    let len = vec.len();
-    if len < pos {
-        // use `resize` so that the zero filling is as efficient as possible
-        vec.resize(pos, 0);
-    }
-    // Figure out what bytes will be used to overwrite what's currently
-    // there (left), and what will be appended on the end (right)
-    {
-        let space = vec.len() - pos;
-        let (left, right) = buf.split_at(cmp::min(space, buf.len()));
-        vec[pos..pos + left.len()].copy_from_slice(left);
-        vec.extend_from_slice(right);
+
+    // For safety reasons, we don't want these numbers to overflow
+    // otherwise our allocation won't be enough
+    let desired_cap = pos.saturating_add(buf_len);
+    if desired_cap > vec.capacity() {
+        // We want our vec's total capacity
+        // to have room for (pos+buf_len) bytes. Reserve allocates
+        // based on additional elements from the length, so we need to
+        // reserve the difference
+        vec.reserve(desired_cap - vec.len());
+    }
+    // Pad if pos is above the current len.
+    if pos > vec.len() {
+        let diff = pos - vec.len();
+        // Unfortunately, `resize()` would suffice but the optimiser does not
+        // realise the `reserve` it does can be eliminated. So we do it manually
+        // to eliminate that extra branch
+        let spare = vec.spare_capacity_mut();
+        debug_assert!(spare.len() >= diff);
+        // Safety: we have allocated enough capacity for this.
+        // And we are only writing, not reading
+        unsafe {
+            spare.get_unchecked_mut(..diff).fill(core::mem::MaybeUninit::new(0));
+            vec.set_len(pos);
+        }
     }
 
+    Ok(pos)
+}
+
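The zero-padding that `reserve_and_pad` performs is observable through the public API: seeking a `Cursor<Vec<u8>>` past the end of its buffer and then writing fills the gap with zeros. A minimal sketch of that behaviour (not part of the patch, using only the stable `Cursor`, `Seek`, and `Write` APIs):

```rust
use std::io::{Cursor, Seek, SeekFrom, Write};

fn main() -> std::io::Result<()> {
    // Empty body, but position moved to 5: the write below must first
    // pad bytes 0..5 with zeros before "abc" lands at offset 5.
    let mut cur = Cursor::new(Vec::<u8>::new());
    cur.seek(SeekFrom::Start(5))?;
    cur.write_all(b"abc")?;
    assert_eq!(cur.get_ref(), &[0, 0, 0, 0, 0, b'a', b'b', b'c']);
    Ok(())
}
```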
+/// Writes the slice to the vec without allocating
+/// # Safety: vec must have buf.len() spare capacity
+unsafe fn vec_write_unchecked<A>(pos: usize, vec: &mut Vec<u8, A>, buf: &[u8]) -> usize
+where
+    A: Allocator,
+{
+    debug_assert!(vec.capacity() >= pos + buf.len());
+    vec.as_mut_ptr().add(pos).copy_from(buf.as_ptr(), buf.len());
+    pos + buf.len()
+}
+
+/// Resizing write implementation for [`Cursor`]
+///
+/// Cursor is allowed to have a pre-allocated and initialised
+/// vector body, but with a position of 0. This means the [`Write`]
+/// will overwrite the contents of the vec.
+///
+/// This also allows for the vec body to be empty, but with a position of N.
+/// This means that [`Write`] will pad the vec with 0 initially,
+/// before writing anything from that point
+fn vec_write<A>(pos_mut: &mut u64, vec: &mut Vec<u8, A>, buf: &[u8]) -> io::Result<usize>
+where
+    A: Allocator,
+{
+    let buf_len = buf.len();
+    let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
+
+    // Write the buf then progress the vec forward if necessary
+    // Safety: we have ensured that the capacity is available
+    // and that all bytes get written up to pos
+    unsafe {
+        pos = vec_write_unchecked(pos, vec, buf);
+        if pos > vec.len() {
+            vec.set_len(pos);
+        }
+    };
+
     // Bump us forward
-    *pos_mut = (pos + buf.len()) as u64;
-    Ok(buf.len())
+    *pos_mut += buf_len as u64;
+    Ok(buf_len)
 }
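The first case the doc comment above describes, a pre-allocated and initialised vec with position 0, means a write overwrites the front of the buffer in place rather than appending. A small usage sketch from the caller's side (not part of the patch):

```rust
use std::io::{Cursor, Write};

fn main() -> std::io::Result<()> {
    // Pre-allocated and initialised body, position 0: the write
    // overwrites the first two bytes in place instead of appending.
    let mut cur = Cursor::new(vec![1u8, 2, 3, 4, 5]);
    cur.write_all(b"ab")?;
    assert_eq!(cur.get_ref(), &[b'a', b'b', 3, 4, 5]);
    assert_eq!(cur.position(), 2);
    Ok(())
}
```

The opposite case, an empty body with a non-zero position, is the zero-padding shown after `reserve_and_pad` above.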
 
+/// Resizing write_vectored implementation for [`Cursor`]
+///
+/// Cursor is allowed to have a pre-allocated and initialised
+/// vector body, but with a position of 0. This means the [`Write`]
+/// will overwrite the contents of the vec.
+///
+/// This also allows for the vec body to be empty, but with a position of N.
+/// This means that [`Write`] will pad the vec with 0 initially,
+/// before writing anything from that point
 fn vec_write_vectored<A>(
     pos_mut: &mut u64,
     vec: &mut Vec<u8, A>,
@@ -436,11 +497,26 @@ fn vec_write_vectored<A>(
 where
     A: Allocator,
 {
-    let mut nwritten = 0;
-    for buf in bufs {
-        nwritten += vec_write(pos_mut, vec, buf)?;
+    // For safety reasons, we don't want this sum to overflow ever.
+    // If this saturates, the reserve should panic to avoid any unsound writing.
+    let buf_len = bufs.iter().fold(0usize, |a, b| a.saturating_add(b.len()));
+    let mut pos = reserve_and_pad(pos_mut, vec, buf_len)?;
+
+    // Write the buf then progress the vec forward if necessary
+    // Safety: we have ensured that the capacity is available
+    // and that all bytes get written up to the last pos
+    unsafe {
+        for buf in bufs {
+            pos = vec_write_unchecked(pos, vec, buf);
+        }
+        if pos > vec.len() {
+            vec.set_len(pos);
+        }
     }
-    Ok(nwritten)
+
+    // Bump us forward
+    *pos_mut += buf_len as u64;
+    Ok(buf_len)
 }
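Because `vec_write_vectored` now sums the buffer lengths up front and reserves once, a vectored write behaves like a single contiguous write of the combined payload. A small usage sketch (not part of the patch), using the stable `IoSlice` and `Write::write_vectored` APIs:

```rust
use std::io::{Cursor, IoSlice, Write};

fn main() -> std::io::Result<()> {
    let mut cur = Cursor::new(Vec::<u8>::new());
    // One reservation covers the combined length; each buffer is then
    // copied in turn and the total number of bytes written is returned.
    let bufs = [IoSlice::new(b"hello "), IoSlice::new(b"world")];
    let n = cur.write_vectored(&bufs)?;
    assert_eq!(n, 11);
    assert_eq!(cur.get_ref(), b"hello world");
    Ok(())
}
```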
 
 #[stable(feature = "rust1", since = "1.0.0")]