@@ -405,7 +405,7 @@ where
 // - pass large buffers to readers that do not initialize the spare capacity. this can amortize per-call overheads
 // - and finally pass not-too-small and not-too-large buffers to Windows read APIs because they manage to suffer from both problems
 //   at the same time, i.e. small reads suffer from syscall overhead, all reads incur costs proportional to buffer size (#110650)
-//
+// - also avoid <4 byte reads as this may split UTF-8 code points, which can be a problem for Windows console reads (#142847)
 pub(crate) fn default_read_to_end<R: Read + ?Sized>(
     r: &mut R,
     buf: &mut Vec<u8>,
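The new bullet tracks #142847: a read buffer smaller than 4 bytes cannot hold every UTF-8 code point, so a short read can split one across calls. A minimal sketch of the encoding fact behind the comment (not the Windows console path itself, which is where the issue bites):

```rust
use std::io::{Cursor, Read};

fn main() {
    // "é" encodes to two bytes (0xC3, 0xA9) in UTF-8, and the widest
    // code points need four, hence the <4 byte guideline above.
    let mut src = Cursor::new("é".as_bytes());
    let mut byte = [0u8; 1];
    src.read_exact(&mut byte).unwrap();
    // A 1-byte read yields only the leading byte, which is not valid
    // UTF-8 on its own; a consumer decoding per read call would fail.
    assert!(std::str::from_utf8(&byte).is_err());
}
```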
@@ -452,7 +452,7 @@ pub(crate) fn default_read_to_end<R: Read + ?Sized>(
     let mut consecutive_short_reads = 0;
 
     loop {
-        if buf.len() == buf.capacity() && buf.capacity() == start_cap {
+        if buf.spare_capacity_mut().len() < PROBE_SIZE && buf.capacity() == start_cap {
             // The buffer might be an exact fit. Let's read into a probe buffer
             // and see if it returns `Ok(0)`. If so, we've avoided an
             // unnecessary doubling of the capacity. But if not, append the
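For context on the changed condition: the old `len == capacity` test only fired when the buffer was exactly full, so a buffer with a byte or two of spare room skipped the probe path and issued a degenerately small read instead. A hypothetical illustration of that gap:

```rust
fn main() {
    let mut buf: Vec<u8> = Vec::with_capacity(32);
    let cap = buf.capacity(); // at least 32; the allocator may round up
    buf.resize(cap - 2, 0);
    // Not full, so `buf.len() == buf.capacity()` is false, yet a read
    // into the spare capacity could return at most 2 bytes.
    assert_eq!(buf.spare_capacity_mut().len(), 2);
}
```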
@@ -462,12 +462,13 @@ pub(crate) fn default_read_to_end<R: Read + ?Sized>(
             if read == 0 {
                 return Ok(buf.len() - start_len);
             }
+            // In the case of very short reads, continue to use the stack buffer
+            // until either we reach the end or we need to reallocate.
+            continue;
         }
 
-        if buf.len() == buf.capacity() {
-            // buf is full, need more space
-            buf.try_reserve(PROBE_SIZE)?;
-        }
+        // Avoid unnecessarily short reads by ensuring there's at least PROBE_SIZE space available.
+        buf.try_reserve(PROBE_SIZE)?;
 
         let mut spare = buf.spare_capacity_mut();
         let buf_len = cmp::min(spare.len(), max_read_size);
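Taken together, the hunks implement a probe-read pattern: read into a small stack buffer first so an exact-fit `Vec` can observe EOF without doubling its allocation, and with the new `continue`, keep using that stack buffer across consecutive short reads. A self-contained sketch under assumed names (`PROBE_SIZE = 32`, a `probe_read` helper; the real helper in `std` differs):

```rust
use std::io::{self, Read};

const PROBE_SIZE: usize = 32; // assumed value for illustration

// Hypothetical helper: read into a stack buffer, then append to the Vec,
// so detecting EOF never forces the Vec to grow.
fn probe_read<R: Read + ?Sized>(r: &mut R, buf: &mut Vec<u8>) -> io::Result<usize> {
    let mut probe = [0u8; PROBE_SIZE];
    let n = r.read(&mut probe)?;
    buf.extend_from_slice(&probe[..n]);
    Ok(n)
}

fn main() -> io::Result<()> {
    let mut src: &[u8] = b"hello";
    let mut buf = Vec::with_capacity(5); // an exact fit for the input
    // The first probe drains the reader; the second returns Ok(0) (EOF)
    // without ever pushing `buf` past its original capacity.
    while probe_read(&mut src, &mut buf)? > 0 {}
    assert_eq!(buf, b"hello");
    Ok(())
}
```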