fix: Avoid infinitely looping on sync flush with short buffer writers
Closes #47
oyvindln committed Jul 4, 2020
1 parent 77227c8 commit 99a1a75
Showing 4 changed files with 59 additions and 2 deletions.
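
The core of the change is the new `sync_was_output_last` flag. On a sync flush, the compression loop used to append the empty stored block (the sync-flush marker) on every pass, so a writer that accepts only a couple of bytes per `write` call could never drain the pending output faster than new markers were added, and `flush()` never returned. Below is a minimal, self-contained sketch of that dynamic, not the crate's actual internals: the marker bytes, loop cap, and function name are illustrative placeholders.

// Sketch: why remembering that the sync marker was already written breaks the loop.
fn drain_with_sync_marker(chunk_per_write: usize, remember_sync: bool) -> bool {
    let mut pending: Vec<u8> = Vec::new();
    let mut sync_was_output_last = false;

    // Cap the iterations so the sketch terminates even when modelling the bug.
    for _ in 0..10_000 {
        // Each pass through the "compress" step appends the sync marker again,
        // unless we remember that it was already emitted (the fix).
        if !(remember_sync && sync_was_output_last) {
            pending.extend_from_slice(&[0u8; 5]); // placeholder for the empty stored block
            sync_was_output_last = true;
        }
        // The short-buffer writer only accepts `chunk_per_write` bytes per call.
        let taken = pending.len().min(chunk_per_write);
        pending.drain(..taken);
        if pending.is_empty() {
            return true; // flush completed
        }
    }
    false // pending output never drained: the infinite-loop case
}

fn main() {
    assert!(!drain_with_sync_marker(2, false)); // old behaviour: never drains
    assert!(drain_with_sync_marker(2, true));   // with the flag: terminates
}
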
14 changes: 12 additions & 2 deletions src/compress.rs
@@ -121,7 +121,7 @@ pub fn compress_data_dynamic_n<W: Write>(
}

if deflate_state.lz77_state.is_last_block() {
// The last block has already been written, so we don't ave anything to compress.
// The last block has already been written, so we don't have anything to compress.
break;
}

@@ -133,6 +133,11 @@ pub fn compress_data_dynamic_n<W: Write>(
flush,
);

if written > 0 {
// Reset sync status if we compress more data.
deflate_state.sync_was_output_last = false;
}

// Bytes written in this call
bytes_written += written;
// Total bytes written since the compression process started
@@ -253,7 +258,12 @@ pub fn compress_data_dynamic_n<W: Write>(
if status == LZ77Status::Finished {
// This flush mode means that there should be an empty stored block at the end.
if flush == Flush::Sync {
write_stored_block(&[], &mut deflate_state.encoder_state.writer, false);
// Only write this if we didn't already write it to the output buffer in a previous
// call.
if deflate_state.sync_was_output_last == false {
write_stored_block(&[], &mut deflate_state.encoder_state.writer, false);
deflate_state.sync_was_output_last = true;
}
} else if !deflate_state.lz77_state.is_last_block() {
// Make sure a block with the last block header has been output.
// Not sure this can actually happen, but we make sure to finish properly
8 changes: 8 additions & 0 deletions src/deflate_state.rs
@@ -88,6 +88,13 @@ pub struct DeflateState<W: Write> {
/// Number of bytes written as calculated by sum of block input lengths.
/// Used to check that they are correct when `debug_assertions` are enabled.
pub bytes_written_control: DebugCounter,
/// Whether the last data written to the output buffer was a sync flush.
/// Need to keep track of this in order to avoid infinitely looping on writers
/// that can't output the sync flush bytes in one go.
/// This is implemented in a somewhat clunky manner at the moment,
/// ideally it should be done in a more fail-safe way to avoid
/// further bugs.
pub sync_was_output_last: bool,
}

impl<W: Write> DeflateState<W> {
@@ -108,6 +115,7 @@ impl<W: Write> DeflateState<W> {
output_buf_pos: 0,
flush_mode: Flush::None,
bytes_written_control: DebugCounter::default(),
sync_was_output_last: false,
}
}

3 changes: 3 additions & 0 deletions src/writer.rs
@@ -51,6 +51,9 @@ pub fn compress_until_done<W: Write>(
}
}

// Reset sync status so it's possible to sync again later.
deflate_state.sync_was_output_last = false;

debug_assert_eq!(
deflate_state.bytes_written,
deflate_state.bytes_written_control.get()
36 changes: 36 additions & 0 deletions tests/test.rs
@@ -159,3 +159,39 @@ fn afl_regressions_default_compression() {
}
}
}


mod issue_47 {
use std::io::{self, Write};

#[test]
fn issue_47() {
let _ = deflate::write::ZlibEncoder::new(SmallWriter::new(vec![], 2), deflate::Compression::Fast).flush();
}

struct SmallWriter<W: Write> {
writer: W,
small: usize,
}

impl<W: Write> SmallWriter<W> {
fn new(writer: W, buf_len: usize) -> SmallWriter<W> {
SmallWriter {
writer,
small: buf_len,
}
}
}

impl<W: Write> Write for SmallWriter<W> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
// Never write more than `small` bytes at a time.
let small = buf.len().min(self.small);
self.writer.write(&buf[..small])
}

fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
}
