From 0d9773f416d381658b6181ac99236a34e6c13846 Mon Sep 17 00:00:00 2001
From: Patrice CONGO <39051536+congop@users.noreply.github.com>
Date: Sat, 29 Jan 2022 16:42:15 +0100
Subject: [PATCH 1/3] Fix for Identify() failing on empty and small files:

- Issue
  - Identify(,bytes.NewReader([]byte{})) returns an fmt.wrapError of io.EOF
  - Identify(,bytes.NewReader([]byte{'a'})) returns an fmt.wrapError of
    io.ErrUnexpectedEOF
  - the expected outcome is archiver.ErrNoMatch (i.e. not a compressed stream
    nor an archive)
- Cause: lack of handling of io.EOF and io.ErrUnexpectedEOF outcomes of
  io.ReadFull()
- Fix
  - consists in handling io.EOF and io.ErrUnexpectedEOF as cases of the
    stream not containing enough bytes
  - and returning the available bytes up to the requested size
  - @see archiver.head()
---
 bz2.go          |   4 +-
 formats_test.go | 247 ++++++++++++++++++++++++++++++++++++++++++++++++
 gz.go           |   4 +-
 lz4.go          |   4 +-
 rar.go          |  13 ++-
 streamhead.go   |  27 ++++++
 sz.go           |   4 +-
 xz.go           |   4 +-
 zip.go          |   4 +-
 zstd.go         |   4 +-
 10 files changed, 298 insertions(+), 17 deletions(-)
 create mode 100644 formats_test.go
 create mode 100644 streamhead.go

diff --git a/bz2.go b/bz2.go
index b62192d4..21e96dc6 100644
--- a/bz2.go
+++ b/bz2.go
@@ -28,8 +28,8 @@ func (bz Bz2) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf := make([]byte, len(bzip2Header))
-	if _, err := io.ReadFull(stream, buf); err != nil {
+	buf, err := head(stream, uint(len(bzip2Header)))
+	if err != nil {
 		return mr, err
 	}
 	mr.ByStream = bytes.Equal(buf, bzip2Header)
diff --git a/formats_test.go b/formats_test.go
new file mode 100644
index 00000000..ec42e047
--- /dev/null
+++ b/formats_test.go
@@ -0,0 +1,247 @@
+package archiver
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"io/fs"
+	"os"
+	"strings"
+	"testing"
+)
+
+func TestIdentifyCanAssessSmallOrNoConternt(t *testing.T) {
+	type args struct {
+		stream io.ReadSeeker
+	}
+	tests := []struct {
+		name string
+		args args
+	}{
+		{
+			name: "should return nomatch for an empty stream",
+			args: args{
+				stream: bytes.NewReader([]byte{}),
+			},
+		},
+		{
+			name: "should return nomatch for a stream with content size less than known header",
+			args: args{
+				stream: bytes.NewReader([]byte{'a'}),
+			},
+		},
+		{
+			name: "should return nomatch for a stream with content size greater then known header size and not supported format",
+			args: args{
+				stream: bytes.NewReader([]byte(strings.Repeat("this is a txt content", 2))),
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := Identify("", tt.args.stream)
+			if got != nil {
+				t.Errorf("no Format expected for non archive and not compressed stream: found Format= %v", got.Name())
+				return
+			}
+			if ErrNoMatch != err {
+				t.Fatalf("ErrNoMatch expected for non archive and not compressed stream: err :=%#v", err)
+				return
+			}
+
+		})
+	}
+}
+
+func compress(
+	t *testing.T, compName string, content []byte,
+	openwriter func(w io.Writer) (io.WriteCloser, error),
+) []byte {
+	buf := bytes.NewBuffer(make([]byte, 0, 128))
+	cwriter, err := openwriter(buf)
+	if err != nil {
+		t.Fatalf("fail to open compression writer: compression-name=%s, err=%#v", compName, err)
+		return nil
+	}
+	_, err = cwriter.Write(content)
+	if err != nil {
+		cerr := cwriter.Close()
+		t.Fatalf(
+			"fail to write using compression writer: compression-name=%s, err=%#v, close-err=%#v",
+			compName, err, cerr)
+		return nil
+	}
+	err = cwriter.Close()
+	if err != nil {
+		t.Fatalf("fail to close compression writer: compression-name=%s, err=%#v", compName,
+			err)
+		return nil
+	}
+	return buf.Bytes()
+}
+
+func archive(t *testing.T, arch Archiver, fname string, fileInfo fs.FileInfo) []byte {
+	files := []File{
+		{FileInfo: fileInfo, NameInArchive: "tmp.txt",
+			Open: func() (io.ReadCloser, error) {
+				return os.Open(fname)
+			}},
+	}
+	buf := bytes.NewBuffer(make([]byte, 0, 128))
+	err := arch.Archive(context.TODO(), buf, files)
+	if err != nil {
+		t.Fatalf("fail to create archive: err=%#v", err)
+		return nil
+	}
+	return buf.Bytes()
+
+}
+
+type writeNopCloser struct {
+	io.Writer
+}
+
+func (wnc writeNopCloser) Close() error {
+	return nil
+}
+
+func newWriteNopCloser(w io.Writer) (io.WriteCloser, error) {
+	return writeNopCloser{w}, nil
+}
+
+func newTmpTextFile(t *testing.T, content string) (string, fs.FileInfo) {
+
+	tmpTxtFile, err := os.CreateTemp("", "TestIdentifyFindFormatByStreamContent-tmp-*.txt")
+	if err != nil {
+		t.Fatalf("fail to create tmp test file for archive tests: err=%v", err)
+		return "", nil
+	}
+	fname := tmpTxtFile.Name()
+
+	if _, err = tmpTxtFile.Write([]byte(content)); err != nil {
+		tmpTxtFile.Close()
+		os.Remove(fname)
+		t.Fatalf("fail to write content to tmp-txt-file: err=%#v", err)
+		return "", nil
+	}
+	if err = tmpTxtFile.Close(); err != nil {
+		os.Remove(fname)
+		t.Fatalf("fail to close tmp-txt-file: err=%#v", err)
+		return "", nil
+	}
+	fi, err := os.Stat(fname)
+	if err != nil {
+		os.Remove(fname)
+		t.Fatalf("fail to get tmp-txt-file stats: err=%v", err)
+		return "", nil
+	}
+	return fname, fi
+}
+
+func TestIdentifyFindFormatByStreamContent(t *testing.T) {
+	tmpTxtFileName, tmpTxtFileInfo := newTmpTextFile(t, "this is text")
+	t.Cleanup(func() {
+		os.Remove(tmpTxtFileName)
+	})
+
+	tests := []struct {
+		name string
+		content []byte
+		openCompressionWriter func(w io.Writer) (io.WriteCloser, error)
+		compressorName string
+		wantFormatName string
+	}{
+		//TODO add test case for brotli when Brotli.Match() by stream content is implemented
+		{
+			name: "should recognize bz2",
+			openCompressionWriter: Bz2{}.OpenWriter,
+			content: []byte("this is text"),
+			compressorName: ".bz2",
+			wantFormatName: ".bz2",
+		},
+		{
+			name: "should recognize gz",
+			openCompressionWriter: Gz{}.OpenWriter,
+			content: []byte("this is text"),
+			compressorName: ".gz",
+			wantFormatName: ".gz",
+		},
+		{
+			name: "should recognize lz4",
+			openCompressionWriter: Lz4{}.OpenWriter,
+			content: []byte("this is text"),
+			compressorName: ".lz4",
+			wantFormatName: ".lz4",
+		},
+		{
+			name: "should recognize sz",
+			openCompressionWriter: Sz{}.OpenWriter,
+			content: []byte("this is text"),
+			compressorName: ".sz",
+			wantFormatName: ".sz",
+		},
+		{
+			name: "should recognize xz",
+			openCompressionWriter: Xz{}.OpenWriter,
+			content: []byte("this is text"),
+			compressorName: ".xz",
+			wantFormatName: ".xz",
+		},
+		{
+			name: "should recognize zst",
+			openCompressionWriter: Zstd{}.OpenWriter,
+			content: []byte("this is text"),
+			compressorName: ".zst",
+			wantFormatName: ".zst",
+		},
+		{
+			name: "should recognize tar",
+			openCompressionWriter: newWriteNopCloser,
+			content: archive(t, Tar{}, tmpTxtFileName, tmpTxtFileInfo),
+			compressorName: "",
+			wantFormatName: ".tar",
+		},
+		{
+			name: "should recognize tar.gz",
+			openCompressionWriter: Gz{}.OpenWriter,
+			content: archive(t, Tar{}, tmpTxtFileName, tmpTxtFileInfo),
+			compressorName: ".gz",
+			wantFormatName: ".tar.gz",
+		},
+		{
+			name: "should recognize zip",
+			openCompressionWriter: newWriteNopCloser,
+			content: archive(t, Zip{}, tmpTxtFileName, tmpTxtFileInfo),
+			compressorName: "",
+			wantFormatName: ".zip",
+		},
+		{
+			name: "should recognize rar by v5.0 header",
+			openCompressionWriter: newWriteNopCloser,
+			content: rarHeaderV5_0[:],
+			compressorName: "",
+			wantFormatName: ".rar",
+		},
+		{
+			name: "should recognize rar by v1.5 header",
+			openCompressionWriter: newWriteNopCloser,
+			content: rarHeaderV1_5[:],
+			compressorName: "",
+			wantFormatName: ".rar",
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			stream := bytes.NewReader(compress(t, tt.compressorName, tt.content, tt.openCompressionWriter))
+			got, err := Identify("", stream)
+			if err != nil {
+				t.Fatalf("should have found a corresponding Format: err :=%+v", err)
+				return
+			}
+			if tt.wantFormatName != got.Name() {
+				t.Errorf("unexpected format found: expected=%s actual:%s", tt.wantFormatName, got.Name())
+				return
+			}
+
+		})
+	}
+}
diff --git a/gz.go b/gz.go
index ae6b4ef5..7bf27505 100644
--- a/gz.go
+++ b/gz.go
@@ -36,8 +36,8 @@ func (gz Gz) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf := make([]byte, len(gzHeader))
-	if _, err := io.ReadFull(stream, buf); err != nil {
+	buf, err := head(stream, uint(len(gzHeader)))
+	if err != nil {
 		return mr, err
 	}
 	mr.ByStream = bytes.Equal(buf, gzHeader)
diff --git a/lz4.go b/lz4.go
index 659c3975..c0db15da 100644
--- a/lz4.go
+++ b/lz4.go
@@ -28,8 +28,8 @@ func (lz Lz4) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf := make([]byte, len(lz4Header))
-	if _, err := io.ReadFull(stream, buf); err != nil {
+	buf, err := head(stream, uint(len(lz4Header)))
+	if err != nil {
 		return mr, err
 	}
 	mr.ByStream = bytes.Equal(buf, lz4Header)
diff --git a/rar.go b/rar.go
index 213bed1b..5564321a 100644
--- a/rar.go
+++ b/rar.go
@@ -41,11 +41,18 @@ func (r Rar) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header (there are two versions; allocate buffer for larger one)
-	buf := make([]byte, len(rarHeaderV5_0))
-	if _, err := io.ReadFull(stream, buf); err != nil {
+	buf, err := head(stream, uint(len(rarHeaderV5_0)))
+	if err != nil {
 		return mr, err
 	}
-	mr.ByStream = bytes.Equal(buf[:len(rarHeaderV1_5)], rarHeaderV1_5) || bytes.Equal(buf, rarHeaderV5_0)
+
+	availLengthBufV1_5 := len(rarHeaderV1_5)
+	if availLengthBufV1_5 > len(buf) {
+		// because there may not be enough bytes in the stream
+		availLengthBufV1_5 = len(buf)
+	}
+
+	mr.ByStream = bytes.Equal(buf[:availLengthBufV1_5], rarHeaderV1_5) || bytes.Equal(buf, rarHeaderV5_0)
 
 	return mr, nil
 }
diff --git a/streamhead.go b/streamhead.go
new file mode 100644
index 00000000..6550028c
--- /dev/null
+++ b/streamhead.go
@@ -0,0 +1,27 @@
+package archiver
+
+import "io"
+
+// head returns the first maxBytes from the stream.
+// It will return less than maxBytes if the stream does not contain enough data.
+// head will happily return an empty array if stream is nil or maxBytes is 0.
+func head(stream io.Reader, maxBytes uint) ([]byte, error) {
+	if stream == nil || maxBytes == 0 {
+		return []byte{}, nil
+	}
+	buf := make([]byte, maxBytes)
+	// we are interested in reading at most maxBytes.
+	// This seems to be the same feature as provided by io.Reader.Read().
+	// It is not because:
+	// -- io.ReadFull() will put some extra effort to fully read up to the buf size until an EOF.
+	// -- and io.Reader.Read() will not.
+	n, err := io.ReadFull(stream, buf)
+
+	// Ignoring the following errors, because they means stream contains less than maxBytes:
+	// - io.EOF --> the stream is empty
+	// - io.ErrUnexpectedEOF --> the stream has less than mayBytes
+	if err != nil && !(err == io.ErrUnexpectedEOF || err == io.EOF) {
+		return nil, err
+	}
+	return buf[:n], nil
+}
diff --git a/sz.go b/sz.go
index 577e331a..708c5e11 100644
--- a/sz.go
+++ b/sz.go
@@ -26,8 +26,8 @@ func (sz Sz) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf := make([]byte, len(snappyHeader))
-	if _, err := io.ReadFull(stream, buf); err != nil {
+	buf, err := head(stream, uint(len(snappyHeader)))
+	if err != nil {
 		return mr, err
 	}
 	mr.ByStream = bytes.Equal(buf, snappyHeader)
diff --git a/xz.go b/xz.go
index eaf43471..ccd52097 100644
--- a/xz.go
+++ b/xz.go
@@ -27,8 +27,8 @@ func (x Xz) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf := make([]byte, len(xzHeader))
-	if _, err := io.ReadFull(stream, buf); err != nil {
+	buf, err := head(stream, uint(len(xzHeader)))
+	if err != nil {
 		return mr, err
 	}
 	mr.ByStream = bytes.Equal(buf, xzHeader)
diff --git a/zip.go b/zip.go
index fd4a4418..ecd26238 100644
--- a/zip.go
+++ b/zip.go
@@ -91,8 +91,8 @@ func (z Zip) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf := make([]byte, len(zipHeader))
-	if _, err := io.ReadFull(stream, buf); err != nil {
+	buf, err := head(stream, uint(len(zipHeader)))
+	if err != nil {
 		return mr, err
 	}
 	mr.ByStream = bytes.Equal(buf, zipHeader)
diff --git a/zstd.go b/zstd.go
index cd310a31..0b65706c 100644
--- a/zstd.go
+++ b/zstd.go
@@ -29,8 +29,8 @@ func (zs Zstd) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf := make([]byte, len(zstdHeader))
-	if _, err := io.ReadFull(stream, buf); err != nil {
+	buf, err := head(stream, uint(len(zstdHeader)))
+	if err != nil {
 		return mr, err
 	}
 	mr.ByStream = bytes.Equal(buf, zstdHeader)

From 98f22ccd08059cc7eb4fcccb713ab3dc764cded0 Mon Sep 17 00:00:00 2001
From: Patrice CONGO <39051536+congop@users.noreply.github.com>
Date: Mon, 31 Jan 2022 10:52:40 +0100
Subject: [PATCH 2/3] Add tests to guard against false positives in cases
 where a known header ends with 0.

---
 formats_test.go | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 46 insertions(+), 1 deletion(-)

diff --git a/formats_test.go b/formats_test.go
index ec42e047..1d8937a6 100644
--- a/formats_test.go
+++ b/formats_test.go
@@ -10,7 +10,52 @@ import (
 	"testing"
 )
 
-func TestIdentifyCanAssessSmallOrNoConternt(t *testing.T) {
+func TestIdentifyDoesNotMatchContentFromTrimmedKnownHeaderHaving0Suffix(t *testing.T) {
+	//Using the outcome of <> without minding <>
+	// may lead to a mis-caraterization for cases with known header ending with 0
+	// because the default byte value in a declared array is 0.
+	// This test guards against those cases.
+	tests := []struct {
+		name string
+		header []byte
+	}{
+		{
+			name: "rarv5_0",
+			header: rarHeaderV5_0[:],
+		},
+		{
+			name: "rarv1_5",
+			header: rarHeaderV1_5[:],
+		},
+		{
+			name: "xz",
+			header: xzHeader[:],
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			headerLen := len(tt.header)
+			if headerLen == 0 || tt.header[headerLen-1] != 0 {
+				t.Errorf("header expected to end with 0: header=%v", tt.header)
+				return
+			}
+			headerTrimmed := tt.header[:headerLen-1]
+			stream := bytes.NewReader(headerTrimmed)
+			got, err := Identify("", stream)
+			if got != nil {
+				t.Errorf("no Format expected for trimmed know %s header: found Format= %v", tt.name, got.Name())
+				return
+			}
+			if ErrNoMatch != err {
+				t.Fatalf("ErrNoMatch expected for for trimmed know %s header: err :=%#v", tt.name, err)
+				return
+			}
+
+		})
+	}
+}
+
+func TestIdentifyCanAssessSmallOrNoContent(t *testing.T) {
 	type args struct {
 		stream io.ReadSeeker
 	}

From 087da42990284c640cb0a757ae25d5fc00e7ec7a Mon Sep 17 00:00:00 2001
From: Matthew Holt
Date: Sun, 13 Mar 2022 23:15:41 -0600
Subject: [PATCH 3/3] Some cleanup

---
 bz2.go          |  2 +-
 formats.go      | 26 ++++++++++++++++++++++++++
 formats_test.go | 23 ++++++++++-------------
 gz.go           |  2 +-
 lz4.go          |  2 +-
 rar.go          | 13 ++++++-------
 streamhead.go   | 27 ---------------------------
 sz.go           |  2 +-
 xz.go           |  2 +-
 zip.go          |  2 +-
 zstd.go         |  2 +-
 11 files changed, 49 insertions(+), 54 deletions(-)
 delete mode 100644 streamhead.go

diff --git a/bz2.go b/bz2.go
index 21e96dc6..57a278f4 100644
--- a/bz2.go
+++ b/bz2.go
@@ -28,7 +28,7 @@ func (bz Bz2) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf, err := head(stream, uint(len(bzip2Header)))
+	buf, err := readAtMost(stream, len(bzip2Header))
 	if err != nil {
 		return mr, err
 	}
diff --git a/formats.go b/formats.go
index c80b31e4..5140e218 100644
--- a/formats.go
+++ b/formats.go
@@ -2,6 +2,7 @@ package archiver
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"strings"
@@ -119,6 +120,31 @@ func identifyOne(format Format, filename string, stream io.ReadSeeker, comp Comp
 	return format.Match(filename, stream)
 }
 
+// readAtMost reads at most n bytes from the stream. A nil, empty, or short
+// stream is not an error. The returned slice of bytes may have length < n
+// without an error.
+func readAtMost(stream io.Reader, n int) ([]byte, error) {
+	if stream == nil || n <= 0 {
+		return []byte{}, nil
+	}
+
+	buf := make([]byte, n)
+	nr, err := io.ReadFull(stream, buf)
+
+	// Return the bytes read if there was no error OR if the
+	// error was EOF (stream was empty) or UnexpectedEOF (stream
+	// had less than n). We ignore those errors because we aren't
+	// required to read the full n bytes; so an empty or short
+	// stream is not actually an error.
+	if err == nil ||
+		errors.Is(err, io.EOF) ||
+		errors.Is(err, io.ErrUnexpectedEOF) {
+		return buf[:nr], nil
+	}
+
+	return nil, err
+}
+
 // CompressedArchive combines a compression format on top of an archive
 // format (e.g. "tar.gz") and provides both functionalities in a single
 // type. It ensures that archive functions are wrapped by compressors and
diff --git a/formats_test.go b/formats_test.go
index 1d8937a6..f3021644 100644
--- a/formats_test.go
+++ b/formats_test.go
@@ -11,8 +11,8 @@ import (
 )
 
 func TestIdentifyDoesNotMatchContentFromTrimmedKnownHeaderHaving0Suffix(t *testing.T) {
-	//Using the outcome of <> without minding <>
-	// may lead to a mis-caraterization for cases with known header ending with 0
+	// Using the outcome of `n, err := io.ReadFull(stream, buf)` without minding n
+	// may lead to a mis-characterization for cases with known header ending with 0x0
 	// because the default byte value in a declared array is 0.
 	// This test guards against those cases.
 	tests := []struct {
@@ -20,16 +20,16 @@ func TestIdentifyDoesNotMatchContentFromTrimmedKnownHeaderHaving0Suffix(t *testi
 		header []byte
 	}{
 		{
-			name: "rarv5_0",
-			header: rarHeaderV5_0[:],
+			name: "rar_v5.0",
+			header: rarHeaderV5_0,
 		},
 		{
-			name: "rarv1_5",
-			header: rarHeaderV1_5[:],
+			name: "rar_v1.5",
+			header: rarHeaderV1_5,
 		},
 		{
 			name: "xz",
-			header: xzHeader[:],
+			header: xzHeader,
 		},
 	}
 	for _, tt := range tests {
@@ -141,13 +141,9 @@ func archive(t *testing.T, arch Archiver, fname string, fileInfo fs.FileInfo) []
 }
 
-type writeNopCloser struct {
-	io.Writer
-}
+type writeNopCloser struct{ io.Writer }
 
-func (wnc writeNopCloser) Close() error {
-	return nil
-}
+func (wnc writeNopCloser) Close() error { return nil }
 
 func newWriteNopCloser(w io.Writer) (io.WriteCloser, error) {
 	return writeNopCloser{w}, nil
 }
@@ -179,6 +175,7 @@ func newTmpTextFile(t *testing.T, content string) (string, fs.FileInfo) {
 		t.Fatalf("fail to get tmp-txt-file stats: err=%v", err)
 		return "", nil
 	}
+
 	return fname, fi
 }
 
diff --git a/gz.go b/gz.go
index 7bf27505..e747d030 100644
--- a/gz.go
+++ b/gz.go
@@ -36,7 +36,7 @@ func (gz Gz) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf, err := head(stream, uint(len(gzHeader)))
+	buf, err := readAtMost(stream, len(gzHeader))
 	if err != nil {
 		return mr, err
 	}
diff --git a/lz4.go b/lz4.go
index c0db15da..aaa22a54 100644
--- a/lz4.go
+++ b/lz4.go
@@ -28,7 +28,7 @@ func (lz Lz4) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf, err := head(stream, uint(len(lz4Header)))
+	buf, err := readAtMost(stream, len(lz4Header))
 	if err != nil {
 		return mr, err
 	}
diff --git a/rar.go b/rar.go
index 5564321a..917a9bcc 100644
--- a/rar.go
+++ b/rar.go
@@ -41,18 +41,17 @@ func (r Rar) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header (there are two versions; allocate buffer for larger one)
-	buf, err := head(stream, uint(len(rarHeaderV5_0)))
+	buf, err := readAtMost(stream, len(rarHeaderV5_0))
 	if err != nil {
 		return mr, err
 	}
 
-	availLengthBufV1_5 := len(rarHeaderV1_5)
-	if availLengthBufV1_5 > len(buf) {
-		// because there may not be enough bytes in the stream
-		availLengthBufV1_5 = len(buf)
-	}
+	matchedV1_5 := len(buf) >= len(rarHeaderV1_5) &&
+		bytes.Equal(rarHeaderV1_5, buf[:len(rarHeaderV1_5)])
+	matchedV5_0 := len(buf) >= len(rarHeaderV5_0) &&
+		bytes.Equal(rarHeaderV5_0, buf[:len(rarHeaderV5_0)])
 
-	mr.ByStream = bytes.Equal(buf[:availLengthBufV1_5], rarHeaderV1_5) || bytes.Equal(buf, rarHeaderV5_0)
+	mr.ByStream = matchedV1_5 || matchedV5_0
 
 	return mr, nil
 }
diff --git a/streamhead.go b/streamhead.go
deleted file mode 100644
index 6550028c..00000000
--- a/streamhead.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package archiver
-
-import "io"
-
-// head returns the first maxBytes from the stream.
-// It will return less than maxBytes if the stream does not contain enough data.
-// head will happily return an empty array if stream is nil or maxBytes is 0.
-func head(stream io.Reader, maxBytes uint) ([]byte, error) {
-	if stream == nil || maxBytes == 0 {
-		return []byte{}, nil
-	}
-	buf := make([]byte, maxBytes)
-	// we are interested in reading at most maxBytes.
-	// This seems to be the same feature as provided by io.Reader.Read().
-	// It is not because:
-	// -- io.ReadFull() will put some extra effort to fully read up to the buf size until an EOF.
-	// -- and io.Reader.Read() will not.
-	n, err := io.ReadFull(stream, buf)
-
-	// Ignoring the following errors, because they means stream contains less than maxBytes:
-	// - io.EOF --> the stream is empty
-	// - io.ErrUnexpectedEOF --> the stream has less than mayBytes
-	if err != nil && !(err == io.ErrUnexpectedEOF || err == io.EOF) {
-		return nil, err
-	}
-	return buf[:n], nil
-}
diff --git a/sz.go b/sz.go
index 708c5e11..9d10604a 100644
--- a/sz.go
+++ b/sz.go
@@ -26,7 +26,7 @@ func (sz Sz) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf, err := head(stream, uint(len(snappyHeader)))
+	buf, err := readAtMost(stream, len(snappyHeader))
 	if err != nil {
 		return mr, err
 	}
diff --git a/xz.go b/xz.go
index ccd52097..4e1b6b41 100644
--- a/xz.go
+++ b/xz.go
@@ -27,7 +27,7 @@ func (x Xz) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf, err := head(stream, uint(len(xzHeader)))
+	buf, err := readAtMost(stream, len(xzHeader))
 	if err != nil {
 		return mr, err
 	}
diff --git a/zip.go b/zip.go
index ecd26238..62d7212a 100644
--- a/zip.go
+++ b/zip.go
@@ -91,7 +91,7 @@ func (z Zip) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf, err := head(stream, uint(len(zipHeader)))
+	buf, err := readAtMost(stream, len(zipHeader))
 	if err != nil {
 		return mr, err
 	}
diff --git a/zstd.go b/zstd.go
index 0b65706c..fe07b76f 100644
--- a/zstd.go
+++ b/zstd.go
@@ -29,7 +29,7 @@ func (zs Zstd) Match(filename string, stream io.Reader) (MatchResult, error) {
 	}
 
 	// match file header
-	buf, err := head(stream, uint(len(zstdHeader)))
+	buf, err := readAtMost(stream, len(zstdHeader))
 	if err != nil {
 		return mr, err
 	}
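
For reference, a minimal usage sketch of the behavior this series establishes, mirroring the new tests: Identify should report ErrNoMatch rather than a wrapped io.EOF or io.ErrUnexpectedEOF when the stream is empty or shorter than any known header. The github.com/mholt/archiver/v4 import path is an assumption and is not taken from the patch itself.

package main

import (
	"bytes"
	"errors"
	"fmt"

	archiver "github.com/mholt/archiver/v4" // assumed module path, not stated in the patch
)

func main() {
	// Empty stream: previously a wrapped io.EOF, now the ErrNoMatch sentinel.
	_, err := archiver.Identify("", bytes.NewReader([]byte{}))
	fmt.Println(errors.Is(err, archiver.ErrNoMatch)) // expected: true

	// Stream shorter than any known header: previously a wrapped
	// io.ErrUnexpectedEOF, now ErrNoMatch as well.
	_, err = archiver.Identify("", bytes.NewReader([]byte{'a'}))
	fmt.Println(errors.Is(err, archiver.ErrNoMatch)) // expected: true
}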