diff --git a/src/compress/flate/flate_test.go b/src/compress/flate/flate_test.go
index 341d807131..83c20498cc 100644
--- a/src/compress/flate/flate_test.go
+++ b/src/compress/flate/flate_test.go
@@ -272,3 +272,81 @@ func TestTruncatedStreams(t *testing.T) {
 		}
 	}
 }
+
+// Verify that flate.Reader.Read returns (n, io.EOF) instead
+// of (n, nil) + (0, io.EOF) when possible.
+//
+// This helps net/http.Transport reuse HTTP/1 connections more
+// aggressively.
+//
+// See https://github.com/google/go-github/pull/317 for background.
+func TestReaderEarlyEOF(t *testing.T) {
+	testSizes := []int{
+		1, 2, 3, 4, 5, 6, 7, 8,
+		100, 1000, 10000, 100000,
+		128, 1024, 16384, 131072,
+
+		// Testing multiples of windowSize triggers the case
+		// where Read will fail to return an early io.EOF.
+		windowSize * 1, windowSize * 2, windowSize * 3,
+	}
+
+	var maxSize int
+	for _, n := range testSizes {
+		if maxSize < n {
+			maxSize = n
+		}
+	}
+
+	readBuf := make([]byte, 40)
+	data := make([]byte, maxSize)
+	for i := range data {
+		data[i] = byte(i)
+	}
+
+	for _, sz := range testSizes {
+		if testing.Short() && sz > windowSize {
+			continue
+		}
+		for _, flush := range []bool{true, false} {
+			earlyEOF := true // Do we expect early io.EOF?
+
+			var buf bytes.Buffer
+			w, _ := NewWriter(&buf, 5)
+			w.Write(data[:sz])
+			if flush {
+				// If a Flush occurs after all the actual data, the flushing
+				// semantics dictate that we will observe a (0, io.EOF) since
+				// Read must return data before it knows that the stream ended.
+				w.Flush()
+				earlyEOF = false
+			}
+			w.Close()
+
+			r := NewReader(&buf)
+			for {
+				n, err := r.Read(readBuf)
+				if err == io.EOF {
+					// If the availWrite == windowSize, then that means that the
+					// previous Read returned because the write buffer was full
+					// and it just so happened that the stream had no more data.
+					// This situation is rare, but unavoidable.
+					if r.(*decompressor).dict.availWrite() == windowSize {
+						earlyEOF = false
+					}
+
+					if n == 0 && earlyEOF {
+						t.Errorf("On size:%d flush:%v, Read() = (0, io.EOF), want (n, io.EOF)", sz, flush)
+					}
+					if n != 0 && !earlyEOF {
+						t.Errorf("On size:%d flush:%v, Read() = (%d, io.EOF), want (0, io.EOF)", sz, flush, n)
+					}
+					break
+				}
+				if err != nil {
+					t.Fatal(err)
+				}
+			}
+		}
+	}
+}
diff --git a/src/compress/flate/inflate.go b/src/compress/flate/inflate.go
index 6b0657b799..d5f55eab34 100644
--- a/src/compress/flate/inflate.go
+++ b/src/compress/flate/inflate.go
@@ -299,15 +299,6 @@ type decompressor struct {
 }
 
 func (f *decompressor) nextBlock() {
-	if f.final {
-		if f.dict.availRead() > 0 {
-			f.toRead = f.dict.readFlush()
-			f.step = (*decompressor).nextBlock
-			return
-		}
-		f.err = io.EOF
-		return
-	}
 	for f.nb < 1+2 {
 		if f.err = f.moreBits(); f.err != nil {
 			return
@@ -345,6 +336,9 @@ func (f *decompressor) Read(b []byte) (int, error) {
 		if len(f.toRead) > 0 {
 			n := copy(b, f.toRead)
 			f.toRead = f.toRead[n:]
+			if len(f.toRead) == 0 {
+				return n, f.err
+			}
 			return n, nil
 		}
 		if f.err != nil {
@@ -512,8 +506,7 @@ readLiteral:
 			}
 			goto readLiteral
 		case v == 256:
-			// Done with huffman block; read next block.
-			f.step = (*decompressor).nextBlock
+			f.finishBlock()
 			return
 		// otherwise, reference to older data
 		case v < 265:
@@ -648,7 +641,7 @@ func (f *decompressor) dataBlock() {
 
 	if n == 0 {
 		f.toRead = f.dict.readFlush()
-		f.step = (*decompressor).nextBlock
+		f.finishBlock()
 		return
 	}
 
@@ -681,6 +674,16 @@ func (f *decompressor) copyData() {
 		f.step = (*decompressor).copyData
 		return
 	}
+	f.finishBlock()
+}
+
+func (f *decompressor) finishBlock() {
+	if f.final {
+		if f.dict.availRead() > 0 {
+			f.toRead = f.dict.readFlush()
+		}
+		f.err = io.EOF
+	}
 	f.step = (*decompressor).nextBlock
 }
 
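
Not part of the patch: a minimal caller-side sketch of the behavior this change enables. With the eager io.EOF, the last Read on a fully consumed stream can report the remaining bytes and io.EOF together, so a consumer (such as net/http.Transport) learns the stream ended without issuing an extra Read. The payload and buffer size below are illustrative only.

	package main

	import (
		"bytes"
		"compress/flate"
		"fmt"
		"io"
	)

	func main() {
		// Compress a small payload; the exact contents are arbitrary.
		var buf bytes.Buffer
		w, _ := flate.NewWriter(&buf, flate.DefaultCompression)
		w.Write([]byte("hello, flate"))
		w.Close()

		// Before this change: Read typically returns (n, nil) followed by (0, io.EOF).
		// After this change: the final Read can return (n, io.EOF) directly.
		r := flate.NewReader(&buf)
		p := make([]byte, 64)
		for {
			n, err := r.Read(p)
			fmt.Printf("Read() = (%d, %v)\n", n, err)
			if err == io.EOF {
				break
			}
			if err != nil {
				panic(err)
			}
		}
	}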