Use a sync.Pool to recycle the zlib readers. Speeds up the decoding of database blocks significantly.

This commit is contained in:
Sascha L. Teichmann 2015-08-08 16:01:23 +02:00
parent ab1a86384b
commit c4c35dc10d
1 changed file with 32 additions and 3 deletions

View File

@ -5,6 +5,7 @@
package common
import (
"bytes"
"compress/zlib"
"encoding/binary"
"errors"
@ -57,6 +58,27 @@ type (
}
)
// zlibEmpty is a minimal zlib stream with zero length.
// zlib.NewReader needs a valid zlib stream to start with
// even if Reset is called directly afterwards.
var zlibEmpty = []byte{
0x78, 0x9c, 0x00, 0x00,
0x00, 0xff, 0xff, 0x01,
0x00, 0x00, 0xff, 0xff,
0x00, 0x00, 0x00, 0x01}
// zlibReaderPool is a pool of zlib Readers to be reused
// for decoding the compressed parts of database blocks.
// Decoding blocks relies heavly on zlib decompression.
// Reusing the internal structures of already allocated
// zlib readers speeds up the decoding significantly.
var zlibReaderPool = sync.Pool{
New: func() interface{} {
reader, _ := zlib.NewReader(bytes.NewBuffer(zlibEmpty))
return reader
},
}
// The content of the map and the meta data are compressed with zlib.
// Unfortunately the byte lengths of these two structures are not stored
// explicitly in the block data. To access the information behind
@ -81,12 +103,19 @@ func NewDecodedBlock(data []byte, colors *Colors) (db *DecodedBlock, err error)
offset = 4
}
zr := zlibReaderPool.Get().(interface {
io.ReadCloser
zlib.Resetter
})
defer func() {
zr.Close() // This should not be necessary.
zlibReaderPool.Put(zr)
}()
buf := posBuf{Data: data[offset:]}
var zr io.ReadCloser
if zr, err = zlib.NewReader(&buf); err != nil {
if err = zr.Reset(&buf, nil); err != nil {
return
}
defer zr.Close()
mapContent := make([]byte, uncompressedLen)