Added a recycling pool for bulk strings written in the Redis client to help performance.

Sascha L. Teichmann 2017-02-25 17:00:17 +01:00
parent b2ca97568e
commit 228bd9026b


@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"net"
 	"strconv"
+	"sync"
 )
 
 type RedisClient struct {
@@ -30,11 +31,6 @@ func (client *RedisClient) Close() error {
 	return client.conn.Close()
 }
 
-func redisLength(prefix byte, s int) []byte {
-	buf := append(make([]byte, 0, 16), prefix)
-	return append(strconv.AppendInt(buf, int64(s), 10), '\r', '\n')
-}
-
 var (
 	writeArray4 = []byte("*4\r\n")
 	hspatial    = []byte("HSPATIAL")
@@ -42,14 +38,47 @@ var (
 	ignore      = []byte("IGNORE")
 )
 
+// stringPool is a pool to help recycle bulk strings
+// for writing. Experimented with sync.Pool and
+// channel based leaky buffers but the mutex based
+// version performs best in this case.
+type stringPool struct {
+	list [][]byte
+	sync.Mutex
+}
+
+func (sp *stringPool) alloc() (l []byte) {
+	sp.Lock()
+	if n := len(sp.list); n > 0 {
+		l = sp.list[n-1]
+		sp.list[n-1] = nil
+		sp.list = sp.list[:n-1]
+		sp.Unlock()
+	} else {
+		sp.Unlock()
+		l = make([]byte, 0, 32)
+	}
+	return
+}
+
+func (sp *stringPool) free(b []byte) {
+	b = b[:0]
+	sp.Lock()
+	sp.list = append(sp.list, b)
+	sp.Unlock()
+}
+
+var spool stringPool
+
 func (client *RedisClient) writeBulkString(data []byte) (err error) {
-	if _, err = client.conn.Write(redisLength('$', len(data))); err != nil {
-		return
-	}
-	if _, err = client.conn.Write(data); err != nil {
-		return
-	}
-	_, err = client.conn.Write(nl)
+	buf := spool.alloc()
+	buf = append(buf, '$')
+	buf = strconv.AppendInt(buf, int64(len(data)), 10)
+	buf = append(buf, nl...)
+	buf = append(buf, data...)
+	buf = append(buf, nl...)
+	_, err = client.conn.Write(buf)
+	spool.free(buf)
 	return
 }
 
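
For reference, the rewritten writeBulkString assembles the whole RESP bulk-string frame ("$<length>\r\n<payload>\r\n") in one pooled buffer, so the connection sees a single Write call instead of the previous three. A minimal standalone sketch of that framing follows, assuming nl is []byte("\r\n") as the "*4\r\n" literal in writeArray4 suggests; frameBulkString is an illustrative name, not part of the commit.

package main

import (
	"fmt"
	"strconv"
)

// nl is assumed to be the CRLF terminator used throughout the file.
var nl = []byte("\r\n")

// frameBulkString builds a RESP bulk string ("$<length>\r\n<payload>\r\n")
// with the same append sequence the patched writeBulkString uses.
func frameBulkString(data []byte) []byte {
	buf := make([]byte, 0, 32)
	buf = append(buf, '$')
	buf = strconv.AppendInt(buf, int64(len(data)), 10)
	buf = append(buf, nl...)
	buf = append(buf, data...)
	buf = append(buf, nl...)
	return buf
}

func main() {
	fmt.Printf("%q\n", frameBulkString([]byte("HELLO"))) // "$5\r\nHELLO\r\n"
}

Besides recycling the buffer, this turns three conn.Write calls, and potentially three syscalls, into one.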
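
The stringPool doc comment says a channel-based leaky buffer was also tried. For contrast, here is a minimal sketch of that classic pattern as described in Effective Go; the function names and the pool capacity of 100 are illustrative assumptions, not taken from the commit.

package main

// freeList is a leaky buffer: a bounded channel holding spare buffers.
var freeList = make(chan []byte, 100) // capacity is an illustrative choice

func allocBuf() []byte {
	select {
	case b := <-freeList:
		return b[:0] // reuse a pooled buffer, reset to zero length
	default:
		return make([]byte, 0, 32) // pool empty: allocate a fresh one
	}
}

func freeBuf(b []byte) {
	select {
	case freeList <- b: // room in the pool: keep the buffer
	default: // pool full: drop it ("leak" it) for the GC to collect
	}
}

func main() {
	b := allocBuf()
	b = append(b, "payload"...)
	freeBuf(b)
}

The commit only records the measurement result, not the cause; presumably an uncontended Lock/Unlock is cheaper here than a channel send/receive, and sync.Pool returns buffers to the GC between collections, which works against a steadily hot write path.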