Added a recycling pool for bulk strings written by the Redis client to improve performance.

Sascha L. Teichmann 2017-02-25 17:00:17 +01:00
parent b2ca97568e
commit 228bd9026b
1 changed file with 41 additions and 12 deletions


@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"net"
 	"strconv"
+	"sync"
 )
 
 type RedisClient struct {
@@ -30,11 +31,6 @@ func (client *RedisClient) Close() error {
 	return client.conn.Close()
 }
 
-func redisLength(prefix byte, s int) []byte {
-	buf := append(make([]byte, 0, 16), prefix)
-	return append(strconv.AppendInt(buf, int64(s), 10), '\r', '\n')
-}
-
 var (
 	writeArray4 = []byte("*4\r\n")
 	hspatial = []byte("HSPATIAL")
@@ -42,14 +38,47 @@ var (
 	ignore = []byte("IGNORE")
 )
 
+// stringPool is a pool to help recycle bulk strings
+// for writing. Experimented with sync.Pool and
+// channel based leaky buffers but the mutex based
+// version performs best in this case.
+type stringPool struct {
+	list [][]byte
+	sync.Mutex
+}
+
+func (sp *stringPool) alloc() (l []byte) {
+	sp.Lock()
+	if n := len(sp.list); n > 0 {
+		l = sp.list[n-1]
+		sp.list[n-1] = nil
+		sp.list = sp.list[:n-1]
+		sp.Unlock()
+	} else {
+		sp.Unlock()
+		l = make([]byte, 0, 32)
+	}
+	return
+}
+
+func (sp *stringPool) free(b []byte) {
+	b = b[:0]
+	sp.Lock()
+	sp.list = append(sp.list, b)
+	sp.Unlock()
+}
+
+var spool stringPool
+
 func (client *RedisClient) writeBulkString(data []byte) (err error) {
-	if _, err = client.conn.Write(redisLength('$', len(data))); err != nil {
-		return
-	}
-	if _, err = client.conn.Write(data); err != nil {
-		return
-	}
-	_, err = client.conn.Write(nl)
+	buf := spool.alloc()
+	buf = append(buf, '$')
+	buf = strconv.AppendInt(buf, int64(len(data)), 10)
+	buf = append(buf, nl...)
+	buf = append(buf, data...)
+	buf = append(buf, nl...)
+	_, err = client.conn.Write(buf)
+	spool.free(buf)
 	return
 }
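For reference, writeBulkString emits a RESP bulk string, which is framed as $<length>\r\n<payload>\r\n. A minimal standalone sketch of the framing the new single-buffer path builds (the data value is illustrative; nl mirrors the []byte("\r\n") terminator the diff relies on):

	package main

	import (
		"fmt"
		"strconv"
	)

	func main() {
		nl := []byte("\r\n") // same terminator the client writes
		data := []byte("HSPATIAL")

		// Build the whole bulk string in one buffer, as the new code does,
		// so it goes out in a single conn.Write instead of three.
		buf := make([]byte, 0, 32)
		buf = append(buf, '$')
		buf = strconv.AppendInt(buf, int64(len(data)), 10)
		buf = append(buf, nl...)
		buf = append(buf, data...)
		buf = append(buf, nl...)

		fmt.Printf("%q\n", buf) // prints "$8\r\nHSPATIAL\r\n"
	}

Besides recycling the buffer, this collapses three conn.Write calls per bulk string into one, which also matters on a TCP connection.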
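The new comment notes that sync.Pool and channel-based leaky buffers were tried before settling on the mutex-based pool. A minimal sketch of what the sync.Pool variant could have looked like, for comparison only (allocBuf and freeBuf are hypothetical names, not code from this repository):

	package main

	import "sync"

	// Hypothetical sync.Pool counterpart to stringPool.
	var bufPool = sync.Pool{
		New: func() interface{} { return make([]byte, 0, 32) },
	}

	// allocBuf returns a recycled or fresh buffer with length reset to zero.
	func allocBuf() []byte { return bufPool.Get().([]byte)[:0] }

	// freeBuf hands the buffer back to the pool.
	func freeBuf(b []byte) { bufPool.Put(b) }

One plausible reason this variant lost the author's benchmark: storing a slice in an interface{} allocates on every Put, and sync.Pool may drop its contents at any GC cycle, while the mutex-guarded list keeps buffers alive and stays allocation-free on the common alloc/free path.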