Support v29 blocks

Sascha L. Teichmann 2024-01-06 01:40:03 +01:00
parent 827d73bed0
commit 8a344b5a6d
1 changed file with 198 additions and 81 deletions


@@ -92,6 +92,132 @@ type posBuf struct {
	Pos int
}
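
// bigEndian is a small decoder for the big-endian header fields of a
// v29 block; it latches the first error so callers can check Err() once.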
type bigEndian struct {
	data []byte
	err  error
}

func (be *bigEndian) Err() error {
	return be.err
}

func (be *bigEndian) u8() uint8 {
	if be.err != nil {
		return 0
	}
	if len(be.data) >= 1 {
		x := be.data[0]
		be.data = be.data[1:]
		return x
	}
	be.err = ErrBlockTruncated
	return 0
}

func (be *bigEndian) u16() uint16 {
	if be.err != nil {
		return 0
	}
	if len(be.data) >= 2 {
		x := binary.BigEndian.Uint16(be.data)
		be.data = be.data[2:]
		return x
	}
	be.err = ErrBlockTruncated
	return 0
}

func (be *bigEndian) u32() uint32 {
	if be.err != nil {
		return 0
	}
	if len(be.data) >= 4 {
		x := binary.BigEndian.Uint32(be.data)
		be.data = be.data[4:]
		return x
	}
	be.err = ErrBlockTruncated
	return 0
}

func (be *bigEndian) str(l int) string {
	if be.err != nil {
		return ""
	}
	if len(be.data) >= l {
		s := string(be.data[:l])
		be.data = be.data[l:]
		return s
	}
	be.err = ErrBlockTruncated
	return ""
}
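
// decode29 handles map blocks of serialization version 29 and newer,
// which are zstd-compressed as a whole: it parses the block header,
// maps node name IDs to color indices and extracts the raw node data.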
func decode29(data []byte, colors *Colors) (*DecodedBlock, error) {
	dec, err := zstd.NewReader(nil)
	if err != nil {
		return nil, err
	}
	// data[0] is the version byte; the rest of a v29 block is one zstd frame.
	content, err := dec.DecodeAll(data[1:], nil)
	if err != nil {
		return nil, err
	}
	be := bigEndian{data: content}

	_ = be.u8()  // flags
	_ = be.u16() // lighting_complete
	_ = be.u32() // timestamp
	_ = be.u8()  // name_id_mapping_version

	airID, ignoreID := int32(-1), int32(-1)
	indexMap := make(map[int32]int32)
	transparent := false

	numNameIDMappings := be.u16()
	for i := uint16(0); i < numNameIDMappings; i++ {
		id := int32(be.u16())
		name := be.str(int(be.u16()))
		switch name {
		case "air":
			airID = id
		case "ignore":
			ignoreID = id
		default:
			if index, found := colors.NameIndex[name]; found {
				indexMap[id] = index
				if !transparent && colors.IsTransparent(index) {
					transparent = true
				}
			} else {
				logMissing(name)
			}
		}
	}

	_ = be.u8() // content_width
	_ = be.u8() // params_width

	if err := be.Err(); err != nil {
		return nil, err
	}

	mapContent := make([]byte, 2*4096)
	if len(be.data) < len(mapContent) {
		return nil, ErrBlockTruncated
	}
	copy(mapContent, be.data)

	return &DecodedBlock{
		Version:     data[0],
		Transparent: transparent,
		MapContent:  mapContent,
		AirID:       airID,
		IgnoreID:    ignoreID,
		IndexMap:    indexMap,
	}, nil
}

func NewDecodedBlock(data []byte, colors *Colors) (db *DecodedBlock, err error) {
	dataLen := len(data)

@@ -101,102 +227,93 @@ func NewDecodedBlock(data []byte, colors *Colors) (db *DecodedBlock, err error)
	version := data[0]

	if version >= 29 {
		return decode29(data, colors)
	}

	var offset int
	contentWidth := Min(int(data[2]), 2)
	paramsWidth := Min(int(data[3]), 2)

	uncompressedLen := nodeCount * (contentWidth + paramsWidth)

	switch {
	case version >= 27:
		offset = 6
	case version >= 22:
		offset = 4
	default:
		offset = 2
	}

	zr := zlibReaderPool.Get().(interface {
		io.ReadCloser
		zlib.Resetter
	})
	defer func() {
		zr.Close() // This should not be necessary.
		zlibReaderPool.Put(zr)
	}()

	buf := posBuf{Data: data[offset:]}
	if err = zr.Reset(&buf, nil); err != nil {
		return
	}

	mapContent := make([]byte, uncompressedLen)

	var k int
	k, err = io.ReadFull(zr, mapContent)
	if err != nil {
		return
	}

	if k != uncompressedLen {
		err = ErrMapContentSizeMismatch
		return
	}

	// There is a bug before Go 1.7 that forces us to add 4
	// as an offset after the compressed geometry data.
	// This is resolved via build tags and definitions in
	// pre17offset.go and post17offset.go.
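	// A rough sketch (an assumption; these files are not shown in this
	// diff) of what the two build-tagged files could define. Only the
	// file names and the afterCompressOfs identifier come from the
	// comment above; the concrete values are guesses:
	//
	//   // pre17offset.go
	//   // +build !go1.7
	//   const afterCompressOfs = 4
	//
	//   // post17offset.go
	//   // +build go1.7
	//   const afterCompressOfs = 0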
	offset += buf.Pos + afterCompressOfs
	buf.Pos = 0

	if offset >= dataLen {
		return nil, ErrBlockTruncated
	}
	buf.Data = data[offset:]
	if err = zr.(zlib.Resetter).Reset(&buf, nil); err != nil {
		return
	}

	// Discard the meta data.
	if _, err = io.Copy(io.Discard, zr); err != nil {
		return
	}

	offset += buf.Pos

	switch {
	case version <= 21:
		offset += 2
	case version == 23:
		offset++
	case version == 24:
		if offset >= dataLen {
			return nil, ErrBlockTruncated
		}
		ver := data[offset]
		offset++
		if ver == 1 {
			if offset+1 >= dataLen {
				return nil, ErrBlockTruncated
			}
			num := int(binary.BigEndian.Uint16(data[offset:]))
			offset += 2 + 10*num
		}
	}