// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"bytes"
	"compress/zlib"
	"encoding/binary"
	"errors"
	"io"
	"io/ioutil"
	"log"
	"sync"
)

// Errors returned while reading and decoding blocks.
// ErrNoMoreBlocks is returned by a BlockProducer that has run to its end.
var (
	ErrNoMoreBlocks           = errors.New("No more blocks.")
	ErrMapContentSizeMismatch = errors.New("Content size does not match.")
	ErrBlockTruncated         = errors.New("Block is truncated.")
)

const (
	mapBlockSize = 16
	nodeCount    = mapBlockSize * mapBlockSize * mapBlockSize
)

type (
	// Block holds the block data from a Minetest database.
	Block struct {
		Coord Coord
		Data  []byte
	}

	// BlockProducer is used to iterate over an existing Minetest database
	// and return its content block by block.
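	//
	// A typical iteration looks roughly like this (sketch only):
	//
	//	var block Block
	//	for {
	//		err := producer.Next(&block)
	//		if err == ErrNoMoreBlocks {
	//			break
	//		}
	//		if err != nil {
	//			// handle the error
	//		}
	//		// process block.Coord and block.Data
	//	}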
	BlockProducer interface {
		// Next returns ErrNoMoreBlocks if the producer has run out of blocks.
		Next(*Block) error
		// Close closes the open database connections.
		Close() error
	}

	// BlockConsumer is used to store blocks in a new Minetest database.
	BlockConsumer interface {
		Consume(*Block) error
		// Close closes the open database connections.
		Close() error
	}

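	// DecodedBlock is the decoded form of a database block: the
	// uncompressed node data plus the id mappings needed to resolve
	// node ids to color table indices.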
	DecodedBlock struct {
		Version     byte
		Transparent bool
		MapContent  []byte
		AirID       int32
		IgnoreID    int32
		IndexMap    map[int32]int32
	}
)

// zlibEmpty is a minimal zlib stream of zero length.
// zlib.NewReader needs a valid zlib stream to start with,
// even if Reset is called directly afterwards.
var zlibEmpty = []byte{
	0x78, 0x9c, 0x00, 0x00,
	0x00, 0xff, 0xff, 0x01,
	0x00, 0x00, 0xff, 0xff,
	0x00, 0x00, 0x00, 0x01}

// zlibReaderPool is a pool of zlib readers to be reused
// for decoding the compressed parts of database blocks.
// Decoding blocks relies heavily on zlib decompression.
// Reusing the internal structures of already allocated
// zlib readers speeds up the decoding significantly.
var zlibReaderPool = sync.Pool{
	New: func() interface{} {
		reader, _ := zlib.NewReader(bytes.NewBuffer(zlibEmpty))
		return reader
	},
}

// The content of the map and the meta data are compressed with zlib.
// Unfortunately the byte lengths of these two structures are not stored
// explicitly in the block data. To access the information behind
// them (e.g. the node id mappings) we have to count the bytes consumed
// by the zlib reader and continue our extraction process behind this
// offset. posBuf implements such a counting reader source.
type posBuf struct {
	Data []byte
	Pos  int
}

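// NewDecodedBlock decodes the serialized block data into a DecodedBlock,
// using the given color table to map node names to color indices.
// It returns ErrBlockTruncated if the data is shorter than the format
// requires.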
func NewDecodedBlock(data []byte, colors *Colors) (db *DecodedBlock, err error) {

	dataLen := len(data)
	if dataLen < 4 {
		return nil, ErrBlockTruncated
	}

	version := data[0]

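	// Bytes 2 and 3 of the header give the widths of the per-node
	// content and params data; together with the node count they
	// determine the uncompressed size of the node data.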
	contentWidth := int(data[2])
	paramsWidth := int(data[3])

	uncompressedLen := nodeCount * (contentWidth + paramsWidth)

	offset := 2
	if version >= 22 {
		offset = 4
	}

	zr := zlibReaderPool.Get().(interface {
		io.ReadCloser
		zlib.Resetter
	})
	defer func() {
		zr.Close() // This should not be necessary.
		zlibReaderPool.Put(zr)
	}()

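	// Decompress the node data, counting the consumed bytes in buf.Pos
	// so we know where the next structure starts.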
	buf := posBuf{Data: data[offset:]}
	if err = zr.Reset(&buf, nil); err != nil {
		return
	}

	mapContent := make([]byte, uncompressedLen)

	var k int
	k, err = io.ReadFull(zr, mapContent)
	if err != nil {
		return
	}

	if k != uncompressedLen {
		err = ErrMapContentSizeMismatch
		return
	}

	// Before Go 1.7 a bug forces us to add 4 to the offset
	// after the compressed geometry data. This is resolved
	// via build tags and definitions in pre17offset.go and
	// post17offset.go.
	offset += buf.Pos + afterCompressOfs
	buf.Pos = 0
	if offset >= dataLen {
		return nil, ErrBlockTruncated
	}
	buf.Data = data[offset:]

	if err = zr.(zlib.Resetter).Reset(&buf, nil); err != nil {
		return
	}

	// Discard the meta data.
	if _, err = io.Copy(ioutil.Discard, zr); err != nil {
		return
	}

	offset += buf.Pos

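	// Skip version dependent extra data that follows the metadata;
	// for version 24 this is a table with 10 bytes per entry
	// (node timers).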
	switch {
	case version <= 21:
		offset += 2
	case version == 23:
		offset++
	case version == 24:
		if offset >= dataLen {
			return nil, ErrBlockTruncated
		}
		ver := data[offset]
		offset++
		if ver == 1 {
			if offset+1 >= dataLen {
				return nil, ErrBlockTruncated
			}
			num := int(binary.BigEndian.Uint16(data[offset:]))
			offset += 2 + 10*num
		}
	}

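	// Skip the static objects: a version byte, a count, and for each
	// object 13 bytes of type and position followed by a size-prefixed
	// data blob.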
	offset++
	if offset+1 >= dataLen {
		return nil, ErrBlockTruncated
	}
	numStaticObjects := int(binary.BigEndian.Uint16(data[offset:]))
	offset += 2
	for i := 0; i < numStaticObjects; i++ {
		offset += 13
		if offset+1 >= dataLen {
			return nil, ErrBlockTruncated
		}
		dataSize := int(binary.BigEndian.Uint16(data[offset:]))
		offset += dataSize + 2
	}
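	// Skip the timestamp.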
	offset += 4

	airID, ignoreID := int32(-1), int32(-1)
	indexMap := make(map[int32]int32)
	var transparent bool
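	// Since version 22 the block carries a node id to node name
	// mapping table. Translate it into color table indices.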
	if version >= 22 {
		offset++
		if offset+1 >= dataLen {
			return nil, ErrBlockTruncated
		}
		numMappings := int(binary.BigEndian.Uint16(data[offset:]))
		offset += 2

		// Be a bit more tolerant with a truncated node name table.
		// We should probably issue an error here, too!?
		const outOfBounds = "Offset in node id table out of bounds. Ignored."

		for i := 0; i < numMappings; i++ {
			if offset+1 >= dataLen {
				log.Println(outOfBounds)
				break
			}
			nodeID := int32(binary.BigEndian.Uint16(data[offset:]))
			offset += 2
			if offset+1 >= dataLen {
				log.Println(outOfBounds)
				break
			}
			nameLen := int(binary.BigEndian.Uint16(data[offset:]))
			offset += 2
			if offset+nameLen-1 >= dataLen {
				log.Println(outOfBounds)
				break
			}
			name := string(data[offset : offset+nameLen])
			offset += nameLen
			switch name {
			case "air":
				airID = nodeID
			case "ignore":
				ignoreID = nodeID
			default:
				if index, found := colors.NameIndex[name]; found {
					indexMap[nodeID] = index
					if !transparent && colors.IsTransparent(index) {
						transparent = true
					}
				} else {
					logMissing(name)
				}
			}
		}
	}

	db = &DecodedBlock{
		Version:     version,
		Transparent: transparent,
		MapContent:  mapContent,
		AirID:       airID,
		IgnoreID:    ignoreID,
		IndexMap:    indexMap}

	return
}

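// missingColors tracks the node names already reported as having
// no color entry, so each name is logged only once.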
var missingColors = struct {
	sync.Mutex
	cols map[string]struct{}
}{cols: map[string]struct{}{}}

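// logMissing logs a missing color entry for the given node name,
// but only the first time the name is seen.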
func logMissing(name string) {
	missingColors.Lock()
	defer missingColors.Unlock()
	if _, found := missingColors.cols[name]; !found {
		missingColors.cols[name] = struct{}{}
		log.Printf("Missing color entry for %s.\n", name)
	}
}

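// AirOnly reports whether the block has an air mapping and no other
// color mapped nodes.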
func (db *DecodedBlock) AirOnly() bool {
	return db.AirID != -1 && len(db.IndexMap) == 0
}

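// Content returns the color table index for the node at the block
// local coordinates (x, y, z). The node data is indexed as
// z*256 + y*16 + x. Air and ignore nodes are reported as not found.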
func (db *DecodedBlock) Content(x, y, z int) (content int32, found bool) {
	pos := z<<8 + y<<4 + x

	switch {
	case db.Version >= 24:
		pos <<= 1
		content = int32(db.MapContent[pos])<<8 | int32(db.MapContent[pos+1])
	case db.Version >= 20:
		if db.MapContent[pos] <= 0x80 {
			content = int32(db.MapContent[pos])
		} else {
			content = int32(db.MapContent[pos])<<4 | int32(db.MapContent[pos+0x2000])>>4
		}
	default:
		return
	}
	if content != db.AirID && content != db.IgnoreID {
		content, found = db.IndexMap[content]
	}
	return
}

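// Read implements io.Reader on the underlying slice while keeping
// track of the number of bytes consumed in Pos.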
func (pb *posBuf) Read(p []byte) (int, error) {
	pl := len(p)
	ml := len(pb.Data)
	if pb.Pos >= ml {
		return 0, io.EOF
	}
	rest := ml - pb.Pos
	if pl > rest {
		copy(p, pb.Data[pb.Pos:])
		pb.Pos = ml
		return rest, io.EOF
	}
	copy(p, pb.Data[pb.Pos:pb.Pos+pl])
	pb.Pos += pl
	return pl, nil
}

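// ReadByte implements io.ByteReader. Without it the flate decoder
// would wrap the source in a buffered reader and read ahead,
// which would break the byte counting in Pos.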
func (pb *posBuf) ReadByte() (byte, error) {
	if pb.Pos >= len(pb.Data) {
		return 0, io.EOF
	}
	c := pb.Data[pb.Pos]
	pb.Pos++
	return c, nil
}