Populating the repository
102
common/area.go
Normal file
@@ -0,0 +1,102 @@
// Copyright 2014, 2015, 2017 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"math"
)

type Area struct {
	X1, Z1 int16
	X2, Z2 int16
}

func (a Area) contains(x, z int16) bool {
	return x >= a.X1 && x <= a.X2 && z >= a.Z1 && z <= a.Z2
}

func (a Area) higher() bool {
	return a.Z2-a.Z1 > a.X2-a.X1
}

func areasContain(areas []Area, x, z int16) bool {
	for _, r := range areas {
		if r.contains(x, z) {
			return true
		}
	}
	return false
}

// recalculate implements a greedy algorithm to figure out
// a list of disjoint free areas in the (x, z) block plane.
// oldAreas are searched and the free areas found are appended
// to newAreas, which is returned.
// This is useful to spatially query only blocks from the db
// that are not below already rendered blocks.
func (area Area) recalculate(r *Renderer, nareas []Area) []Area {
	yM := r.yMin

	const ex = 1
	const ez = 2

	nas := len(nareas)

	for z := area.Z1; z <= area.Z2; z++ {
		row := z * int16(r.width)
		for x := area.X1; x <= area.X2; x++ {
			// Uncovered and not in list of new areas?
			if yM[row+x] > math.MinInt32 || areasContain(nareas[nas:], x, z) {
				continue
			}
			a := Area{X1: x, Z1: z, X2: x, Z2: z}
			// Try to extend the area in x and/or z till no further extension is possible.
		ext:
			for extend := ex | ez; extend != 0; {
				// If we are extending in both directions and the current area
				// is higher than wide, we gain more blocks if we extend
				// in the x direction first.
				if (extend == ex|ez && a.higher()) || extend&ex == ex { // check x
					nx := a.X2 + 1
					if nx > area.X2 { // reached border of area
						extend &= ^ex
						continue
					}
					// Check if the column right of the current area is fully free.
					for nz := a.Z1; nz <= a.Z2; nz++ {
						if yM[nz*int16(r.width)+nx] > math.MinInt32 ||
							areasContain(nareas[nas:], nx, nz) {
							extend &= ^ex
							continue ext
						}
					}
					// free -> extend
					a.X2 = nx
				} else if extend&ez == ez { // check z
					nz := a.Z2 + 1
					if nz > area.Z2 {
						extend &= ^ez
						continue
					}
					// Check if the line right below the current area is free.
					row2 := nz * int16(r.width)
					for nx := a.X1; nx <= a.X2; nx++ {
						if yM[row2+nx] > math.MinInt32 ||
							areasContain(nareas[nas:], nx, nz) {
							extend &= ^ez
							continue ext
						}
					}
					// free -> extend
					a.Z2 = nz
				}
			}
			// At this point the area is extended to max.
			nareas = append(nareas, a)
		}
	}
	return nareas
}
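For illustration only (not part of this commit): the greedy extension in recalculate can be reproduced on a plain boolean grid. The sketch below grows a rectangle from a free seed cell, first along x and then along z, against a made-up covered slice instead of the renderer's yMin buffer; growRect and the grid dimensions are invented for this example.

package main

import "fmt"

// growRect grows a rectangle from the free seed cell (sx, sz) inside a
// w*h grid, where covered[z*w+x] marks cells that are already occupied.
func growRect(covered []bool, w, h, sx, sz int) (x1, z1, x2, z2 int) {
	x1, z1, x2, z2 = sx, sz, sx, sz
	free := func(x, z int) bool { return !covered[z*w+x] }
	extendX, extendZ := true, true
	for extendX || extendZ {
		if extendX { // try the column right of the rectangle
			nx := x2 + 1
			ok := nx < w
			for z := z1; ok && z <= z2; z++ {
				ok = free(nx, z)
			}
			if ok {
				x2 = nx
			} else {
				extendX = false
			}
		} else { // try the row below the rectangle
			nz := z2 + 1
			ok := nz < h
			for x := x1; ok && x <= x2; x++ {
				ok = free(x, nz)
			}
			if ok {
				z2 = nz
			} else {
				extendZ = false
			}
		}
	}
	return
}

func main() {
	covered := make([]bool, 4*4)
	covered[1*4+2] = true // one occupied cell limits the growth
	fmt.Println(growRect(covered, 4, 4, 0, 0)) // 0 0 3 0
}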
206
common/basetilecreator.go
Normal file
@@ -0,0 +1,206 @@
// Copyright 2014 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"image/color"
	"io/ioutil"
	"log"
	"path/filepath"
	"strconv"
)

const (
	tileWidth      = 18
	tileHeight     = 18
	yOrderCapacity = 512
)

const (
	MaxHeight = 1934
	MinHeight = -1934
)

// To scan the whole height in terms of the y coordinate
// the database is queried in height units defined in the tileDepths table.
var tileDepths = [...][2]int16{
	{1024, MaxHeight},
	{256, 1023},
	{128, 255},
	{64, 127},
	{32, 63},
	{16, 31},
	{8, 15},
	{4, 7},
	{2, 3},
	{0, 1},
	{-1, 0},
	{-4, -2},
	{-8, -5},
	{-16, -9},
	{-32, -17},
	{-64, -33},
	{-128, -65},
	{-256, -129},
	{-1024, -257},
	{MinHeight, -1025}}

var BackgroundColor = color.RGBA{R: 0xff, G: 0xff, B: 0xff, A: 0xff}

type BaseTileUpdateFunc func(x, y int, hash []byte) bool

type BaseTileCreator struct {
	client     *RedisClient
	colors     *Colors
	renderer   *Renderer
	yOrder     *YOrder
	yMin       int16
	yMax       int16
	baseDir    string
	emptyImage []byte
	bg         color.RGBA
}

func NewBaseTileCreator(
	client *RedisClient,
	colors *Colors,
	bg color.RGBA,
	yMin, yMax int16,
	transparent bool,
	baseDir string) *BaseTileCreator {

	renderer := NewRenderer(tileWidth, tileHeight, transparent)
	yMin, yMax = Order16(yMin, yMax)
	btc := &BaseTileCreator{
		client:   client,
		colors:   colors,
		bg:       bg,
		renderer: renderer,
		yMin:     yMin,
		yMax:     yMax,
		baseDir:  baseDir,
	}
	btc.yOrder = NewYOrder(btc.renderBlock, yOrderCapacity)
	return btc
}

func (btc *BaseTileCreator) Close() error {
	return btc.client.Close()
}

// renderBlock is a callback to draw a block with a YOrder.
func (btc *BaseTileCreator) renderBlock(block *Block) error {
	return btc.renderer.RenderBlock(block, btc.colors)
}

// blockLoaded is a callback for RedisClient.QueryCuboid.
func (btc *BaseTileCreator) blockLoaded(block *Block) *Block {
	block, err := btc.yOrder.RenderBlock(block)
	if err != nil {
		log.Printf("WARN: rendering block failed: %s\n", err)
	}
	return block
}

func (btc *BaseTileCreator) RenderArea(x, z int16) error {
	btc.renderer.Reset()
	btc.renderer.SetPos(x, z)
	btc.yOrder.Reset()

	var c1, c2 Coord

	nareas := make([]Area, 0, tileWidth*tileHeight/2)
	areas := make([]Area, 1, tileWidth*tileHeight/2)

	areas[0] = Area{
		X1: 0, Z1: 0,
		X2: int16(tileWidth) - 1, Z2: int16(tileHeight) - 1}

	for _, yRange := range tileDepths {
		if yRange[0] > btc.yMax || yRange[1] < btc.yMin {
			continue
		}

		c1.Y = max16(yRange[0], btc.yMin)
		c2.Y = min16(yRange[1], btc.yMax)

		for _, area := range areas {
			c1.X = area.X1 + x
			c1.Z = area.Z1 + z
			c2.X = area.X2 + x
			c2.Z = area.Z2 + z
			query := Cuboid{P1: c1, P2: c2}
			var count int
			var err error
			if count, err = btc.client.QueryCuboid(query, btc.blockLoaded); err != nil {
				return err
			}
			if err = btc.yOrder.Drain(); err != nil {
				log.Printf("WARN: rendering block failed: %s\n", err)
			}

			// If blocks were loaded in this area, recalculate the coverage.
			if count > 0 {
				nareas = area.recalculate(btc.renderer, nareas)
			} else {
				nareas = append(nareas, area)
			}
		}

		if len(nareas) == 0 {
			break
		}
		areas, nareas = nareas, areas[:0]
	}
	return nil
}

func (btc *BaseTileCreator) blankImage() []byte {
	// To avoid redundant encoding cache the resulting empty image.
	if btc.emptyImage == nil {
		m := BackgroundImage((tileWidth-2)*16, (tileHeight-2)*16, btc.bg)
		btc.emptyImage = EncodeToMem(m)
	}
	return btc.emptyImage
}

// WriteFunc returns a function intended to be run in the background so
// the creation of the next tile with this creator can be done
// concurrently.
func (btc *BaseTileCreator) WriteFunc(i, j int, update BaseTileUpdateFunc) func() (bool, error) {

	path := filepath.Join(btc.baseDir, strconv.Itoa(i), strconv.Itoa(j)+".png")

	// Empty images are likely to be produced during seeding.
	if update == nil && btc.renderer.IsEmpty() {
		return func() (bool, error) {
			//log.Printf("Writing empty (%d, %d) to file %s\n", x, z, path)
			return true, ioutil.WriteFile(path, btc.blankImage(), 0666)
		}
	}

	image := btc.renderer.CreateShadedImage(
		16, 16, (tileWidth-2)*16, (tileHeight-2)*16,
		btc.colors, btc.bg)

	x, z := btc.renderer.GetPos()

	if update == nil {
		return func() (bool, error) {
			log.Printf("Writing (%d, %d) to file %s.\n", x, z, path)
			return true, SaveAsPNG(path, image)
		}
	}

	return func() (bool, error) {
		if update(i, j, HashImage(image)) {
			log.Printf("Writing (%d, %d) to file %s.\n", x, z, path)
			return true, SaveAsPNGAtomic(path, image)
		}

		log.Printf("(%d, %d) is unchanged.\n", x, z)
		return false, nil
	}
}
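The update callback of WriteFunc has the same signature as BaseTileHash.Update (added below in common/basetilehash.go), so a seeding or update loop can skip tiles whose pixel hash did not change. A minimal wiring sketch, assuming the caller already owns a configured creator; not part of this commit:

// renderTile renders one base tile and writes it only if its hash changed.
func renderTile(btc *BaseTileCreator, hashes *BaseTileHash, i, j int, x, z int16) (bool, error) {
	if err := btc.RenderArea(x, z); err != nil {
		return false, err
	}
	write := btc.WriteFunc(i, j, hashes.Update)
	return write() // the returned closure is designed to be run in a goroutine
}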
92
common/basetilehash.go
Normal file
@@ -0,0 +1,92 @@
// Copyright 2016 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"bytes"
	"sync"
)

type btKey struct {
	x int
	y int
}

type btHashEntry struct {
	prev *btHashEntry
	next *btHashEntry
	hash []byte
	btKey
}

type BaseTileHash struct {
	hashes     map[btKey]*btHashEntry
	maxEntries int
	root       btHashEntry
	sync.Mutex
}

func NewBaseTileHash(maxEntries int) *BaseTileHash {
	bth := &BaseTileHash{
		hashes:     map[btKey]*btHashEntry{},
		maxEntries: maxEntries}
	bth.root.next = &bth.root
	bth.root.prev = &bth.root
	return bth
}

func (bth *BaseTileHash) toFront(entry *btHashEntry) {
	if bth.root.next == entry {
		return
	}
	entry.prev.next = entry.next
	entry.next.prev = entry.prev

	entry.next = bth.root.next
	entry.prev = &bth.root

	bth.root.next.prev = entry
	bth.root.next = entry
}

func (bth *BaseTileHash) removeLast() *btHashEntry {
	last := bth.root.prev
	bth.root.prev = last.prev
	last.prev.next = &bth.root
	delete(bth.hashes, last.btKey)
	return last
}

func (bth *BaseTileHash) insertFront(entry *btHashEntry) {
	entry.next = bth.root.next
	entry.prev = &bth.root
	bth.root.next.prev = entry
	bth.root.next = entry
}

func (bth *BaseTileHash) Update(x, y int, hash []byte) bool {
	bth.Lock()
	defer bth.Unlock()
	key := btKey{x, y}
	if old, found := bth.hashes[key]; found {
		if !bytes.Equal(old.hash, hash) {
			old.hash = hash
			bth.toFront(old)
			return true
		}
		return false
	}
	var entry *btHashEntry
	if len(bth.hashes) >= bth.maxEntries {
		entry = bth.removeLast()
	} else {
		entry = new(btHashEntry)
	}
	entry.btKey = key
	entry.hash = hash
	bth.hashes[key] = entry
	bth.insertFront(entry)
	return true
}
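A short sketch of the intended use of this LRU-style hash store (not part of this commit):

func exampleBaseTileHash() {
	bth := NewBaseTileHash(2) // remember at most two tiles
	a, b := []byte{1}, []byte{2}

	bth.Update(0, 0, a) // true: tile seen for the first time
	bth.Update(0, 0, a) // false: same hash, tile unchanged
	bth.Update(0, 0, b) // true: hash differs, tile must be rewritten
	bth.Update(0, 1, a) // true: second tile
	bth.Update(1, 1, a) // true: third tile, evicts the least recently used entry
}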
147
common/basetilehash_test.go
Normal file
@@ -0,0 +1,147 @@
// Copyright 2016 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"math/rand"
	"testing"
)

func randomBaseTileHash(updates int) *BaseTileHash {
	bth := NewBaseTileHash(256)
	h1 := []byte{1}
	h2 := []byte{2}
	for i := 0; i < updates; i++ {
		x, y := rand.Intn(100), rand.Intn(100)
		var h []byte
		if i%2 == 0 {
			h = h1
		} else {
			h = h2
		}
		bth.Update(x, y, h)
	}
	return bth
}

func TestBaseTileHashLenList(t *testing.T) {
	for _, updates := range []int{53, 111, 1345, 11261} {
		bth := randomBaseTileHash(updates)
		countNext := 0
		for cur := bth.root.next; cur != &bth.root; cur = cur.next {
			countNext++
		}
		countPrev := 0
		for cur := bth.root.prev; cur != &bth.root; cur = cur.prev {
			countPrev++
		}
		if countPrev != countNext {
			t.Errorf("count prev != count next: %d %d", countPrev, countNext)
		}
		if countPrev != len(bth.hashes) {
			t.Errorf("count prev != len(hash): %d %d", countPrev, len(bth.hashes))
		}
	}
}

func TestBaseTileHashIntegrity(t *testing.T) {
	for _, updates := range []int{10, 100, 1000, 10000} {
		bth := randomBaseTileHash(updates)
		entries := map[*btHashEntry]bool{}

		for cur := bth.root.next; cur != &bth.root; cur = cur.next {
			if entries[cur] {
				t.Errorf("hash element found more than once: %d", updates)
			}
			entries[cur] = true
		}
		if len(entries) != len(bth.hashes) {
			t.Errorf("List has different length than hashes: %d : %d",
				len(entries), len(bth.hashes))
		}
		var already1 bool
		var already2 bool
		for k, v := range bth.hashes {
			if !entries[v] {
				if !already1 {
					already1 = true
					t.Errorf("Hash contains pointer to element not being in list: %d",
						updates)
				}
			}
			if k != v.btKey {
				if !already2 {
					already2 = true
					t.Errorf("Key in entry does not match hash key: %d", updates)
				}
			}
			delete(entries, v)
		}

		if len(entries) > 0 {
			t.Error("There are more entries than indexed by hash")
		}
	}
}

func TestBaseTileHashOverwrite(t *testing.T) {
	bth := NewBaseTileHash(256)
	h1 := []byte{1}
	h2 := []byte{2}

	if updated := bth.Update(0, 0, h1); !updated {
		t.Error("First insert does not trigger update")
	}

	if updated := bth.Update(0, 0, h2); !updated {
		t.Error("Second insert does not trigger update")
	}

	if updated := bth.Update(0, 0, h2); updated {
		t.Error("Third insert does trigger update")
	}
}

func TestBaseTileHashSeparate(t *testing.T) {
	bth := NewBaseTileHash(256)
	h1 := []byte{1}

	if updated := bth.Update(0, 0, h1); !updated {
		t.Error("First insert does not trigger update")
	}

	if updated := bth.Update(0, 1, h1); !updated {
		t.Error("Second insert does not trigger update")
	}

	if updated := bth.Update(1, 0, h1); !updated {
		t.Error("Third insert does not trigger update")
	}

	if len(bth.hashes) != 3 {
		t.Errorf("Expected size to be 3. Current size: %d", len(bth.hashes))
	}
}

func TestBaseTileHashLRU(t *testing.T) {
	bth := NewBaseTileHash(2)
	h1 := []byte{1}

	if updated := bth.Update(0, 0, h1); !updated {
		t.Error("First insert does not trigger update")
	}

	if updated := bth.Update(0, 1, h1); !updated {
		t.Error("Second insert does not trigger update")
	}

	if updated := bth.Update(1, 0, h1); !updated {
		t.Error("Third insert does not trigger update")
	}

	if len(bth.hashes) != 2 {
		t.Errorf("Expected size to be 2. Current size: %d", len(bth.hashes))
	}
}
331
common/block.go
Normal file
@@ -0,0 +1,331 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"bytes"
	"compress/zlib"
	"encoding/binary"
	"errors"
	"io"
	"io/ioutil"
	"log"
	"sync"
)

// Error returned if a Producer has run to its end.
var (
	ErrNoMoreBlocks           = errors.New("No more blocks.")
	ErrMapContentSizeMismatch = errors.New("Content size does not match.")
	ErrBlockTruncated         = errors.New("Block is truncated.")
)

const (
	mapBlockSize = 16
	nodeCount    = mapBlockSize * mapBlockSize * mapBlockSize
)

type (
	// Block data from a Minetest database.
	Block struct {
		Coord Coord
		Data  []byte
	}
	// BlockProducer is used to iterate over an existing Minetest database
	// and return its content block by block.
	BlockProducer interface {
		// error is ErrNoMoreBlocks if it ran out of blocks.
		Next(*Block) error
		// Closes the open database connections.
		Close() error
	}

	// BlockConsumer is used to store blocks in a new Minetest database.
	BlockConsumer interface {
		Consume(*Block) error
		// Closes the open database connections.
		Close() error
	}

	DecodedBlock struct {
		Version     byte
		Transparent bool
		MapContent  []byte
		AirID       int32
		IgnoreID    int32
		IndexMap    map[int32]int32
	}
)

// zlibEmpty is a minimal zlib stream with zero length.
// zlib.NewReader needs a valid zlib stream to start with
// even if Reset is called directly afterwards.
var zlibEmpty = []byte{
	0x78, 0x9c, 0x00, 0x00,
	0x00, 0xff, 0xff, 0x01,
	0x00, 0x00, 0xff, 0xff,
	0x00, 0x00, 0x00, 0x01}

// zlibReaderPool is a pool of zlib Readers to be reused
// for decoding the compressed parts of database blocks.
// Decoding blocks relies heavily on zlib decompression.
// Reusing the internal structures of already allocated
// zlib readers speeds up the decoding significantly.
var zlibReaderPool = sync.Pool{
	New: func() interface{} {
		reader, _ := zlib.NewReader(bytes.NewBuffer(zlibEmpty))
		return reader
	},
}

// The content of the map and the meta data are compressed with zlib.
// Unfortunately the byte lengths of these two structures are not stored
// explicitly in the block data. To access the information behind
// them (e.g. the node id mappings) we have to count the bytes consumed
// by the zlib reader and continue our extraction process behind this
// offset. posBuf implements such a counting reader source.
type posBuf struct {
	Data []byte
	Pos  int
}

func NewDecodedBlock(data []byte, colors *Colors) (db *DecodedBlock, err error) {

	dataLen := len(data)
	if dataLen < 4 {
		return nil, ErrBlockTruncated
	}

	version := data[0]

	contentWidth := Min(int(data[2]), 2)
	paramsWidth := Min(int(data[3]), 2)

	uncompressedLen := nodeCount * (contentWidth + paramsWidth)

	var offset int
	switch {
	case version >= 27:
		offset = 6
	case version >= 22:
		offset = 4
	default:
		offset = 2
	}

	zr := zlibReaderPool.Get().(interface {
		io.ReadCloser
		zlib.Resetter
	})
	defer func() {
		zr.Close() // This should not be necessary.
		zlibReaderPool.Put(zr)
	}()

	buf := posBuf{Data: data[offset:]}
	if err = zr.Reset(&buf, nil); err != nil {
		return
	}

	mapContent := make([]byte, uncompressedLen)

	var k int
	k, err = io.ReadFull(zr, mapContent)
	if err != nil {
		return
	}

	if k != uncompressedLen {
		err = ErrMapContentSizeMismatch
		return
	}

	// There is a bug before Go 1.7 that forces us
	// to add 4 as an offset after the compressed
	// geometry data. This is resolved via build tags
	// and definitions in pre17offset.go and
	// post17offset.go.
	offset += buf.Pos + afterCompressOfs
	buf.Pos = 0
	if offset >= dataLen {
		return nil, ErrBlockTruncated
	}
	buf.Data = data[offset:]

	if err = zr.(zlib.Resetter).Reset(&buf, nil); err != nil {
		return
	}

	// Discard the meta data.
	if _, err = io.Copy(ioutil.Discard, zr); err != nil {
		return
	}

	offset += buf.Pos

	switch {
	case version <= 21:
		offset += 2
	case version == 23:
		offset++
	case version == 24:
		if offset >= dataLen {
			return nil, ErrBlockTruncated
		}
		ver := data[offset]
		offset++
		if ver == 1 {
			if offset+1 >= dataLen {
				return nil, ErrBlockTruncated
			}
			num := int(binary.BigEndian.Uint16(data[offset:]))
			offset += 2 + 10*num
		}
	}

	offset++
	if offset+1 >= dataLen {
		return nil, ErrBlockTruncated
	}
	numStaticObjects := int(binary.BigEndian.Uint16(data[offset:]))
	offset += 2
	for i := 0; i < numStaticObjects; i++ {
		offset += 13
		if offset+1 >= dataLen {
			return nil, ErrBlockTruncated
		}
		dataSize := int(binary.BigEndian.Uint16(data[offset:]))
		offset += dataSize + 2
	}
	offset += 4

	airID, ignoreID := int32(-1), int32(-1)
	indexMap := make(map[int32]int32)
	var transparent bool
	if version >= 22 {
		offset++
		if offset+1 >= dataLen {
			return nil, ErrBlockTruncated
		}
		numMappings := int(binary.BigEndian.Uint16(data[offset:]))
		offset += 2

		// Be a bit more tolerant with a truncated node name table.
		// We should probably issue an error here, too!?
		const outOfBounds = "Offset in node id table out of bounds. Ignored."

		for i := 0; i < numMappings; i++ {
			if offset+1 >= dataLen {
				log.Println(outOfBounds)
				break
			}
			nodeID := int32(binary.BigEndian.Uint16(data[offset:]))
			offset += 2
			if offset+1 >= dataLen {
				log.Println(outOfBounds)
				break
			}
			nameLen := int(binary.BigEndian.Uint16(data[offset:]))
			offset += 2
			if offset+nameLen-1 >= dataLen {
				log.Println(outOfBounds)
				break
			}
			name := string(data[offset : offset+nameLen])
			offset += nameLen
			switch name {
			case "air":
				airID = nodeID
			case "ignore":
				ignoreID = nodeID
			default:
				if index, found := colors.NameIndex[name]; found {
					indexMap[nodeID] = index
					if !transparent && colors.IsTransparent(index) {
						transparent = true
					}
				} else {
					logMissing(name)
				}
			}
		}
	}

	db = &DecodedBlock{
		Version:     version,
		Transparent: transparent,
		MapContent:  mapContent,
		AirID:       airID,
		IgnoreID:    ignoreID,
		IndexMap:    indexMap}

	return
}

var missingColors = struct {
	sync.Mutex
	cols map[string]struct{}
}{cols: map[string]struct{}{}}

func logMissing(name string) {
	missingColors.Lock()
	defer missingColors.Unlock()
	if _, found := missingColors.cols[name]; !found {
		missingColors.cols[name] = struct{}{}
		log.Printf("Missing color entry for %s.\n", name)
	}
}

func (db *DecodedBlock) AirOnly() bool {
	return db.AirID != -1 && len(db.IndexMap) == 0
}

func (db *DecodedBlock) Content(x, y, z int) (content int32, found bool) {
	pos := z<<8 + y<<4 + x

	switch {
	case db.Version >= 24:
		pos <<= 1
		content = int32(db.MapContent[pos])<<8 | int32(db.MapContent[pos+1])
	case db.Version >= 20:
		if c := db.MapContent[pos]; c <= 0x80 {
			content = int32(c)
		} else {
			content = int32(c)<<4 | int32(db.MapContent[pos+0x2000])>>4
		}
	default:
		return
	}
	if content != db.AirID && content != db.IgnoreID {
		content, found = db.IndexMap[content]
	}
	return
}

func (pb *posBuf) Read(p []byte) (int, error) {
	pl := len(p)
	ml := len(pb.Data)
	if pb.Pos >= ml {
		return 0, io.EOF
	}
	rest := ml - pb.Pos
	if pl > rest {
		copy(p, pb.Data[pb.Pos:])
		pb.Pos = ml
		return rest, io.EOF
	}
	copy(p, pb.Data[pb.Pos:pb.Pos+pl])
	pb.Pos += pl
	return pl, nil
}

func (pb *posBuf) ReadByte() (byte, error) {
	if pb.Pos >= len(pb.Data) {
		return 0, io.EOF
	}
	c := pb.Data[pb.Pos]
	pb.Pos++
	return c, nil
}
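NewDecodedBlock needs the raw block payload plus the color table so node ids can be mapped to palette indices up front. A consumption sketch (not part of this commit; producer is any BlockProducer implementation):

func decodeAll(producer BlockProducer, colors *Colors) error {
	var block Block
	for {
		if err := producer.Next(&block); err != nil {
			if err == ErrNoMoreBlocks {
				return nil
			}
			return err
		}
		db, err := NewDecodedBlock(block.Data, colors)
		if err != nil {
			return err
		}
		if db.AirOnly() {
			continue // nothing to draw in this block
		}
		// db.Content(x, y, z) now yields indices into colors.Colors.
	}
}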
152
common/colors.go
Normal file
@@ -0,0 +1,152 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"bufio"
	"fmt"
	"image/color"
	"log"
	"os"
	"sort"
	"strconv"
	"strings"
)

// DefaultTransparentDim sets the default dimming
// factor of transparent nodes to 2%.
const DefaultTransparentDim = 2.0 / 100.0

type Colors struct {
	Colors         []color.RGBA
	NameIndex      map[string]int32
	NumTransparent int32
	TransparentDim float32
}

type namedColor struct {
	name  string
	color color.RGBA
}

type sortByAlpha []namedColor

func (colors sortByAlpha) Less(i, j int) bool {
	return colors[i].color.A < colors[j].color.A
}

func (colors sortByAlpha) Len() int {
	return len(colors)
}

func (colors sortByAlpha) Swap(i, j int) {
	colors[i], colors[j] = colors[j], colors[i]
}

func ParseColors(filename string) (colors *Colors, err error) {

	var file *os.File
	if file, err = os.Open(filename); err != nil {
		return
	}
	defer file.Close()

	cols := make([]namedColor, 0, 2200)

	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "#") {
			continue
		}
		c := color.RGBA{A: 0xff}
		var name string
		if n, _ := fmt.Sscanf(
			line, "%s %d %d %d %d", &name, &c.R, &c.G, &c.B, &c.A); n > 0 {
			cols = append(cols, namedColor{name: name, color: c})
		}
	}
	err = scanner.Err()

	// Sort transparent colors to the front. Makes it easier to figure out
	// if an index corresponds to a transparent color (index < NumTransparent).
	sort.Sort(sortByAlpha(cols))

	cs := make([]color.RGBA, len(cols))
	nameIndex := make(map[string]int32, len(cols))

	numTransparent := int32(0)
	for i, nc := range cols {
		if nc.color.A < 0xff {
			numTransparent++
		}
		cs[i] = nc.color
		nameIndex[nc.name] = int32(i)
	}
	colors = &Colors{
		Colors:         cs,
		NameIndex:      nameIndex,
		NumTransparent: numTransparent,
		TransparentDim: DefaultTransparentDim}
	return
}

func (colors *Colors) IsTransparent(index int32) bool {
	return index < colors.NumTransparent
}

func BlendColor(c1, c2 color.RGBA, a float32) color.RGBA {
	b := float32(1) - a
	return color.RGBA{
		R: uint8(float32(c1.R)*a + float32(c2.R)*b),
		G: uint8(float32(c1.G)*a + float32(c2.G)*b),
		B: uint8(float32(c1.B)*a + float32(c2.B)*b),
		A: 0xff}
}

func (colors *Colors) BlendColors(span *Span, col color.RGBA, pos int32) color.RGBA {
	curr := span
	// Ignore colors below pos.
	for ; curr != nil && pos >= curr.To; curr = curr.Next {
	}
	if curr == nil {
		return col
	}
	dim := colors.TransparentDim
	for ; curr != nil; curr = curr.Next {
		c := colors.Colors[curr.Value]
		// At least alpha channel attenuation + dim% extra for each depth meter.
		base := float32(c.A) / 255.0
		factor := min32f(1.0, base+float32(curr.To-curr.From)*dim)
		col = BlendColor(c, col, factor)
	}
	return col
}

func ParseColor(col string) (color.RGBA, error) {
	col = strings.TrimLeft(col, "#")
	rgb, err := strconv.ParseUint(col, 16, 32)
	if err != nil {
		return color.RGBA{}, err
	}
	return color.RGBA{
		R: uint8(rgb >> 16),
		G: uint8(rgb >> 8),
		B: uint8(rgb),
		A: 0xff}, nil
}

func ParseColorDefault(col string, def color.RGBA) color.RGBA {
	c, err := ParseColor(col)
	if err != nil {
		log.Printf("WARN: cannot parse color '%s': %s\n", col, err)
		return def
	}
	return c
}

func ColorToHex(col color.RGBA) string {
	return fmt.Sprintf("#%02x%02x%02x", col.R, col.G, col.B)
}
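ParseColor accepts a hex triplet with or without a leading '#', and ParseColorDefault falls back to a given color when parsing fails. A small sketch in package common (not part of this commit; it additionally imports fmt):

func exampleColors() {
	bg := ParseColorDefault("#ffffff", color.RGBA{R: 0xff, G: 0xff, B: 0xff, A: 0xff})
	c, err := ParseColor("336699")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(ColorToHex(c), ColorToHex(bg)) // #336699 #ffffff
}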
311
common/coords.go
Normal file
@@ -0,0 +1,311 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"encoding/binary"
	"fmt"
	"strconv"
)

const (
	numBitsPerComponent = 12
	modulo              = 1 << numBitsPerComponent
	maxPositive         = modulo / 2
	minValue            = -1 << (numBitsPerComponent - 1)
	maxValue            = 1<<(numBitsPerComponent-1) - 1
)

type (
	Coord struct {
		X, Y, Z int16
	}

	Cuboid struct {
		P1, P2 Coord
	}

	KeyTransformer func(int64) int64
	KeyEncoder     func(int64) ([]byte, error)
	KeyDecoder     func([]byte) (int64, error)
	KeyTranscoder  func([]byte) ([]byte, error)
	KeySplitter    func(int64) Coord
	KeyJoiner      func(Coord) int64
)

func (cub Cuboid) Contains(c Coord) bool {
	return c.X >= cub.P1.X && c.X <= cub.P2.X &&
		c.Y >= cub.P1.Y && c.Y <= cub.P2.Y &&
		c.Z >= cub.P1.Z && c.Z <= cub.P2.Z
}

func (c Coord) String() string {
	return fmt.Sprintf("(%d, %d, %d)", c.X, c.Y, c.Z)
}

func clipComponent(x int16) int16 {
	if x < minValue {
		return minValue
	}
	if x > maxValue {
		return maxValue
	}
	return x
}

func ClipCoord(c Coord) Coord {
	return Coord{
		X: clipComponent(c.X),
		Y: clipComponent(c.Y),
		Z: clipComponent(c.Z)}
}

func MinCoord(a, b Coord) Coord {
	return Coord{
		X: min16(a.X, b.X),
		Y: min16(a.Y, b.Y),
		Z: min16(a.Z, b.Z)}
}

func MaxCoord(a, b Coord) Coord {
	return Coord{
		X: max16(a.X, b.X),
		Y: max16(a.Y, b.Y),
		Z: max16(a.Z, b.Z)}
}

// DecodeStringFromBytes constructs a database key out of a byte slice.
func DecodeStringFromBytes(key []byte) (pos int64, err error) {
	return strconv.ParseInt(string(key), 10, 64)
}

func keyToBytes(key int64, buf []byte) []byte {
	return strconv.AppendInt(buf, key, 10)
}

func StringToBytes(key int64) []byte {
	return strconv.AppendInt(nil, key, 10)
}

// EncodeStringToBytes encodes a block pos to a byte slice.
func EncodeStringToBytes(key int64) ([]byte, error) {
	return StringToBytes(key), nil
}

func ToBigEndian(key int64) []byte {
	enc := make([]byte, 8)
	binary.BigEndian.PutUint64(enc, uint64(key))
	return enc
}

func EncodeToBigEndian(key int64) ([]byte, error) {
	return ToBigEndian(key), nil
}

func FromBigEndian(key []byte) int64 {
	return int64(binary.BigEndian.Uint64(key))
}

func DecodeFromBigEndian(key []byte) (int64, error) {
	return FromBigEndian(key), nil
}

func CoordToInterleaved(c Coord) (result int64) {
	const end = 1 << (numBitsPerComponent + 1)
	x := c.X - minValue
	y := c.Y - minValue
	z := c.Z - minValue
	setmask := int64(1)
	for mask := int16(1); mask != end; mask <<= 1 {
		if x&mask != 0 {
			result |= setmask
		}
		setmask <<= 1
		if y&mask != 0 {
			result |= setmask
		}
		setmask <<= 1
		if z&mask != 0 {
			result |= setmask
		}
		setmask <<= 1
	}
	return
}

func InterleavedToCoord(pos int64) Coord {
	const end = 1 << (numBitsPerComponent + 1)
	var x, y, z int16
	for mask := int16(1); mask != end; mask <<= 1 {
		if pos&1 == 1 {
			x |= mask
		}
		pos >>= 1
		if pos&1 == 1 {
			y |= mask
		}
		pos >>= 1
		if pos&1 == 1 {
			z |= mask
		}
		pos >>= 1
	}
	return Coord{X: x + minValue, Y: y + minValue, Z: z + minValue}
}

func CoordToPlain(c Coord) int64 {
	return int64(c.Z)<<(2*numBitsPerComponent) +
		int64(c.Y)<<numBitsPerComponent +
		int64(c.X)
}

func unsignedToSigned(i int16) int16 {
	if i < maxPositive {
		return i
	}
	return i - maxPositive*2
}

// To match the C++ code.
func pythonModulo(i int16) int16 {
	const mask = modulo - 1
	if i >= 0 {
		return i & mask
	}
	return modulo - -i&mask
}

func PlainToCoord(i int64) (c Coord) {
	c.X = unsignedToSigned(pythonModulo(int16(i)))
	i = (i - int64(c.X)) >> numBitsPerComponent
	c.Y = unsignedToSigned(pythonModulo(int16(i)))
	i = (i - int64(c.Y)) >> numBitsPerComponent
	c.Z = unsignedToSigned(pythonModulo(int16(i)))
	return
}

func TransformPlainToInterleaved(pos int64) int64 {
	return CoordToInterleaved(PlainToCoord(pos))
}

func TransformInterleavedToPlain(pos int64) int64 {
	return CoordToPlain(InterleavedToCoord(pos))
}

func DecodeStringFromBytesToInterleaved(key []byte) (v int64, err error) {
	if v, err = DecodeStringFromBytes(key); err != nil {
		return
	}
	v = TransformPlainToInterleaved(v)
	return
}

func DecodeStringBytesToCoord(key []byte) (coord Coord, err error) {
	var k int64
	if k, err = DecodeStringFromBytes(key); err != nil {
		return
	}
	coord = PlainToCoord(k)
	return
}

func EncodeStringToBytesFromInterleaved(key int64) ([]byte, error) {
	return EncodeStringToBytes(TransformInterleavedToPlain(key))
}

func IdentityTranscoder(key []byte) ([]byte, error) {
	return key, nil
}

func TranscodePlainToInterleaved(key []byte) ([]byte, error) {
	pos, err := DecodeStringFromBytesToInterleaved(key)
	if err != nil {
		return nil, err
	}
	return EncodeToBigEndian(pos)
}

func TranscodeInterleavedToPlain(key []byte) ([]byte, error) {
	pos, err := DecodeFromBigEndian(key)
	if err != nil {
		return nil, err
	}
	return EncodeStringToBytes(TransformInterleavedToPlain(pos))
}

// NaiveBigMin is for correctness checks of BigMin only.
func NaiveBigMin(minz, maxz, zcode int64) int64 {
	var (
		c1   = InterleavedToCoord(minz)
		c2   = InterleavedToCoord(maxz)
		cand = maxz
		c    Coord
	)

	for c.X = c1.X; c.X <= c2.X; c.X++ {
		for c.Y = c1.Y; c.Y <= c2.Y; c.Y++ {
			for c.Z = c1.Z; c.Z <= c2.Z; c.Z++ {
				if z := CoordToInterleaved(c); z > zcode && z < cand {
					cand = z
				}
			}
		}
	}

	return cand
}

const (
	msb  = uint8(3*numBitsPerComponent - 1)
	mask = int64(0x924924924)
	full = int64(0xfffffffff)
)

func setbits(p uint8, v int64) int64 {
	m := (mask >> (msb - p)) & (^(full << p) & full)
	return (v | m) & ^(1 << p) & full
}

func unsetbits(p uint8, v int64) int64 {
	m := ^(mask >> (msb - p)) & full
	return (v & m) | (int64(1) << p)
}

func BigMin(minz, maxz, zcode int64) int64 {
	const (
		b001 = 1
		b010 = 2
		b011 = 2 | 1
		b100 = 4
		b101 = 4 | 1
	)
	bigmin := maxz
	pos := msb
	for m := int64(1) << msb; m != 0; m >>= 1 {
		var v uint8
		if zcode&m == m {
			v = b100
		}
		if minz&m == m {
			v |= b010
		}
		if maxz&m == m {
			v |= b001
		}
		switch v {
		case b001:
			bigmin = unsetbits(pos, minz)
			maxz = setbits(pos, maxz)
		case b011:
			return minz
		case b100:
			return bigmin
		case b101:
			minz = unsetbits(pos, minz)
		}
		pos--
	}
	return bigmin
}
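The two key encodings round-trip through Coord, and the interleaved ("Z-order") keys are what make cuboid queries range-scannable: all blocks of a cuboid lie between the interleaved codes of its corners, and BigMin skips code ranges that leave the cuboid. A sketch in package common (not part of this commit; it additionally imports fmt):

func exampleCoords() {
	c := Coord{X: -10, Y: 42, Z: 7}

	plain := CoordToPlain(c)       // key layout used by the Minetest databases
	zcode := CoordToInterleaved(c) // Z-order key used for spatial queries

	fmt.Println(PlainToCoord(plain) == c)       // true
	fmt.Println(InterleavedToCoord(zcode) == c) // true

	// Within a scan, BigMin(zmin, zmax, z) yields the next Z-order code
	// greater than z that lies inside the cuboid spanned by zmin and zmax.
	zmin := CoordToInterleaved(Coord{X: 0, Y: 0, Z: 0})
	zmax := CoordToInterleaved(Coord{X: 15, Y: 15, Z: 15})
	fmt.Println(BigMin(zmin, zmax, zmin) > zmin) // true
}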
227
common/coords_test.go
Normal file
@@ -0,0 +1,227 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"math/rand"
	"testing"
)

var data = []int16{
	-2045, -1850, -1811, -1629, -1104,
	-967, -725, -646, -329, -212,
	-150, -1, 0, 1, 88, 524, 527, 549,
	1783, 1817, 1826, 2028, 2032}

func allData(f func(Coord)) {
	for _, z := range data {
		for _, y := range data {
			for _, x := range data {
				f(Coord{X: x, Y: y, Z: z})
			}
		}
	}
}

func checkEncodeDecode(
	desc string,
	join KeyJoiner,
	encode KeyEncoder, decode KeyDecoder,
	c Coord, t *testing.T) {

	k1 := join(c)
	var err error
	var b []byte
	if b, err = encode(k1); err != nil {
		t.Errorf("%s: Failed to encode %s %s\n", desc, c, err)
		return
	}
	var k2 int64
	if k2, err = decode(b); err != nil {
		t.Errorf("%s: Failed to decode %s %s\n", desc, c, err)
		return
	}

	if k1 != k2 {
		t.Errorf("%s: Expected %d got %d for %s\n", desc, k1, k2, c)
	}
}

func TestEncodeDecode(t *testing.T) {
	allData(func(c Coord) {
		checkEncodeDecode(
			"Big endian - interleaved",
			CoordToInterleaved,
			EncodeToBigEndian, DecodeFromBigEndian,
			c, t)
	})
	allData(func(c Coord) {
		checkEncodeDecode(
			"String - interleaved",
			CoordToInterleaved,
			EncodeStringToBytes, DecodeStringFromBytes,
			c, t)
	})
	allData(func(c Coord) {
		checkEncodeDecode(
			"Big endian - plain",
			CoordToPlain,
			EncodeToBigEndian, DecodeFromBigEndian,
			c, t)
	})
	allData(func(c Coord) {
		checkEncodeDecode(
			"String - plain",
			CoordToPlain,
			EncodeStringToBytes, DecodeStringFromBytes,
			c, t)
	})
}

func checkJoinSplit(
	desc string,
	join KeyJoiner, split KeySplitter,
	c Coord, t *testing.T) {

	k := join(c)
	s := split(k)
	if s != c {
		t.Errorf("%s: Expected %s got %s %b\n", desc, c, s, k)
	}
}

func TestJoinSplit(t *testing.T) {
	allData(func(c Coord) {
		checkJoinSplit(
			"P2C(C2P(xyz))",
			CoordToPlain, PlainToCoord,
			c, t)
	})
	allData(func(c Coord) {
		checkJoinSplit(
			"I2C(C2I(xyz))",
			CoordToInterleaved, InterleavedToCoord,
			c, t)
	})
}

func checkTransformer(
	desc string, joiner KeyJoiner,
	transform KeyTransformer,
	c Coord, t *testing.T) {

	k1 := joiner(c)
	k2 := transform(k1)
	if k2 != k1 {
		t.Errorf("%s: Expected %v got %v for %s\n", desc, k1, k2, c)
	}
}

func compose(transforms ...KeyTransformer) KeyTransformer {
	return func(x int64) int64 {
		for _, transform := range transforms {
			x = transform(x)
		}
		return x
	}
}

func TestTransforms(t *testing.T) {
	// Mainly to check the test itself.
	allData(func(c Coord) {
		checkTransformer(
			"plain",
			CoordToPlain,
			compose(),
			c, t)
	})
	allData(func(c Coord) {
		checkTransformer(
			"I2P(P2I(plain))",
			CoordToPlain,
			compose(TransformPlainToInterleaved, TransformInterleavedToPlain),
			c, t)
	})
	allData(func(c Coord) {
		checkTransformer(
			"P2I(I2P(interleaved))",
			CoordToInterleaved,
			compose(TransformInterleavedToPlain, TransformPlainToInterleaved),
			c, t)
	})
}

func TestCoordInterleaving(t *testing.T) {
	allData(func(c Coord) {
		d := InterleavedToCoord(CoordToInterleaved(c))
		if c != d {
			t.Errorf("Expected %v got %v\n", c, d)
		}
	})
}

func outsiders(zmin, zmax int64, fn func(int64)) {

	c1 := InterleavedToCoord(zmin)
	c2 := InterleavedToCoord(zmax)
	cub := Cuboid{P1: c1, P2: c2}
	var c Coord
	for c.X = c1.X; c.X <= c2.X; c.X++ {
		for c.Y = c1.Y; c.Y <= c2.Y; c.Y++ {
			for c.Z = c1.Z; c.Z <= c2.Z; c.Z++ {
				zn := CoordToInterleaved(c) + 1
				if zn > zmin && zn < zmax && !cub.Contains(InterleavedToCoord(zn)) {
					fn(zn)
				}
			}
		}
	}
}

func TestBigMin(t *testing.T) {
	const tries = 20
	for i := 0; i < tries; i++ {
		x1 := rand.Intn(4000) - 2000
		y1 := rand.Intn(4000) - 2000
		z1 := rand.Intn(4000) - 2000
		w := rand.Intn(18) + 1
		h := rand.Intn(18) + 1
		d := rand.Intn(18) + 1
		x2 := x1 + w
		y2 := y1 + h
		z2 := z1 + d

		c1 := Coord{X: int16(x1), Y: int16(y1), Z: int16(z1)}
		c2 := Coord{X: int16(x2), Y: int16(y2), Z: int16(z2)}

		zmin := CoordToInterleaved(c1)
		zmax := CoordToInterleaved(c2)

		if zmin > zmax {
			t.Errorf("zmin > zmax: %d > %d\n", zmin, zmax)
		}

		errors, success := 0, 0
		outsiders(zmin, zmax, func(zcode int64) {
			nbm := NaiveBigMin(zmin, zmax, zcode)
			cbm := BigMin(zmin, zmax, zcode)
			//fmt.Printf("nbm: %b\n", nbm)
			//fmt.Printf("cbm: %b\n", cbm)
			if nbm != cbm {
				errors++
			} else {
				success++
			}
		})
		if errors > 0 {
			cub := Cuboid{P1: c1, P2: c2}
			t.Errorf("BigMin: %s (%d %d) %d errors out of %d (%f)\n",
				cub,
				zmin, zmax,
				errors, errors+success,
				float64(errors)/float64(errors+success))
		}
	}
}
86
common/coverage.go
Normal file
@@ -0,0 +1,86 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import "sync"

type zRange struct {
	y1     int16
	y2     int16
	xRange *Span
}

type Coverage3D struct {
	pool    *SpanPool
	zRanges map[int16]*zRange
	mu      sync.RWMutex
}

type Range struct {
	Z  int16
	Y1 int16
	Y2 int16
	X1 int16
	X2 int16
}

func NewCoverage3D() *Coverage3D {
	return &Coverage3D{
		pool:    NewSpanPool(),
		zRanges: map[int16]*zRange{}}
}

func (c3d *Coverage3D) Insert(c Coord) {
	c3d.mu.Lock()
	defer c3d.mu.Unlock()
	zr := c3d.zRanges[c.Z]
	if zr == nil {
		xr := c3d.pool.Alloc()
		xr.From = int32(c.X)
		xr.To = int32(c.X)
		xr.Next = nil
		c3d.zRanges[c.Z] = &zRange{
			y1:     c.Y,
			y2:     c.Y,
			xRange: xr}
		return
	}
	zr.xRange = c3d.pool.Insert(zr.xRange, int32(c.X), 0)
	if c.Y < zr.y1 {
		zr.y1 = c.Y
	}
	if c.Y > zr.y2 {
		zr.y2 = c.Y
	}
}

func (c3d *Coverage3D) Query(c1, c2 Coord) []Range {

	c1, c2 = MinCoord(c1, c2), MaxCoord(c1, c2)

	c3d.mu.RLock()
	defer c3d.mu.RUnlock()

	r := make([]Range, 0, 32)
	for z := c1.Z; z <= c2.Z; z++ {
		zr := c3d.zRanges[z]
		if zr == nil || c1.Y > zr.y2 || c2.Y < zr.y1 {
			continue
		}
		y1, y2 := max16(c1.Y, zr.y1), min16(c2.Y, zr.y2)
		for xr := zr.xRange; xr != nil && xr.From <= int32(c2.X); xr = xr.Next {
			if xr.To < int32(c1.X) {
				continue
			}
			r = append(r, Range{
				Z:  z,
				Y1: y1,
				Y2: y2,
				X1: max16(c1.X, int16(xr.From)),
				X2: min16(c2.X, int16(xr.To))})
		}
	}
	return r
}
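Coverage3D records, per z slice, the y extent and a span list of x values of all inserted block coordinates, so later passes can ask which already-known blocks intersect a cuboid. A usage sketch in package common (not part of this commit; it additionally imports fmt):

func exampleCoverage3D() {
	cov := NewCoverage3D()
	cov.Insert(Coord{X: 1, Y: 5, Z: 0})
	cov.Insert(Coord{X: 2, Y: 7, Z: 0})
	cov.Insert(Coord{X: 9, Y: 3, Z: 4})

	// All covered ranges intersecting the cuboid (0,0,0)-(10,10,10):
	for _, r := range cov.Query(Coord{X: 0, Y: 0, Z: 0}, Coord{X: 10, Y: 10, Z: 10}) {
		fmt.Printf("z=%d y=[%d,%d] x=[%d,%d]\n", r.Z, r.Y1, r.Y2, r.X1, r.X2)
	}
}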
136
common/image.go
Normal file
@@ -0,0 +1,136 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"bufio"
	"bytes"
	"errors"
	"image"
	"image/color"
	"image/draw"
	"image/png"
	"log"
	"os"
	"strconv"
	"sync"
	"time"

	"golang.org/x/crypto/blake2b"

	"github.com/bamiaux/rez"
)

// ResizeFilter is used to scale down the pyramid tiles.
var ResizeFilter = rez.NewLanczosFilter(3)

var rrand uint32
var rrandmu sync.Mutex

func reseed() uint32 {
	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}

func nextSuffix() string {
	rrandmu.Lock()
	r := rrand
	if r == 0 {
		r = reseed()
	}
	r = r*1664525 + 1013904223 // constants from Numerical Recipes
	rrand = r
	rrandmu.Unlock()
	return strconv.Itoa(int(1e9 + r%1e9))[1:]
}

func EncodeToMem(img image.Image) []byte {
	var buf bytes.Buffer
	enc := png.Encoder{CompressionLevel: png.BestCompression}
	if err := enc.Encode(&buf, img); err != nil {
		// This really should not happen.
		panic(err)
	}
	return buf.Bytes()
}

func SaveAsPNG(path string, img image.Image) (err error) {
	var file *os.File
	if file, err = os.Create(path); err != nil {
		return
	}
	writer := bufio.NewWriter(file)
	err = png.Encode(writer, img)
	writer.Flush()
	file.Close()
	return
}

func tmpName(tmpl string) (string, error) {
	tmpPre := tmpl + ".tmp"
	nconflict := 0
	for i := 0; i < 10000; i++ {
		tmp := tmpPre + nextSuffix()
		if _, err := os.Stat(tmp); err != nil {
			if os.IsNotExist(err) {
				return tmp, nil
			}
			return "", err
		}
		if nconflict++; nconflict > 10 {
			nconflict = 0
			rrand = reseed()
		}
	}
	return "", errors.New("Cannot create temp name")
}

func SaveAsPNGAtomic(path string, img image.Image) (err error) {

	var tmpPath string
	if tmpPath, err = tmpName(path); err != nil {
		return
	}
	// Still a bit racy
	if err = SaveAsPNG(tmpPath, img); err != nil {
		return
	}
	return os.Rename(tmpPath, path)
}

func LoadPNG(path string, bg color.RGBA) image.Image {
	var err error
	var file *os.File
	if file, err = os.Open(path); err != nil {
		return image.NewUniform(bg)
	}
	defer file.Close()
	reader := bufio.NewReader(file)
	var img image.Image
	if img, err = png.Decode(reader); err != nil {
		log.Printf("WARN: decoding '%s' failed: %s\n", path, err)
		return image.NewUniform(bg)
	}
	return img
}

func HashImage(img *image.RGBA) []byte {

	hash, _ := blake2b.New256(nil)
	w, h := img.Rect.Dx()*4, img.Rect.Dy()

	pos := img.PixOffset(img.Rect.Min.X, img.Rect.Min.Y)

	for ; h > 0; h, pos = h-1, pos+img.Stride {
		hash.Write(img.Pix[pos : pos+w])
	}

	return hash.Sum(nil)
}

func BackgroundImage(width, height int, bg color.RGBA) *image.RGBA {
	m := image.NewRGBA(image.Rect(0, 0, width, height))
	draw.Draw(m, m.Bounds(), &image.Uniform{bg}, image.ZP, draw.Src)
	return m
}
78
common/math.go
Normal file
@@ -0,0 +1,78 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

func Max(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func Min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func max32(a, b int32) int32 {
	if a > b {
		return a
	}
	return b
}

func max16(a, b int16) int16 {
	if a > b {
		return a
	}
	return b
}

func min16(a, b int16) int16 {
	if a < b {
		return a
	}
	return b
}

func min32f(a, b float32) float32 {
	if a < b {
		return a
	}
	return b
}

func Clamp32f(x, a, b float32) float32 {
	switch {
	case x < a:
		return a
	case x > b:
		return b
	}
	return x
}

func Order(a, b int) (int, int) {
	if a < b {
		return a, b
	}
	return b, a
}

func Order16(a, b int16) (int16, int16) {
	if a < b {
		return a, b
	}
	return b, a
}

func Order64(a, b int64) (int64, int64) {
	if a < b {
		return a, b
	}
	return b, a
}
10
common/post17offset.go
Normal file
@@ -0,0 +1,10 @@
// Copyright 2016 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

// +build go1.7

package common

// afterCompressOfs is not necessary after Go 1.7.
const afterCompressOfs = 0
10
common/pre17offset.go
Normal file
@@ -0,0 +1,10 @@
// Copyright 2016 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

// +build !go1.7

package common

// afterCompressOfs is necessary before Go 1.7.
const afterCompressOfs = 4
161
common/redisclient.go
Normal file
@@ -0,0 +1,161 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"net"
	"strconv"
	"unicode"
)

type RedisClient struct {
	conn    net.Conn
	reader  *bufio.Reader
	arena   []byte
	scratch [130]byte
}

func NewRedisClient(network, address string) (client *RedisClient, err error) {
	var conn net.Conn
	if conn, err = net.Dial(network, address); err != nil {
		return
	}

	client = &RedisClient{conn: conn, reader: bufio.NewReaderSize(conn, 8*1024)}
	return
}

func (client *RedisClient) Close() error {
	return client.conn.Close()
}

var (
	writeArray4 = []byte("*4\r\n")
	hspatial    = []byte("HSPATIAL")
	nl          = []byte("\r\n")
	ignore      = []byte("IGNORE")
)

func writeBulkString(buf []byte, data []byte) []byte {
	buf = append(buf, '$')
	buf = strconv.AppendInt(buf, int64(len(data)), 10)
	buf = append(buf, nl...)
	buf = append(buf, data...)
	buf = append(buf, nl...)
	return buf
}

func (client *RedisClient) writeHSpatial(p1, p2 int64) error {
	tmp := client.scratch[:0:40]
	buf := client.scratch[40:40]
	buf = append(buf, writeArray4...)
	buf = writeBulkString(buf, hspatial)
	buf = writeBulkString(buf, ignore)
	buf = writeBulkString(buf, keyToBytes(p1, tmp))
	buf = writeBulkString(buf, keyToBytes(p2, tmp))
	_, err := client.conn.Write(buf)
	return err
}

func isError(line []byte) error {
	if len(line) > 0 && line[0] == '-' {
		return fmt.Errorf("error: %s", line[1:])
	}
	return nil
}

// parseSize is a cheaper replacement for fmt.Sscanf(string(line), "$%d\r\n", &size).
func parseSize(line []byte) (int, error) {
	if len(line) < 1 || line[0] != '$' {
		return 0, errors.New("Missing '$' at begin of line")
	}
	line = bytes.TrimFunc(line[1:], unicode.IsSpace)
	v, err := strconv.ParseInt(string(line), 10, 0)
	return int(v), err
}

func (client *RedisClient) alloc(size int) []byte {
	a := client.arena
	if len(a) < size {
		a = make([]byte, Max(size, 16*1024))
	}
	x := a[:size:size]
	client.arena = a[size:]
	return x
}

func (client *RedisClient) readBulkString(data *[]byte) (size int, err error) {
	var line []byte
	if line, err = client.reader.ReadBytes('\n'); err != nil {
		return
	}
	if err = isError(line); err != nil {
		return
	}
	if size, err = parseSize(line); err != nil || size <= 0 {
		return
	}
	if cap(*data) < size {
		*data = client.alloc(size)
	} else {
		*data = (*data)[:size]
	}
	for rest := size; rest > 0; {
		var n int
		if n, err = client.reader.Read((*data)[size-rest : size]); err != nil {
			return
		}
		rest -= n
	}
	_, err = client.reader.ReadBytes('\n')
	return
}

func (client *RedisClient) QueryCuboid(cuboid Cuboid, fn func(*Block) *Block) (count int, err error) {
	p1 := CoordToPlain(cuboid.P1)
	p2 := CoordToPlain(cuboid.P2)
	if err = client.writeHSpatial(p1, p2); err != nil {
		return
	}

	var (
		block *Block
		size  int
		key   int64
		data  []byte
	)

	for s := client.scratch[:]; ; count++ {
		p := &s
		if size, err = client.readBulkString(p); err != nil {
			return
		}
		if size <= 0 {
			break
		}
		if key, err = DecodeStringFromBytes(*p); err != nil {
			return
		}
		if size, err = client.readBulkString(&data); err != nil || size < 0 {
			return
		}
		if block == nil {
			block = &Block{Coord: PlainToCoord(key), Data: data}
		} else {
			*block = Block{Coord: PlainToCoord(key), Data: data}
		}
		if block = fn(block); block != nil {
			data = block.Data[:0]
		} else {
			data = nil
		}
	}

	return
}
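QueryCuboid streams all blocks of a cuboid from a Redis-protocol server that understands the HSPATIAL command and hands each block to a callback; returning the block lets the client reuse its Data buffer for the next one. Sketch only (not part of this commit; the address is a placeholder):

func exampleQueryCuboid() error {
	client, err := NewRedisClient("tcp", "localhost:6379")
	if err != nil {
		return err
	}
	defer client.Close()

	cuboid := Cuboid{
		P1: Coord{X: 0, Y: -16, Z: 0},
		P2: Coord{X: 15, Y: 15, Z: 15}}

	count, err := client.QueryCuboid(cuboid, func(b *Block) *Block {
		// b.Data is only valid until the block is handed back for reuse,
		// so decode or copy it here.
		return b
	})
	if err != nil {
		return err
	}
	log.Printf("loaded %d blocks", count)
	return nil
}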
397
common/renderer.go
Normal file
@ -0,0 +1,397 @@
|
||||
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"image"
	"image/color"
	"math"
)

type Renderer struct {
	width             int
	height            int
	xOfs              int16
	zOfs              int16
	yBuffer           []int32
	yMin              []int32
	cBuffer           []int32
	RejectedBlocks    int
	SolidBlocks       int
	TransparentBlocks int
	spans             *SpanPool
	tBuffer           []*Span
}

func NewRenderer(width, height int, transparent bool) (renderer *Renderer) {
	dim := width * height
	pixSize := dim * 16 * 16
	yBuffer := make([]int32, pixSize)
	cBuffer := make([]int32, pixSize)
	yMin := make([]int32, dim)

	var tBuffer []*Span
	var spans *SpanPool

	if transparent {
		tBuffer = make([]*Span, pixSize)
		spans = NewSpanPool()
	}

	renderer = &Renderer{
		width:   width,
		height:  height,
		yBuffer: yBuffer,
		cBuffer: cBuffer,
		yMin:    yMin,
		tBuffer: tBuffer,
		spans:   spans}

	renderer.initBuffers()
	return
}

func (r *Renderer) SetPos(xOfs, zOfs int16) {
	r.xOfs = xOfs
	r.zOfs = zOfs
}

func (r *Renderer) GetPos() (int16, int16) {
	return r.xOfs, r.zOfs
}

func (r *Renderer) initBuffers() {
	yb := r.yBuffer
	yb = yb[:len(yb)]
	for i := range yb {
		yb[i] = math.MinInt32
	}

	cb := r.cBuffer
	cb = cb[:len(cb)]
	for i := range cb {
		cb[i] = -1
	}

	ym := r.yMin
	ym = ym[:len(ym)]
	for i := range ym {
		ym[i] = math.MinInt32
	}
}

func (r *Renderer) Reset() {

	r.RejectedBlocks = 0

	if r.SolidBlocks > 0 || r.TransparentBlocks > 0 {
		r.SolidBlocks = 0
		r.initBuffers()
	}

	if r.TransparentBlocks > 0 {
		r.TransparentBlocks = 0

		tb := r.tBuffer
		for i, t := range tb {
			if t != nil {
				r.spans.FreeAll(t)
				tb[i] = nil
			}
		}
	}
}

func (r *Renderer) IsFilled() bool {
	for _, y := range r.yMin {
		if y == math.MinInt32 {
			return false
		}
	}
	return true
}

func (r *Renderer) IsEmpty() bool {
	return r.SolidBlocks == 0 && r.TransparentBlocks == 0
}

// down goes down the y direction in a block from top to bottom.
// Its loop copies the logic of Block.Content, pulling things like
// the version check and the common indexing out of the loop to
// save some cycles.
func down(db *DecodedBlock, x, y, z int) (int32, int) {
	mc := db.MapContent
	switch {
	case db.Version >= 24:
		for sliver := (z<<8 + x) << 1; y >= 0; y-- {
			pos := sliver + y<<5
			content := int32(mc[pos])<<8 | int32(mc[pos+1])
			if content != db.AirID && content != db.IgnoreID {
				if c, found := db.IndexMap[content]; found {
					return c, y
				}
			}
		}
	case db.Version >= 20:
		for sliver := z<<8 + x; y >= 0; y-- {
			pos := sliver + y<<4
			var content int32
			if c := mc[pos]; c <= 0x80 {
				content = int32(c)
			} else {
				content = int32(c)<<4 | int32(mc[pos+0x2000])>>4
			}
			if content != db.AirID && content != db.IgnoreID {
				if c, found := db.IndexMap[content]; found {
					return c, y
				}
			}
		}
	}
	return -1, -1
}
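
// For illustration (the concrete numbers are picked for this example):
// nodes are laid out x-fastest, so the flat node index is z*256 + y*16 + x.
// With map format version >= 24 every node occupies two big-endian bytes,
// so the node at x=3, y=5, z=2 starts at byte 2*(2*256 + 5*16 + 3) = 1190.
// With versions 20..23 the same index addresses a single byte, optionally
// extended by the high nibble of the byte at offset pos+0x2000.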

func (r *Renderer) RenderBlock(block *Block, colors *Colors) (err error) {

	bx := block.Coord.X - r.xOfs
	bz := block.Coord.Z - r.zOfs

	// We do not need to render the block if the whole 16x16 area
	// is already filled and the block is strictly below.
	blockY := int32(block.Coord.Y) << 4
	pos := int(bz)*r.width + int(bx)
	if blockY < r.yMin[pos] {
		r.RejectedBlocks++
		return
	}

	// Decoding is pretty expensive, so do it as late as possible.
	var db *DecodedBlock
	if db, err = NewDecodedBlock(block.Data, colors); err != nil {
		return
	}

	if db.AirOnly() {
		r.RejectedBlocks++
		return
	}

	w := r.width << 4
	ofs := int(bz)*w<<4 + int(bx)<<4
	yB := r.yBuffer
	yMin := int32(math.MaxInt32)

	if db.Transparent && r.tBuffer != nil {
		r.TransparentBlocks++

		for z := 0; z < 16; z++ {
			for x := 0; x < 16; x++ {
				currentY := yB[ofs]
				if currentY < blockY {
					var c int32
					for y := 15; ; y-- {
						if c, y = down(db, x, y, z); y < 0 {
							break
						}
						cY := blockY + int32(y)

						if colors.IsTransparent(c) {
							r.tBuffer[ofs] = r.spans.Insert(r.tBuffer[ofs], cY, c)
							// We need to continue to go down because we
							// can see through this node.
						} else {
							r.cBuffer[ofs] = c
							currentY = cY
							yB[ofs] = currentY
							break
						}
					}
				}
				if currentY < yMin {
					yMin = currentY
				}
				ofs++
			}
			ofs += w - 16
		}

	} else {
		r.SolidBlocks++
		for z := 0; z < 16; z++ {
			for x := 0; x < 16; x++ {
				currentY := yB[ofs]
				if currentY < blockY {
					if c, y := down(db, x, 15, z); y >= 0 {
						r.cBuffer[ofs] = c
						currentY = blockY + int32(y)
						yB[ofs] = currentY
					}
				}
				if currentY < yMin {
					yMin = currentY
				}
				ofs++
			}
			ofs += w - 16
		}
	}

	r.yMin[pos] = yMin

	return
}

func (r *Renderer) CreateImage(colors []color.RGBA, background color.RGBA) *image.RGBA {
	pw, ph := r.width<<4, r.height<<4
	image := image.NewRGBA(image.Rect(0, 0, pw, ph))
	ofs, numCols := 0, int32(len(colors))
	for z := ph - 1; z >= 0; z-- {
		for x := 0; x < pw; x++ {
			colIdx := r.cBuffer[ofs]
			if colIdx >= 0 && colIdx < numCols {
				image.Set(x, z, colors[colIdx])
			} else {
				image.Set(x, z, background)
			}
			ofs++
		}
	}
	return image
}

func safeColor(x int32) uint8 {
	switch {
	case x < 0:
		return 0
	case x > 255:
		return 255
	default:
		return uint8(x)
	}
}

func (r *Renderer) CreateShadedImage(
	xOfs, zOfs, width, height int,
	cols *Colors, background color.RGBA) *image.RGBA {

	image := image.NewRGBA(image.Rect(0, 0, width, height))

	pw := r.width << 4

	cs := cols.Colors

	ofs, numCols := zOfs*pw+xOfs, int32(len(cs))

	stride := pw - width

	istride := image.Stride + 4*width

	iofs := image.PixOffset(0, height-1)

	pix := image.Pix

	if r.TransparentBlocks > 0 { // Path for images with transparent nodes.
		for z := height - 1; z >= 0; z-- {
			for x := 0; x < width; x++ {
				colIdx := r.cBuffer[ofs]
				if colIdx < 0 || colIdx >= numCols {
					pix[iofs] = background.R
					pix[iofs+1] = background.G
					pix[iofs+2] = background.B
					pix[iofs+3] = 0xff
				} else {
					y := r.yBuffer[ofs]
					t := r.tBuffer[ofs]

					opaque := t == nil || t.Top() < y

					var y1, y2 int32

					if x == 0 {
						y1 = y
					} else {
						y1 = r.yBuffer[ofs-1]
						if opaque {
							if s := r.tBuffer[ofs-1]; s != nil {
								y1 = max32(y1, s.Top())
							}
						}
					}
					if z == 0 {
						y2 = y
					} else {
						y2 = r.yBuffer[ofs+pw]
						if opaque {
							if s := r.tBuffer[ofs+pw]; s != nil {
								y2 = max32(y2, s.Top())
							}
						}
					}
					d := ((y - y1) + (y - y2)) * 12
					if d > 36 {
						d = 36
					}
					col := cs[colIdx]
					col = color.RGBA{
						R: safeColor(int32(col.R) + d),
						G: safeColor(int32(col.G) + d),
						B: safeColor(int32(col.B) + d),
						A: 0xff}
					if !opaque {
						col = cols.BlendColors(t, col, y)
					}
					pix[iofs] = col.R
					pix[iofs+1] = col.G
					pix[iofs+2] = col.B
					pix[iofs+3] = col.A
				}
				iofs += 4
				ofs++
			}
			ofs += stride
			iofs -= istride
		}

	} else { // Solid images.
		for z := height - 1; z >= 0; z-- {
			for x := 0; x < width; x++ {
				colIdx := r.cBuffer[ofs]
				if colIdx < 0 || colIdx >= numCols {
					pix[iofs] = background.R
					pix[iofs+1] = background.G
					pix[iofs+2] = background.B
					pix[iofs+3] = 0xff
				} else {
					var y, y1, y2 int32
					y = r.yBuffer[ofs]
					if x == 0 {
						y1 = y
					} else {
						y1 = r.yBuffer[ofs-1]
					}
					if z == 0 {
						y2 = y
					} else {
						y2 = r.yBuffer[ofs+pw]
					}
					d := ((y - y1) + (y - y2)) * 12
					if d > 36 {
						d = 36
					}
					col := cs[colIdx]
					pix[iofs] = safeColor(int32(col.R) + d)
					pix[iofs+1] = safeColor(int32(col.G) + d)
					pix[iofs+2] = safeColor(int32(col.B) + d)
					pix[iofs+3] = 0xff
				}
				iofs += 4
				ofs++
			}
			ofs += stride
			iofs -= istride
		}
	}
	return image
}
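
For orientation, a minimal sketch of how the renderer is typically driven. The helper name renderTile, the 16x16 block tile size and the white background are assumptions for this example; blocks are expected to lie inside the tile set via SetPos and ideally arrive in descending Y order (see YOrder below).

func renderTile(blocks []*Block, colors *Colors, xOfs, zOfs int16) *image.RGBA {
	r := NewRenderer(16, 16, true) // a 16x16 block tile with transparency support
	r.SetPos(xOfs, zOfs)
	for _, b := range blocks {
		if err := r.RenderBlock(b, colors); err != nil {
			continue // this sketch simply skips blocks that cannot be decoded
		}
	}
	white := color.RGBA{R: 0xff, G: 0xff, B: 0xff, A: 0xff}
	return r.CreateShadedImage(0, 0, 16*16, 16*16, colors, white)
}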
157
common/spans.go
Normal file
@ -0,0 +1,157 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"bytes"
	"fmt"
)

const chunkSize = 1024

type Span struct {
	Value int32
	From  int32
	To    int32
	Next  *Span
}

type SpanPool struct {
	freeList *Span
}

func NewSpanPool() *SpanPool {
	return &SpanPool{}
}

func (sp *SpanPool) Alloc() *Span {
	if sp.freeList != nil {
		next := sp.freeList
		sp.freeList = next.Next
		return next
	}

	spans := make([]Span, chunkSize)

	for i := chunkSize - 1; i > 0; i-- {
		spans[i].Next = sp.freeList
		sp.freeList = &spans[i]
	}

	return &spans[0]
}

func (sp *SpanPool) Free(s *Span) {
	if s != nil {
		s.Next = sp.freeList
		sp.freeList = s
	}
}

func (sp *SpanPool) FreeAll(s *Span) {
	if s == nil {
		return
	}
	head, prev := s, s
	for ; s != nil; s = s.Next {
		prev = s
	}
	prev.Next = sp.freeList
	sp.freeList = head
}

func (sp *SpanPool) Insert(s *Span, pos, value int32) *Span {

	// No head -> create.
	if s == nil {
		s = sp.Alloc()
		s.From = pos
		s.To = pos
		s.Value = value
		s.Next = nil
		return s
	}

	if pos < s.From {
		// Same value and directly neighbored -> extend head.
		if value == s.Value && pos == s.From-1 {
			s.From = pos
			return s
		}
		// Disjunct -> create new head.
		prev := sp.Alloc()
		prev.From = pos
		prev.To = pos
		prev.Value = value
		prev.Next = s
		return prev
	}

	head := s
	for ; s != nil && pos > s.To; s = s.Next {
		next := s.Next
		if pos == s.To+1 && value == s.Value { // directly neighbored
			s.To = pos
			// Check if a gap has to be closed.
			if next != nil && next.From == s.To+1 && value == next.Value {
				s.To = next.To
				s.Next = next.Next
				sp.Free(next)
			}
			return head
		}
		// Extend next?
		if next != nil && pos == next.From-1 && value == next.Value {
			next.From = pos
			return head
		}
		// Before next -> new span between current and next.
		if next == nil || pos < next.From {
			sn := sp.Alloc()
			sn.From = pos
			sn.To = pos
			sn.Value = value
			sn.Next = next
			s.Next = sn
			return head
		}
	}

	return head
}
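
// For illustration: inserting the positions 7, 9 and then 8 with the same
// value first yields the spans (7,7) and (9,9); the last insert closes the
// gap and the list collapses to the single span (7,9). The numbers are
// picked for this example only.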

func (s *Span) Visit(v func(*Span)) {
	for ; s != nil; s = s.Next {
		v(s)
	}
}

func (s *Span) Len() int {
	n := 0
	for ; s != nil; s = s.Next {
		n++
	}
	return n
}

func (s *Span) Top() int32 {
	for ; s.Next != nil; s = s.Next {
	}
	return s.To
}

func (s *Span) String() string {
	var buf bytes.Buffer
	first := true
	s.Visit(func(s1 *Span) {
		if !first {
			buf.WriteString(", ")
		} else {
			first = false
		}
		buf.WriteString(fmt.Sprintf("(%d, %d)", s1.From, s1.To))
	})
	return buf.String()
}
78
common/spans_test.go
Normal file
@ -0,0 +1,78 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"math/rand"
	"testing"
)

const spanItems = 3000

func TestSpans(t *testing.T) {

	sp := NewSpanPool()
	var s *Span

	for i := 0; i < spanItems; i++ {
		s = sp.Insert(s, int32(i), 42)
	}

	if n := s.Len(); n != 1 {
		t.Errorf("inc: Span length %d expected 1\n", n)
		t.Errorf("spans: %s\n", s)
	}

	sp.FreeAll(s)

	s = nil
	for i := spanItems - 1; i >= 0; i-- {
		s = sp.Insert(s, int32(i), 42)
	}

	if n := s.Len(); n != 1 {
		t.Errorf("dec: Span length %d expected 1\n", n)
		t.Errorf("spans: %s\n", s)
	}

	sp.FreeAll(s)

	s = nil
	for i := 0; i < spanItems/2; i++ {
		j := spanItems - 1 - i
		s = sp.Insert(s, int32(i), 42)
		s = sp.Insert(s, int32(j), 21)
	}

	if n := s.Len(); n != 2 {
		t.Errorf("two: Span length %d expected 2\n", n)
		t.Errorf("spans: %s\n", s)
	}

	sp.FreeAll(s)

	inp := make([]int32, spanItems)
	for i := 0; i < spanItems; i++ {
		inp[i] = int32(i)
	}

	for i := 0; i < spanItems; i++ {
		i1 := rand.Int31n(int32(spanItems))
		i2 := rand.Int31n(int32(spanItems))
		inp[i1], inp[i2] = inp[i2], inp[i1]
	}

	s = nil
	for i := 0; i < spanItems; i++ {
		s = sp.Insert(s, inp[i], 42)
	}

	if n := s.Len(); n != 1 {
		t.Errorf("rand: Span length %d expected 1\n", n)
		t.Errorf("spans: %s\n", s)
	}

	sp.FreeAll(s)
}
17
common/version.go
Normal file
@ -0,0 +1,17 @@
// Copyright 2014 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import (
	"fmt"
	"os"
)

const MTSatelliteVersion = "0.9.1"

func PrintVersionAndExit() {
	fmt.Printf("Version: %s\n", MTSatelliteVersion)
	os.Exit(0)
}
98
common/yorder.go
Normal file
@ -0,0 +1,98 @@
// Copyright 2014, 2015, 2017 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package common

import "container/heap"

// YOrder is a "streaming" Y sorter. The blocks coming from the
// database are not sorted in Y order. To unpack only the
// relevant blocks (the ones at the surface) it would be nice
// to have them sorted in inverse Y order, so that blocks with
// a lower Y value are shadowed by ones with a higher value.
//
// Sorting all blocks correctly would require loading all blocks
// before rendering. A perfect order is not strictly necessary,
// though, because the problem is (expensively) solved at the
// per-node level anyway.
//
// YOrder therefore keeps a "window" in which all blocks are
// sorted correctly. For small amounts of blocks the sorting is
// perfect. For larger amounts partially incorrect orderings are
// possible, but as stated above this does not matter. The window
// avoids having to preload all blocks. (A short usage sketch
// follows this file.)

type YOrder struct {
	RenderFn func(*Block) error
	blocks   []*Block
	capacity int
}

func NewYOrder(renderFn func(*Block) error, capacity int) *YOrder {
	return &YOrder{
		RenderFn: renderFn,
		blocks:   make([]*Block, 0, capacity),
		capacity: capacity}
}

func (yo *YOrder) Reset() {
	blocks := yo.blocks
	for i := range blocks {
		blocks[i] = nil
	}
	yo.blocks = blocks[:0]
}

func (yo *YOrder) RenderBlock(block *Block) (*Block, error) {
	if len(yo.blocks) == yo.capacity {
		oblock := yo.blocks[0]
		if oblock.Coord.Y < block.Coord.Y {
			// The new one is above the highest buffered one. Render it directly.
			err := yo.RenderFn(block)
			return block, err
		}
		// Render the old one. Store the new one in the heap.
		heap.Pop(yo)
		heap.Push(yo, block)
		err := yo.RenderFn(oblock)
		return oblock, err
	}

	heap.Push(yo, block)
	return nil, nil
}

func (yo *YOrder) Drain() error {
	for len(yo.blocks) > 0 {
		if err := yo.RenderFn(heap.Pop(yo).(*Block)); err != nil {
			return err
		}
	}
	return nil
}

func (yo *YOrder) Len() int {
	return len(yo.blocks)
}

func (yo *YOrder) Swap(i, j int) {
	yo.blocks[i], yo.blocks[j] = yo.blocks[j], yo.blocks[i]
}

func (yo *YOrder) Less(i, j int) bool {
	// Reverse order intended.
	return yo.blocks[i].Coord.Y > yo.blocks[j].Coord.Y
}

func (yo *YOrder) Push(x interface{}) {
	yo.blocks = append(yo.blocks, x.(*Block))
}

func (yo *YOrder) Pop() interface{} {
	blocks := yo.blocks
	l := len(blocks)
	x := blocks[l-1]
	blocks[l-1] = nil
	yo.blocks = blocks[:l-1]
	return x
}
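
A minimal sketch of how YOrder sits between the block source and the renderer. The channel-based feeding, the helper name renderSorted and the window size of 512 are assumptions made for this example.

func renderSorted(blocks <-chan *Block, render func(*Block) error) error {
	yo := NewYOrder(render, 512)
	for b := range blocks {
		// Either buffers b or renders the block that falls out of the
		// window, i.e. the buffered block with the highest Y value.
		if _, err := yo.RenderBlock(b); err != nil {
			return err
		}
	}
	return yo.Drain() // render whatever is still buffered, highest Y first
}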