Merged the supress-identical-tile-overwrite feature branch.

The webmapper now keeps an in-memory cache of the SHA1 hashes of the last 256 base tiles it has written to disk.
When a tile is regenerated, its hash is compared with the stored one.
If the hash is unchanged, the tile is not written to disk again
and the pyramid tiles above it are not updated either.
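Roughly, the new pieces plug together as sketched below. This is an illustration only: the Redis client, colors, and tile coordinates are assumed to be set up by the caller, and renderTile is a hypothetical helper, not code from this commit.

package main

import (
    "image/color"
    "log"

    "bitbucket.org/s_l_teichmann/mtsatellite/common"
)

// renderTile shows the new wiring: a BaseTileHash created once with
// common.NewBaseTileHash(256) and shared by the workers remembers the
// SHA1 hashes of recently written tiles; its Update method is handed to
// the BaseTileCreator as a callback.
func renderTile(
    client *common.RedisClient, colors *common.Colors, bg color.RGBA,
    yMin, yMax int16, transparent bool, baseDir string,
    bth *common.BaseTileHash, x, z int16, i, j int) bool {

    // Passing bth.Update enables the suppression check; the seeder passes
    // nil instead, so every tile is written unconditionally.
    btc := common.NewBaseTileCreator(
        client, colors, bg, yMin, yMax, transparent, baseDir, bth.Update)
    defer btc.Close()

    // CreateTile now also reports whether the tile was actually written.
    updated, err := btc.CreateTile(x, z, i, j)
    if err != nil {
        log.Printf("WARN: create tile failed: %s", err)
    }
    if !updated {
        log.Printf("tile %d/%d unchanged; skipping pyramid update", i, j)
    }
    return updated
}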
Sascha L. Teichmann 2016-05-13 16:42:53 +02:00
commit 2583071e81
7 changed files with 349 additions and 51 deletions


@ -68,7 +68,7 @@ func createBaseLevel(
btc := common.NewBaseTileCreator(
client, colors, bg,
int16(yMin), int16(yMax),
transparent, baseDir, false)
transparent, baseDir, nil)
go createTiles(btc, jobs, &done)
}


@ -27,7 +27,7 @@ type connection struct {
}
type msg struct {
tiles map[xz]bool
tiles []xz
pls []*player
}
@ -57,12 +57,8 @@ func (wsf *websocketForwarder) run() {
}
encMsg := map[string]interface{}{}
if tiles := message.tiles; tiles != nil {
xzs := make([]xz, 0, len(tiles))
for xz := range tiles {
xzs = append(xzs, xz)
}
encMsg["tiles"] = xzs
if message.tiles != nil {
encMsg["tiles"] = message.tiles
}
if message.pls != nil {
@ -101,7 +97,7 @@ func (wsf *websocketForwarder) ServeHTTP(rw http.ResponseWriter, r *http.Request
c.reader()
}
func (wsf *websocketForwarder) BaseTilesUpdated(changes map[xz]bool) {
func (wsf *websocketForwarder) BaseTilesUpdated(changes []xz) {
wsf.broadcast <- msg{tiles: changes}
}


@ -24,12 +24,15 @@ import (
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// Number of check sums to keep in memory.
const maxHashedTiles = 256
type baseTilesUpdates interface {
BaseTilesUpdated(map[xz]bool)
BaseTilesUpdated([]xz)
}
type tileUpdater struct {
changes map[xz]bool
changes map[xz]struct{}
btu baseTilesUpdates
mapDir string
redisAddress string
@ -48,8 +51,13 @@ type xz struct {
Z int16
}
type xzc struct {
xz
canceled bool
}
type xzm struct {
P xz
xz
Mask uint16
}
@ -65,8 +73,8 @@ func (c xz) parent() xzm {
xp, xr := c.X>>1, uint16(c.X&1)
zp, zr := c.Z>>1, uint16(c.Z&1)
return xzm{
P: xz{X: xp, Z: zp},
Mask: 1 << (zr<<1 | xr)}
xz{X: xp, Z: zp},
1 << (zr<<1 | xr)}
}
func newTileUpdater(
@ -84,7 +92,7 @@ func newTileUpdater(
mapDir: mapDir,
redisAddress: redisAddress,
ips: ips,
changes: map[xz]bool{},
changes: map[xz]struct{}{},
colors: colors,
bg: bg,
yMin: int16(yMin),
@ -141,7 +149,7 @@ func (tu *tileUpdater) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
if len(newChanges) > 0 {
tu.cond.L.Lock()
for _, c := range newChanges {
tu.changes[c.quantize()] = true
tu.changes[c.quantize()] = struct{}{}
}
tu.cond.L.Unlock()
tu.cond.Signal()
@ -150,21 +158,42 @@ func (tu *tileUpdater) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
rw.WriteHeader(http.StatusOK)
}
func extractChanges(changes map[xz]struct{}) []xzc {
chs := make([]xzc, len(changes))
var i int
for ch := range changes {
chs[i] = xzc{ch, false}
i++
}
return chs
}
func activeChanges(changes []xzc) []xz {
chs := make([]xz, 0, len(changes))
for i := range changes {
if !changes[i].canceled {
chs = append(chs, changes[i].xz)
}
}
return chs
}
func (tu *tileUpdater) doUpdates() {
bth := common.NewBaseTileHash(maxHashedTiles)
baseDir := filepath.Join(tu.mapDir, "8")
for {
var changes map[xz]bool
tu.cond.L.Lock()
for len(tu.changes) == 0 {
tu.cond.Wait()
}
changes = tu.changes
tu.changes = map[xz]bool{}
changes := extractChanges(tu.changes)
tu.changes = map[xz]struct{}{}
tu.cond.L.Unlock()
baseDir := filepath.Join(tu.mapDir, "8")
jobs := make(chan xz)
jobs := make(chan *xzc)
var done sync.WaitGroup
for i, n := 0, common.Min(tu.workers, len(changes)); i < n; i++ {
@ -177,22 +206,29 @@ func (tu *tileUpdater) doUpdates() {
btc := common.NewBaseTileCreator(
client, tu.colors, tu.bg,
tu.yMin, tu.yMax,
tu.transparent, baseDir, true)
tu.transparent, baseDir, bth.Update)
done.Add(1)
go tu.updateBaseTiles(jobs, btc, &done)
}
parentJobs := make(map[xz]uint16)
for c := range changes {
//log.Printf("job: %+v\n", c)
jobs <- c
pxz := c.parent()
parentJobs[pxz.P] |= pxz.Mask
for i := range changes {
jobs <- &changes[i]
}
close(jobs)
done.Wait()
actChs := activeChanges(changes)
if len(actChs) == 0 {
continue
}
parentJobs := make(map[xz]uint16)
for i := range actChs {
pxz := actChs[i].parent()
parentJobs[pxz.xz] |= pxz.Mask
}
for level := 7; level >= 0; level-- {
pJobs := make(chan xzm)
for i, n := 0, common.Min(len(parentJobs), tu.workers); i < n; i++ {
@ -201,9 +237,9 @@ func (tu *tileUpdater) doUpdates() {
}
ppJobs := make(map[xz]uint16)
for c, mask := range parentJobs {
pJobs <- xzm{P: c, Mask: mask}
pJobs <- xzm{c, mask}
pxz := c.parent()
ppJobs[pxz.P] |= pxz.Mask
ppJobs[pxz.xz] |= pxz.Mask
}
close(pJobs)
done.Wait()
@ -211,7 +247,7 @@ func (tu *tileUpdater) doUpdates() {
}
if tu.btu != nil {
tu.btu.BaseTilesUpdated(changes)
tu.btu.BaseTilesUpdated(actChs)
}
}
}
@ -257,8 +293,8 @@ func (tu *tileUpdater) updatePyramidTile(scratch, resized *image.RGBA, level int
origPath := filepath.Join(
tu.mapDir,
strconv.Itoa(level),
strconv.Itoa(int(j.P.X)),
strconv.Itoa(int(j.P.Z))+".png")
strconv.Itoa(int(j.X)),
strconv.Itoa(int(j.Z))+".png")
sr := resized.Bounds()
levelDir := strconv.Itoa(level + 1)
@ -266,7 +302,7 @@ func (tu *tileUpdater) updatePyramidTile(scratch, resized *image.RGBA, level int
if j.Mask&(1<<i) != 0 {
//log.Printf("level %d: modified %d\n", level, i)
o := ofs[i]
bx, bz := int(2*j.P.X), int(2*j.P.Z)
bx, bz := int(2*j.X), int(2*j.Z)
path := filepath.Join(
tu.mapDir,
levelDir,
@ -294,7 +330,7 @@ func (tu *tileUpdater) updatePyramidTile(scratch, resized *image.RGBA, level int
}
func (tu *tileUpdater) updateBaseTiles(
jobs chan xz,
jobs chan *xzc,
btc *common.BaseTileCreator, done *sync.WaitGroup) {
defer btc.Close()
@ -302,8 +338,12 @@ func (tu *tileUpdater) updateBaseTiles(
for job := range jobs {
xz := job.dequantize()
//log.Printf("%d/%d %d/%d", x, z, job.X, job.Z)
if err := btc.CreateTile(xz.X-1, xz.Z-1, int(job.X), int(job.Z)); err != nil {
updated, err := btc.CreateTile(xz.X-1, xz.Z-1, int(job.X), int(job.Z))
if err != nil {
log.Printf("WARN: create tile failed: %s\n", err)
}
if !updated {
job.canceled = true
}
}
}


@ -49,6 +49,8 @@ var tileDepths = [...][2]int16{
var BackgroundColor = color.RGBA{R: 0xff, G: 0xff, B: 0xff, A: 0xff}
type BaseTileUpdateFunc func(x, y int, hash []byte) bool
type BaseTileCreator struct {
client *RedisClient
colors *Colors
@ -57,7 +59,7 @@ type BaseTileCreator struct {
yMin int16
yMax int16
baseDir string
update bool
update BaseTileUpdateFunc
emptyImage []byte
bg color.RGBA
}
@ -69,7 +71,7 @@ func NewBaseTileCreator(
yMin, yMax int16,
transparent bool,
baseDir string,
update bool) *BaseTileCreator {
update BaseTileUpdateFunc) *BaseTileCreator {
renderer := NewRenderer(tileWidth, tileHeight, transparent)
yMin, yMax = Order16(yMin, yMax)
return &BaseTileCreator{
@ -88,7 +90,7 @@ func (btc *BaseTileCreator) Close() error {
return btc.client.Close()
}
func (btc *BaseTileCreator) CreateTile(x, z int16, i, j int) error {
func (btc *BaseTileCreator) CreateTile(x, z int16, i, j int) (bool, error) {
btc.renderer.Reset()
btc.renderer.SetPos(x, z)
btc.yOrder.Reset()
@ -128,7 +130,7 @@ func (btc *BaseTileCreator) CreateTile(x, z int16, i, j int) error {
c2.Z = area.Z2 + z
query := Cuboid{P1: c1, P2: c2}
if err := btc.client.QueryCuboid(query, drawBlock); err != nil {
return err
return false, err
}
if err := btc.yOrder.Drain(btc.colors); err != nil {
log.Printf("WARN: rendering block failed: %s\n", err)
@ -140,28 +142,34 @@ func (btc *BaseTileCreator) CreateTile(x, z int16, i, j int) error {
path := filepath.Join(btc.baseDir, strconv.Itoa(i), strconv.Itoa(j)+".png")
// Empty images are likely to be produced during seeding.
if !btc.update && btc.renderer.IsEmpty() {
if btc.update == nil && btc.renderer.IsEmpty() {
// To avoid redundant encoding cache the resulting empty image.
if btc.emptyImage == nil {
var err error
m := BackgroundImage((tileWidth-2)*16, (tileHeight-2)*16, btc.bg)
if btc.emptyImage, err = EncodeToMem(m); err != nil {
return err
return false, err
}
}
//log.Printf("Writing empty (%d, %d) to file %s\n", x, z, path)
return ioutil.WriteFile(path, btc.emptyImage, 0666)
return true, ioutil.WriteFile(path, btc.emptyImage, 0666)
}
image := btc.renderer.CreateShadedImage(
16, 16, (tileWidth-2)*16, (tileHeight-2)*16,
btc.colors, btc.bg)
log.Printf("Writing (%d, %d) to file %s\n", x, z, path)
if !btc.update {
return SaveAsPNG(path, image)
if btc.update == nil {
log.Printf("Writing (%d, %d) to file %s.\n", x, z, path)
return true, SaveAsPNG(path, image)
}
return SaveAsPNGAtomic(path, image)
if btc.update(i, j, SHA1Image(image)) {
log.Printf("Writing (%d, %d) to file %s.\n", x, z, path)
return true, SaveAsPNGAtomic(path, image)
}
log.Printf("(%d, %d) is unchanged.\n", x, z)
return false, nil
}

common/basetilehash.go Normal file

@ -0,0 +1,92 @@
// Copyright 2016 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"bytes"
"sync"
)
type btKey struct {
x int
y int
}
type btHashEntry struct {
prev *btHashEntry
next *btHashEntry
hash []byte
btKey
}
type BaseTileHash struct {
hashes map[btKey]*btHashEntry
maxEntries int
root btHashEntry
sync.Mutex
}
func NewBaseTileHash(maxEntries int) *BaseTileHash {
bth := &BaseTileHash{
hashes: map[btKey]*btHashEntry{},
maxEntries: maxEntries}
bth.root.next = &bth.root
bth.root.prev = &bth.root
return bth
}
func (bth *BaseTileHash) toFront(entry *btHashEntry) {
if bth.root.next == entry {
return
}
entry.prev.next = entry.next
entry.next.prev = entry.prev
entry.next = bth.root.next
entry.prev = &bth.root
bth.root.next.prev = entry
bth.root.next = entry
}
func (bth *BaseTileHash) removeLast() *btHashEntry {
last := bth.root.prev
bth.root.prev = last.prev
last.prev.next = &bth.root
delete(bth.hashes, last.btKey)
return last
}
func (bth *BaseTileHash) insertFront(entry *btHashEntry) {
entry.next = bth.root.next
entry.prev = &bth.root
bth.root.next.prev = entry
bth.root.next = entry
}
func (bth *BaseTileHash) Update(x, y int, hash []byte) bool {
bth.Lock()
defer bth.Unlock()
key := btKey{x, y}
if old, found := bth.hashes[key]; found {
if !bytes.Equal(old.hash, hash) {
old.hash = hash
bth.toFront(old)
return true
}
return false
}
var entry *btHashEntry
if len(bth.hashes) >= bth.maxEntries {
entry = bth.removeLast()
} else {
entry = new(btHashEntry)
}
entry.btKey = key
entry.hash = hash
bth.hashes[key] = entry
bth.insertFront(entry)
return true
}

common/basetilehash_test.go Normal file

@ -0,0 +1,147 @@
// Copyright 2016 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"math/rand"
"testing"
)
func randomBaseTileHash(updates int) *BaseTileHash {
bth := NewBaseTileHash(256)
h1 := []byte{1}
h2 := []byte{2}
for i := 0; i < updates; i++ {
x, y := rand.Intn(100), rand.Intn(100)
var h []byte
if i%2 == 0 {
h = h1
} else {
h = h2
}
bth.Update(x, y, h)
}
return bth
}
func TestBaseTileHashLenList(t *testing.T) {
for _, updates := range []int{53, 111, 1345, 11261} {
bth := randomBaseTileHash(updates)
countNext := 0
for cur := bth.root.next; cur != &bth.root; cur = cur.next {
countNext++
}
countPrev := 0
for cur := bth.root.prev; cur != &bth.root; cur = cur.prev {
countPrev++
}
if countPrev != countNext {
t.Errorf("count prev != count next: %d %d", countPrev, countNext)
}
if countPrev != len(bth.hashes) {
t.Errorf("count prev != len(hash): %d %d", countPrev, len(bth.hashes))
}
}
}
func TestBaseTileHashIntegrity(t *testing.T) {
for _, updates := range []int{10, 100, 1000, 10000} {
bth := randomBaseTileHash(updates)
entries := map[*btHashEntry]bool{}
for cur := bth.root.next; cur != &bth.root; cur = cur.next {
if entries[cur] {
t.Errorf("hash element found more than once: %d", updates)
}
entries[cur] = true
}
if len(entries) != len(bth.hashes) {
t.Errorf("List has differnt length than hashes: %d : %d",
len(entries), len(bth.hashes))
}
var already1 bool
var already2 bool
for k, v := range bth.hashes {
if !entries[v] {
if !already1 {
already1 = true
t.Errorf("Hash contains pointer to element not being in list: %d",
updates)
}
}
if k != v.btKey {
if !already2 {
already2 = true
t.Errorf("Key in entry does not match hash key: %d", updates)
}
}
delete(entries, v)
}
if len(entries) > 0 {
t.Error("There are more entries than indexed by hash")
}
}
}
func TestBaseTileHashOverwrite(t *testing.T) {
bth := NewBaseTileHash(256)
h1 := []byte{1}
h2 := []byte{2}
if updated := bth.Update(0, 0, h1); !updated {
t.Error("First insert does not trigger update")
}
if updated := bth.Update(0, 0, h2); !updated {
t.Error("Second insert does not trigger update")
}
if updated := bth.Update(0, 0, h2); updated {
t.Error("Third insert does trigger update")
}
}
func TestBaseTileHashSeparate(t *testing.T) {
bth := NewBaseTileHash(256)
h1 := []byte{1}
if updated := bth.Update(0, 0, h1); !updated {
t.Error("First insert does not trigger update")
}
if updated := bth.Update(0, 1, h1); !updated {
t.Error("Second insert does not trigger update")
}
if updated := bth.Update(1, 0, h1); !updated {
t.Error("Third insert does trigger update")
}
if len(bth.hashes) != 3 {
t.Errorf("Expected size to be 3. Current size: %d", len(bth.hashes))
}
}
func TestBaseTileHashLRU(t *testing.T) {
bth := NewBaseTileHash(2)
h1 := []byte{1}
if updated := bth.Update(0, 0, h1); !updated {
t.Error("First insert does not trigger update")
}
if updated := bth.Update(0, 1, h1); !updated {
t.Error("Second insert does not trigger update")
}
if updated := bth.Update(1, 0, h1); !updated {
t.Error("Third insert does trigger update")
}
if len(bth.hashes) != 2 {
t.Errorf("Expected size to be 2. Current size: %d", len(bth.hashes))
}
}


@ -7,6 +7,7 @@ package common
import (
"bufio"
"bytes"
"crypto/sha1"
"errors"
"image"
"image/color"
@ -44,7 +45,7 @@ func nextSuffix() string {
func EncodeToMem(img image.Image) ([]byte, error) {
var buf bytes.Buffer
enc := png.Encoder{png.BestCompression}
enc := png.Encoder{CompressionLevel: png.BestCompression}
if err := enc.Encode(&buf, img); err != nil {
return nil, err
}
@ -110,3 +111,17 @@ func LoadPNG(path string, bg color.RGBA) image.Image {
}
return img
}
func SHA1Image(img *image.RGBA) []byte {
hash := sha1.New()
w, h := img.Rect.Dx()*4, img.Rect.Dy()
pos := img.PixOffset(img.Rect.Min.X, img.Rect.Min.Y)
for ; h > 0; h, pos = h-1, pos+img.Stride {
hash.Write(img.Pix[pos : pos+w])
}
return hash.Sum(nil)
}
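Taken together, SHA1Image and BaseTileHash form the suppression check used by CreateTile. A minimal usage sketch (the tile coordinate 3/4 and the image content are made up; it assumes the common package is importable):

package main

import (
    "fmt"
    "image"
    "image/color"

    "bitbucket.org/s_l_teichmann/mtsatellite/common"
)

func main() {
    bth := common.NewBaseTileHash(256)
    img := image.NewRGBA(image.Rect(0, 0, 64, 64))

    // First time tile 3/4 is seen: Update returns true, the tile would be written.
    fmt.Println(bth.Update(3, 4, common.SHA1Image(img))) // true

    // Same pixels again: the SHA1 matches the cached hash, the write is suppressed.
    fmt.Println(bth.Update(3, 4, common.SHA1Image(img))) // false

    // One pixel changed: the hash differs, the tile is written again.
    img.SetRGBA(10, 10, color.RGBA{R: 0xff, A: 0xff})
    fmt.Println(bth.Update(3, 4, common.SHA1Image(img))) // true
}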