Remplissage du dépôt

This commit is contained in:
sys4-fr 2018-12-13 21:09:02 +01:00
commit 6af26530ca
71 changed files with 12648 additions and 0 deletions

6
.hg_archival.txt Normal file
View File

@ -0,0 +1,6 @@
repo: 39aff4ef3fa7660b6691182214aad98569799a29
node: e1bf980a2b278c570b3f44f9452c9c087558acb3
branch: default
latesttag: 0.9.1
latesttagdistance: 4
changessincelatesttag: 5

16
.hgtags Normal file
View File

@ -0,0 +1,16 @@
fa8009dd8a8440d9c3532f5a80b562bd3be91705 0.1
55265226a98d4c490e79ba04d7de4865884bcf12 0.2
f1f4e58c816deadd290f329ae7c1f8d08293ff88 0.3
ec35ba11a14075f060ec7f9980fdfdf4ea7e8f78 0.4
0129ff78168003e22a84f542f6f002b03f526357 0.5
0c0f125037d1bcd3add61f5dcf98e6978a3de853 0.6
02774f29e446865d89c703dd0b1a4892489e4e8f 0.7
7251b84c03938a45b340bbc8fd12a58ff4d67d04 0.8
c78c9905d39aeb84643910f443c55035da749da9 0.8.1
c78c9905d39aeb84643910f443c55035da749da9 0.8.1
0000000000000000000000000000000000000000 0.8.1
0000000000000000000000000000000000000000 0.8.1
46182c5134851958b64d9ad3305ec97a381a30c7 0.8.1
404e84ee8dbfb2a0a318133312a9913d8c07a611 0.8.2
032192ac6db5a5fd0dbc9c9f23cef0483890298d 0.9
877ad82f1b88a2ea95d17299dae9c06880bf20de 0.9.1

8
3rdpartylibs.sh Executable file
View File

@ -0,0 +1,8 @@
#!/bin/sh
# Currently used third party libraries
go get -u -v golang.org/x/crypto/blake2b
go get -u -v github.com/bamiaux/rez
go get -u -v github.com/jmhodges/levigo
go get -u -v github.com/mattn/go-sqlite3
go get -u -v github.com/gorilla/mux
go get -u -v github.com/gorilla/websocket

42
COMPILE.md Normal file
View File

@ -0,0 +1,42 @@
# HOWTO compile MTSatellite
To build MTSatellite a [Go](http://golang.org) compiler 1.4 or better is needed.
Currently this is only tested on Debian Wheezy, Debian Jessie,
Ubuntu Trusty Tahr (14.04) and newer. Other flavors
of GNU/Linux should work, too. Mac OS X may work. Problems with MS Windows
are expected.
A quick and dirty way to produce the binaries of `mtdbconverter`,
`mtredisalize`, `mtseeder` and `mtwebmapper`:
# Assuming you have a 64bit GNU/Linux system. For other systems take
# the corresponding version from https://golang.org/dl/
$ wget https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz
$ echo "b5a64335f1490277b585832d1f6c7f8c6c11206cba5cd3f771dcb87b98ad1a33 go1.10.linux-amd64.tar.gz" | sha256sum -c -
$ tar xf go1.10.linux-amd64.tar.gz
$ mkdir -p gopath/{pkg,bin,src}
$ export GOROOT=`pwd`/go
$ export GOPATH=`pwd`/gopath
$ export PATH=$GOROOT/bin:$GOPATH/bin:$PATH
# On Debian Wheezy you have to install the LevelDB dev from Backports.
$ sudo apt-get install libleveldb-dev
$ go get -u bitbucket.org/s_l_teichmann/mtsatellite/cmd/mtdbconverter
$ go get -u bitbucket.org/s_l_teichmann/mtsatellite/cmd/mtredisalize
$ go get -u bitbucket.org/s_l_teichmann/mtsatellite/cmd/mtseeder
$ go get -u bitbucket.org/s_l_teichmann/mtsatellite/cmd/mtwebmapper
$ ls $GOPATH/bin
mtdbconverter mtredisalize mtseeder mtwebmapper

21
LICENSE Normal file
View File

@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2014 Sascha L. Teichmann
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

42
README.md Normal file
View File

@ -0,0 +1,42 @@
# MTSatellite
MTSatellite is a "realtime" web mapping system for [Minetest](http://minetest.net) worlds.
With this system you can play your world and you instantly have an online map of it which
can be shared on the web.
To get a glimpse what it does watch [Realtime Webmapping for Minetest worlds](http://youtu.be/iYEROGPj7RI)
on YouTube.
A live map of an online world can be viewed [here](http://maps.mt.sha-bang.de/).
See [COMPILE](https://bitbucket.org/s_l_teichmann/mtsatellite/src/default/COMPILE.md) how to compile
MTSatellite. Essentially you need Go 1.4 (or higher) and a GNU/Linux system.
See [SETUP](https://bitbucket.org/s_l_teichmann/mtsatellite/src/default/SETUP.md) how to bring
MTSatellite to life.
To use it you have to configure your Minetest server to use the Redis backend data store.
Instead of a real Redis server you have to run **mtredisalize**. The data itself is not stored in a Redis server. It only uses the Redis protocol to communicate with the MT server. The data
is stored in an optimized form in a LevelDB or SQLite3 database. Changes made to the world are collected over a configurable amount
of time periodically. After a cycle of collection the web server **mtwebmapper** is triggered
to regenerate the map. To not fully regenerate the whole world map but only the differences
the map has to be pre-rendered with **mtseeder**.
* [mtredisalize](https://bitbucket.org/s_l_teichmann/mtsatellite/src/default/cmd/mtredisalize) is a server which has to be configured as a Redis backend to the
Minetest server.
* [mtwebmapper](https://bitbucket.org/s_l_teichmann/mtsatellite/src/default/cmd/mtwebmapper/) Web server that serves [Leaflet](http://leafletjs.com) compatible tiles to a
web browser. Running in the background this server updates the pre-computed map tiles.
* [mtdbconverter](https://bitbucket.org/s_l_teichmann/mtsatellite/src/default/cmd/mtdbconverter) is converter between Minetest databases. mtredisalize works
best if you use a special interleaved LevelDB key schema. To bring your old database (LevelBD or SQLite3)
into this format this tool does the job. It can also be used to convert the interleaved database
back to a plain LevelDB or SQLite3 one.
* [mtseeder](https://bitbucket.org/s_l_teichmann/mtsatellite/src/default/cmd/mtseeder) pre-computes all tiles of the map of the world plus a set of pyramidal overview images.
Even playing in a dynamic and fully changeable world most of the data is pretty constant over time. Therefore
it makes sense to do only small updates on pre-calculated images instead of generating
the map entirely on the fly.
This is Free Software under the terms of the MIT license.
See [LICENSE](LICENSE) file for details.
(c) 2014 by Sascha L. Teichmann

196
SETUP.md Normal file
View File

@ -0,0 +1,196 @@
# SETUP MTSatellite
You will need a Minetest server with Redis support compiled in. Consult the Minetest documentation to figure out how to get such build.
Furthermore you need the binaries `mtdbconverter`, `mtseeder`, `mtredisalize` and `mtwebmapper` in your **PATH**.
Consult [COMPILE](https://bitbucket.org/s_l_teichmann/mtsatellite/src/default/COMPILE.md) how to build these.
Setting up MTSatellite takes six steps:
1. [Backup your world](#markdown-header-backup-your-world)
2. [Convert world database into interleaved format](#markdown-header-convert-world-database-into-interleaved-format)
3. [Start `mtredisalize`](#markdown-header-start-mtredisalize)
4. [Pre-compute the map tiles with `mtseeder`](#markdown-header-pre-compute-the-map-tiles-with-mtseeder)
5. [Start the web server `mtwebmapper`](#markdown-header-start-the-web-server-mtwebmapper)
6. [Configure and restart the Minetest server](#markdown-header-configure-and-restart-the-minetest-server)
Experimental: Optionally you can [enable on map tracking of logged in players](#markdown-header-enable-on-map-tracking-of-logged-in-players).
## Backup your world
Stop your running Minetest server and make a backup of your world
before you will start crying.
## Convert world database into interleaved format
MTSatellite operates best if the block data of the world is stored in a LevelDB database with
a key scheme called interleaved. With this key scheme you can pick up sets of neighbored blocks a
lot quicker than with a plain database.
See [Z-order curve](http://en.wikipedia.org/wiki/Z-order_curve) at Wikipedia to grasp the core ideas.
MTSatellite can run on plain LevelDB or SQLite3 world databases but with slightly reduced performance.
This should work but to our knowledge it is not used in productive setups.
Stay with the interleaved format!
To convert your original plain SQLite3 or LevelDB database (Redis is not supported atm) to the interleaved
LevelDB format you have to use `mtdbconverter`:
mtdbconverter -source-backend=sqlite /path/to/your/world/map.sqlite /path/to/your/world/map.db
Depending on the size of your world and the speed of your computer system this conversion will take some time.
Change `-source-backend=sqlite` to `-source-backend=leveldb` if your world is stored as a LevelDB.
`mtdbconverter` can also be used to convert your world back to the plain key scheme.
Use `mtdbconverter --help` to see all options.
You can skip the conversion if you want to use a plain database.
## Start mtredisalize
`mtredisalize` is the component which serves the block data to Minetest and `mtwebmapper` as a Redis
look-alike server. Start it with:
mtredisalize \
-host=localhost \
-interleaved=true \
-change-url=http://localhost:8808/update \
-change-duration=10s \
/path/to/your/world/map.db
This binds the server to localhost port 6379, the default Redis port. You can change it with the `-port=` option.
The `-interleaved=true` option is **mandatory** if you use the interleaved format of the database. Forgetting it
will end up in the crying mentioned above. Set this flag to `false` if you are using a plain database.
The `-change-url=` option is a forward reference to the `mtwebmapper` server which will be notified if the
world has changed. If it is not configured the tile re-generation is not triggered. As long as the Minetest server
is down there will be no changes and therefore it is safe to configure it even if the `mtwebmapper` service is not
running.
The `-change-duration=` option specifies the amount of time how long the `mtredisalize` server should aggregate
changes made to the world before reporting them to `mtwebmapper`. It defaults to 30 seconds but the value can
be increased or decreased depending how often you want to update the map. Decreasing it will increase the
computing pressure on your system so configure it wisely.
## Pre-compute the map tiles with mtseeder
Even in a dynamic Minetest world played by many players most of the data is static over time. To generate
a basic map to apply only changes to use `mtseeder`:
GOMAXPROCS=6 mtseeder \
-colors=/path/to/your/colors.txt \
-output-dir=/path/to/your/map \
-workers=3
This contacts the `mtredisalize` server running at localhost port 6379 to fetch the block data from. You will
need a `colors.txt` to map the block nodes to pixel colors of your map. The repository contains a
[prefabricated](https://bitbucket.org/s_l_teichmann/mtsatellite/raw/default/colors.txt) one, or you can create
an adjusted one fitting your server with [mtautocolors](https://bitbucket.org/s_l_teichmann/mtautocolors).
If you want to have certain nodes to be transparent you can add `-transparent=true` to the
options. In this case, if a color from colors.txt has a fourth color component, the numerical
value between 0 (fully transparent) and 255 (fully opaque) will be the base transparency of the
pixel. Every depth meter of the same material will reduce the transparency by 2%. This can be adjusted
with the `-transparent-dim=percent` flags.
See `mtseeder --help` for all options.
The `-workers=` option and the `GOMAXPROCS=` environment variable are completely optional but very useful
to exploit multiple processor cores on your machine. Set `GOMAXPROCS=` to the result of `nproc` and `-workers=`
to a number a little lesser. You have to experiment with this to find a good setting.
Even with good CPU usage generating the map and overview image tiles take a while.
Tip: A lot of the Minetest map tiles are white/empty but are saved as dupes in the file system. To
deduplicate them you can use e.g. [hardlink](https://bitbucket.org/s_l_teichmann/hardlink). You
can also run it as a nightly cron job to dedupe the map on a regular basis.
## Start the web server mtwebmapper
This web server serves the Leaflet compatible tiles to the browser and is contacted by `mtredisalize`
if something in the world has changed. In this case the corresponding map tiles are re-generated
in the background. To start `mtwebmapper` use:
GOMAXPROCS=3 mtwebmapper \
-colors=/path/to/your/colors.txt \
-web-host="" \
-map=/path/to/your/map \
-web=/path/to/your/static/web \
-redis-host=localhost \
-workers=2 \
-websockets=false
For the `-colors=` option the same applies as said above. You can also add
`-transparent=true` for transparency as mentioned above. The `web-host=` is the interface the
server is listening on. `""` means all interfaces. The port defaults to 8808.
For a productive setup you may consider running it behind a reverse proxy.
`-map=` has to be the same path as used by `mtseeder`.
`-web=` is the path to the static web data (Leaflet, HTML, CSS, etc.). You can take it
from the [repository](https://bitbucket.org/s_l_teichmann/mtsatellite/src/default/cmd/mtwebmapper/web/)
To fetch the block data from the `mtredisalize` you have to use the option `redis-host=`. If
you omit this then there will be no background job to re-generate the map. This is useful
if you want to serve a map that is only generated once with `mtseeder`.
To see all the available options use `mtwebmapper --help`.
The `GOMAXPROCS=`/`-workers=` setting has to be adjusted to your system capacity. Do not
give too many resources to this if you are planning to run the mapping web server on the
same machine as the Minetest server. On the other hand assigning more cores to it definitely
helps to boost up the performance.
Setting the `-websockets=true` flag enables websocket support for the server. With this
feature turned on and changing the line (in `web/index.html`) from
var useWebsocket = false; // Set to true if you want websocket support
to
var useWebsocket = true; // Set to true if you want websocket support
the web client gets an extra 'auto update' button. When switched on the server
informs the client if something in the maps has changed. The displayed map will
then update automatically without the need of manual pressing the 'update view'
button. Of course your browser needs WebSocket support, too.
## Configure and restart the Minetest server
Now everything is in place and the only thing left is to re-configure the Minetest server
itself. You have to open your `/path/to/your/world.mt` file in your text editor and replace the
backend with a Redis configuration:
backend = redis
redis_hash = IGNORED
redis_address = localhost
You may have to set `redis_port` too if you run `mtredisalize` not on port 6379.
Now we are all done and you can fire your Minetest server up again. :-)
## Enable on map tracking of logged in players
MTSatellite can display logged in players on the map.
This is an experimental feature and its only confirmed working on GNU/Linux systems.
OS X and \*BSD should work, too.
To use it install the [track_players](https://bitbucket.org/s_l_teichmann/mtsatellite/src/default/mods/track_players)
mod. Simply add a checkout to your mods folder and activate it in your world.mt file.
...
load_mod_track_players = true
...
This minetest mod writes players position to a [named pipe aka FIFO](http://en.wikipedia.org/wiki/Named_pipe).
`mtwebmapper` is able to read from this file and serve these positions as GeoJSON to the browser.
The FIFO has to be created _before_ the start of the minetest server.
$ mkfifo /tmp/mt_players_fifo
The path to the FIFO can be changed in track_players/init.lua
...
local fifo_path = "/tmp/mt_players_fifo"
...
To use the feature in `mtwebmapper` add the argument `-players=/tmp/mt_players_fifo` to the list
of command line arguments.
*Caution*: Please start `mtwebmapper` before the minetest server! Caused by the nature of FIFOs and the
single threaded execution of minetest mods the minetest server will block if there is no consumer
reading the player positions.
The player tracking is well integrated with the websocket support. If you enable websockets you will
be able to see the players moving on the map.

6
TODO Normal file
View File

@ -0,0 +1,6 @@
- Document all the new commands (in Markdown)
- Rename the project to 'MTSatellite' as mtredisalize is only one component
and its all about mapping now.
- mtredisalize: Check if the mutex stuff in the LevelDB is really needed.
LevelDB has some thread-safety already.
- mtredisalize: Write some docs about interleaving backends.

View File

@ -0,0 +1,145 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"os"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
leveldb "github.com/jmhodges/levigo"
)
type (
// LevelDBBlockProducer is a helper to fetch blocks from a LevelDB.
// It owns the DB handle, options, read options and a full-table
// iterator which is advanced by Next until exhausted.
LevelDBBlockProducer struct {
db *leveldb.DB
opts *leveldb.Options
ro *leveldb.ReadOptions
iterator *leveldb.Iterator
splitter common.KeySplitter // converts an int64 key to block coordinates
decoder common.KeyDecoder // converts a raw iterator key to an int64
}
// LevelDBBlockConsumer is a helper to store blocks in a LevelDB.
LevelDBBlockConsumer struct {
db *leveldb.DB
opts *leveldb.Options
wo *leveldb.WriteOptions
joiner common.KeyJoiner // converts block coordinates back to an int64 key
encoder common.KeyEncoder // serializes an int64 key for storage
}
)
// NewLevelDBBlockProducer returns a new helper to fetch blocks from a LevelDB.
// It fails if the database directory does not exist (CreateIfMissing is
// disabled) and positions the iterator on the first key.
func NewLevelDBBlockProducer(path string,
splitter common.KeySplitter,
decoder common.KeyDecoder) (ldbp *LevelDBBlockProducer, err error) {
// check if we can stat it -> exists.
if _, err = os.Stat(path); err != nil {
return
}
opts := leveldb.NewOptions()
opts.SetCreateIfMissing(false)
var db *leveldb.DB
if db, err = leveldb.Open(path, opts); err != nil {
// Release the options on failure; nothing else was acquired yet.
opts.Close()
return
}
ro := leveldb.NewReadOptions()
// A one-shot full scan should not pollute the block cache.
ro.SetFillCache(false)
iterator := db.NewIterator(ro)
iterator.SeekToFirst()
ldbp = &LevelDBBlockProducer{
db: db,
opts: opts,
ro: ro,
iterator: iterator,
splitter: splitter,
decoder: decoder}
return
}
// Close closes a helper to fetch blocks from a LevelDB.
// The iterator may already have been closed by Next when the scan
// finished, hence the nil check. Always returns nil.
func (ldbp *LevelDBBlockProducer) Close() error {
if ldbp.iterator != nil {
ldbp.iterator.Close()
}
ldbp.ro.Close()
ldbp.db.Close()
ldbp.opts.Close()
return nil
}
// Next fetches the next block from a LevelDB.
// It returns common.ErrNoMoreBlocks once the iterator is exhausted
// (or any iterator error encountered) and closes the iterator then.
func (ldbp *LevelDBBlockProducer) Next(block *common.Block) (err error) {
if ldbp.iterator == nil {
err = common.ErrNoMoreBlocks
return
}
if !ldbp.iterator.Valid() {
// Distinguish a clean end of iteration from an iterator error.
if err = ldbp.iterator.GetError(); err == nil {
err = common.ErrNoMoreBlocks
}
ldbp.iterator.Close()
ldbp.iterator = nil
return
}
var key int64
if key, err = ldbp.decoder(ldbp.iterator.Key()); err != nil {
return
}
block.Coord = ldbp.splitter(key)
block.Data = ldbp.iterator.Value()
ldbp.iterator.Next()
return
}
// NewLevelDBBlockConsumer returns a new helper to store blocks in a LevelDB.
// The database is created if it does not exist yet.
// NOTE(review): opts is not closed when leveldb.Open fails here,
// unlike in NewLevelDBBlockProducer — confirm whether this leaks.
func NewLevelDBBlockConsumer(
path string,
joiner common.KeyJoiner,
encoder common.KeyEncoder) (ldbc *LevelDBBlockConsumer, err error) {
opts := leveldb.NewOptions()
opts.SetCreateIfMissing(true)
var db *leveldb.DB
if db, err = leveldb.Open(path, opts); err != nil {
return
}
ldbc = &LevelDBBlockConsumer{
db: db,
opts: opts,
wo: leveldb.NewWriteOptions(),
joiner: joiner,
encoder: encoder}
return
}
// Close closes a helper to store blocks in a LevelDB,
// releasing write options, DB handle and options. Always returns nil.
func (ldbc *LevelDBBlockConsumer) Close() error {
ldbc.wo.Close()
ldbc.db.Close()
ldbc.opts.Close()
return nil
}
// Consume stores a block in LevelDB.
// The block coordinate is joined into an int64 key and then encoded
// to the byte representation used by the destination key schema.
func (ldbc *LevelDBBlockConsumer) Consume(block *common.Block) (err error) {
var encodedKey []byte
if encodedKey, err = ldbc.encoder(ldbc.joiner(block.Coord)); err != nil {
return
}
err = ldbc.db.Put(ldbc.wo, encodedKey, block.Data)
return
}

175
cmd/mtdbconverter/main.go Normal file
View File

@ -0,0 +1,175 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"log"
"os"
"sync"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// usage writes the command line synopsis followed by the documented
// option defaults to standard error.
func usage() {
	out := os.Stderr
	fmt.Fprintf(out, "Usage: %s [<options>] <source database> <dest database>\n", os.Args[0])
	fmt.Fprintln(out, "Options:")
	flag.PrintDefaults()
}
// selectKeySplitter picks the key-splitting strategy matching the
// database key schema (interleaved Z-order keys vs. plain keys).
func selectKeySplitter(interleaved bool) common.KeySplitter {
	if !interleaved {
		return common.PlainToCoord
	}
	return common.InterleavedToCoord
}
// selectKeyJoiner picks the coordinate-joining strategy matching the
// database key schema (interleaved Z-order keys vs. plain keys).
func selectKeyJoiner(interleaved bool) common.KeyJoiner {
	if !interleaved {
		return common.CoordToPlain
	}
	return common.CoordToInterleaved
}
// selectKeyDecoder picks the raw-key decoder matching the database
// key schema: big-endian int64 for interleaved, decimal string otherwise.
func selectKeyDecoder(interleaved bool) common.KeyDecoder {
	if !interleaved {
		return common.DecodeStringFromBytes
	}
	return common.DecodeFromBigEndian
}
// selectKeyEncoder picks the key encoder matching the database key
// schema: big-endian int64 for interleaved, decimal string otherwise.
func selectKeyEncoder(interleaved bool) common.KeyEncoder {
	if !interleaved {
		return common.EncodeStringToBytes
	}
	return common.EncodeToBigEndian
}
// copyProducerToConsumer streams every block from producer to consumer.
// A reader goroutine pulls blocks and hands them over an unbuffered
// channel; the caller's goroutine writes them. A sync.Pool recycles
// Block structs to reduce allocations. Progress is logged every 1000
// blocks. NOTE(review): producer read errors are only logged and the
// function still returns nil in that case — confirm this best-effort
// semantic is intended.
func copyProducerToConsumer(producer common.BlockProducer, consumer common.BlockConsumer) error {
blocks := make(chan *common.Block)
done := make(chan struct{})
// Closing done lets the reader goroutine exit if the consumer fails early.
defer close(done)
pool := sync.Pool{New: func() interface{} { return new(common.Block) }}
go func() {
defer close(blocks)
for {
block := pool.Get().(*common.Block)
if err := producer.Next(block); err != nil {
if err != common.ErrNoMoreBlocks {
log.Printf("Reading failed: %s\n", err)
}
return
}
select {
case blocks <- block:
case <-done:
return
}
}
}()
i := 0
for block := range blocks {
if err := consumer.Consume(block); err != nil {
return err
}
// Drop the data reference so pooled blocks don't pin large buffers.
block.Data = nil
pool.Put(block)
i++
if i%1000 == 0 {
log.Printf("%d blocks transferred.\n", i)
}
}
log.Printf("%d blocks transferred in total.\n", i)
return nil
}
// main converts a Minetest block database between the sqlite and
// leveldb backends and between the plain and interleaved key schemas.
// Usage: mtdbconverter [<options>] <source database> <dest database>
func main() {
	var (
		srcBackend     string
		dstBackend     string
		srcInterleaved bool
		dstInterleaved bool
		version        bool
	)
	flag.Usage = usage
	flag.StringVar(&srcBackend, "source-backend", "sqlite",
		"type of source database (leveldb, sqlite)")
	flag.StringVar(&srcBackend, "sb", "sqlite",
		"type of source database (leveldb, sqlite). Shorthand")
	flag.StringVar(&dstBackend, "dest-backend", "leveldb",
		"type of destination database (leveldb, sqlite)")
	flag.StringVar(&dstBackend, "db", "leveldb",
		"type of destination database (leveldb, sqlite). Shorthand")
	flag.BoolVar(&srcInterleaved, "source-interleaved", false,
		"Is source database interleaved?")
	flag.BoolVar(&srcInterleaved, "si", false,
		"Is source database interleaved? Shorthand")
	flag.BoolVar(&dstInterleaved, "dest-interleaved", true,
		"Should dest database be interleaved?")
	// Fixed help text: -di is the shorthand of -dest-interleaved,
	// the former text wrongly said "source" here.
	flag.BoolVar(&dstInterleaved, "di", true,
		"Should dest database be interleaved? Shorthand")
	flag.BoolVar(&version, "version", false, "Print version and exit.")
	flag.Parse()
	if version {
		common.PrintVersionAndExit()
	}
	if flag.NArg() < 2 {
		log.Fatal("Missing source and/or destination database.")
	}
	var (
		producer common.BlockProducer
		consumer common.BlockConsumer
		err      error
	)
	// Select the reading side depending on the source backend type.
	if srcBackend == "sqlite" {
		if producer, err = NewSQLiteBlockProducer(
			flag.Arg(0),
			selectKeySplitter(srcInterleaved)); err != nil {
			log.Fatalf("Cannot open '%s': %s", flag.Arg(0), err)
		}
	} else { // LevelDB
		if producer, err = NewLevelDBBlockProducer(
			flag.Arg(0),
			selectKeySplitter(srcInterleaved),
			selectKeyDecoder(srcInterleaved)); err != nil {
			log.Fatalf("Cannot open '%s': %s", flag.Arg(0), err)
		}
	}
	defer producer.Close()
	// Select the writing side depending on the destination backend type.
	if dstBackend == "sqlite" {
		if consumer, err = NewSQLiteBlockConsumer(
			flag.Arg(1),
			selectKeyJoiner(dstInterleaved)); err != nil {
			log.Fatalf("Cannot open '%s': %s", flag.Arg(1), err)
		}
	} else { // LevelDB
		if consumer, err = NewLevelDBBlockConsumer(
			flag.Arg(1),
			selectKeyJoiner(dstInterleaved),
			selectKeyEncoder(dstInterleaved)); err != nil {
			log.Fatalf("Cannot open '%s': %s", flag.Arg(1), err)
		}
	}
	defer consumer.Close()
	if err = copyProducerToConsumer(producer, consumer); err != nil {
		log.Fatalf("Database transfer failed: %s\n", err)
	}
}

185
cmd/mtdbconverter/sqlite.go Normal file
View File

@ -0,0 +1,185 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"database/sql"
"errors"
"os"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
_ "github.com/mattn/go-sqlite3"
)
const (
createSQL = "CREATE TABLE blocks (pos INT NOT NULL PRIMARY KEY, data BLOB)"
insertSQL = "INSERT INTO blocks (pos, data) VALUES (?, ?)"
deleteSQL = "DELETE FROM blocks"
selectSQL = "SELECT pos, data FROM blocks"
)
// ErrDatabaseNotExists indicates that the database does not exist.
var ErrDatabaseNotExists = errors.New("Database does not exists.")
const blocksPerTx = 128 // Number of blocks copied in a transaction.
type (
// SQLiteBlockProducer helps getting blocks from a SQLite database.
SQLiteBlockProducer struct {
db *sql.DB
rows *sql.Rows
splitter common.KeySplitter
}
// SQLiteBlockConsumer helps storing blocks into a SQLite database.
SQLiteBlockConsumer struct {
db *sql.DB
insertStmt *sql.Stmt
tx *sql.Tx
txCounter int
joiner common.KeyJoiner
}
)
func fileExists(path string) bool {
_, err := os.Stat(path)
return !os.IsNotExist(err)
}
// NewSQLiteBlockConsumer returns a storage helper for SQLite databases.
// A missing database file is created with the blocks schema; an
// existing one is emptied first. A prepared insert statement and an
// initial transaction are set up; partially acquired resources are
// released on every error path.
func NewSQLiteBlockConsumer(
path string,
joiner common.KeyJoiner) (sbc *SQLiteBlockConsumer, err error) {
// Decide before sql.Open: opening would create the file itself.
createNew := !fileExists(path)
var db *sql.DB
if db, err = sql.Open("sqlite3", path); err != nil {
return
}
if createNew {
if _, err = db.Exec(createSQL); err != nil {
db.Close()
return
}
} else {
// Reuse the existing file but drop all previous block rows.
if _, err = db.Exec(deleteSQL); err != nil {
db.Close()
return
}
}
var insertStmt *sql.Stmt
if insertStmt, err = db.Prepare(insertSQL); err != nil {
db.Close()
return
}
var tx *sql.Tx
if tx, err = db.Begin(); err != nil {
insertStmt.Close()
db.Close()
return
}
sbc = &SQLiteBlockConsumer{
db: db,
insertStmt: insertStmt,
tx: tx,
joiner: joiner}
return
}
// Close closes a SQLite storage helper: it commits the pending
// transaction, releases the prepared statement and closes the DB.
// NOTE(review): the Commit error is deliberately ignored here —
// confirm callers do not need it.
func (sbc *SQLiteBlockConsumer) Close() error {
sbc.tx.Commit()
sbc.insertStmt.Close()
return sbc.db.Close()
}
// getTx returns the current transaction, committing and starting a
// fresh one after every blocksPerTx consumed blocks to bound the
// transaction size.
func (sbc *SQLiteBlockConsumer) getTx() (tx *sql.Tx, err error) {
if sbc.txCounter >= blocksPerTx {
sbc.txCounter = 0
if err = sbc.tx.Commit(); err != nil {
return
}
if sbc.tx, err = sbc.db.Begin(); err != nil {
return
}
}
sbc.txCounter++
tx = sbc.tx
return
}
// Consume stores a block in an SQLite database.
// The prepared insert statement is bound to the current batch
// transaction via tx.Stmt before execution.
func (sbc *SQLiteBlockConsumer) Consume(block *common.Block) (err error) {
var tx *sql.Tx
if tx, err = sbc.getTx(); err != nil {
return
}
_, err = tx.Stmt(sbc.insertStmt).Exec(sbc.joiner(block.Coord), block.Data)
return
}
// NewSQLiteBlockProducer returns a new producer to fetch blocks from a
// SQLite database.
// It fails with ErrDatabaseNotExists for a missing file (sql.Open
// would otherwise silently create an empty database) and starts a
// full-table query over the blocks table.
func NewSQLiteBlockProducer(
path string,
splitter common.KeySplitter) (sbp *SQLiteBlockProducer, err error) {
if !fileExists(path) {
err = ErrDatabaseNotExists
return
}
var db *sql.DB
if db, err = sql.Open("sqlite3", path); err != nil {
return
}
var rows *sql.Rows
if rows, err = db.Query(selectSQL); err != nil {
db.Close()
return
}
sbp = &SQLiteBlockProducer{
db: db,
rows: rows,
splitter: splitter}
return
}
// Next fetches the next block from a SQLite database.
// It returns common.ErrNoMoreBlocks when the result set is exhausted
// and closes the rows cursor at that point.
func (sbp *SQLiteBlockProducer) Next(block *common.Block) (err error) {
if sbp.rows == nil {
err = common.ErrNoMoreBlocks
return
}
if sbp.rows.Next() {
var key int64
if err = sbp.rows.Scan(&key, &block.Data); err == nil {
block.Coord = sbp.splitter(key)
}
} else {
sbp.rows.Close()
sbp.rows = nil
err = common.ErrNoMoreBlocks
}
return
}
// Close closes a block producer reading from a SQLite database,
// releasing the rows cursor (unless Next already did) and the DB.
func (sbp *SQLiteBlockProducer) Close() error {
	if rows := sbp.rows; rows != nil {
		rows.Close()
	}
	return sbp.db.Close()
}

View File

@ -0,0 +1,45 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
type (
// Block is the essential transfer unit from and to the database.
// Key is the serialized spatial position.
// Data is the serialized form of the corresponding block data.
Block struct {
Key []byte
Data []byte
}
// Session is a database session.
Session interface {
// Del deletes a block by a given key.
Del(hash, key []byte) (bool, error)
// Fetch fetches the block data for a given position.
Fetch(hash, key []byte) ([]byte, error)
// InTransaction returns true if a transaction is running.
InTransaction() bool
// Store stores a block with a given position and data.
Store(hash, key, value []byte) (bool, error)
// AllKeys returns all keys in the database.
AllKeys(hash []byte, done <-chan struct{}) (<-chan []byte, int, error)
// SpatialQuery performs a box query between the positions first and second.
SpatialQuery(hash, first, second []byte, done <-chan struct{}) (<-chan Block, error)
// BeginTransaction starts a transaction.
BeginTransaction() error
// CommitTransaction finishes a transaction.
CommitTransaction() error
// Close closes the database session.
Close() error
}
// Backend is the interface representing a database.
Backend interface {
// NewSession opens a new session.
NewSession() (Session, error)
// Shutdown shuts down the database server.
Shutdown() error
}
)

View File

@ -0,0 +1,77 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"encoding/json"
"log"
"net/http"
"sync"
"bytes"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// Pull up if it _really_ produces too much data.
// quantizationFactor divides block X/Z coordinates before they are
// recorded, coalescing neighboring columns into one change entry.
const quantizationFactor = 1
// quantizedXZ is the map column (X/Z pair, already divided by
// quantizationFactor) used as a deduplication key for changes.
type quantizedXZ struct {
X, Z int16
}
// changeTracker accumulates the set of changed columns between
// flushes; the mutex guards the map against concurrent writers.
type changeTracker struct {
changes map[quantizedXZ]struct{}
mutex sync.Mutex
}
// newChangeTracker returns an empty, ready-to-use change tracker.
func newChangeTracker() *changeTracker {
return &changeTracker{changes: make(map[quantizedXZ]struct{})}
}
// BlockChanged records the quantized X/Z column of a changed block
// so it is reported with the next flush. Safe for concurrent use.
func (ct *changeTracker) BlockChanged(coord common.Coord) {
	ct.mutex.Lock()
	defer ct.mutex.Unlock()
	key := quantizedXZ{
		X: coord.X / quantizationFactor,
		Z: coord.Z / quantizationFactor,
	}
	ct.changes[key] = struct{}{}
}
// FlushChanges swaps out the currently collected set of changed
// columns and posts it JSON-encoded to the given URL from a
// background goroutine. A no-op if nothing changed since the last
// flush. Errors are logged, not returned (fire-and-forget).
func (ct *changeTracker) FlushChanges(url string) {
var oldChanges map[quantizedXZ]struct{}
// Grab the change set under the lock and start a fresh one.
ct.mutex.Lock()
if len(ct.changes) > 0 {
oldChanges = ct.changes
ct.changes = make(map[quantizedXZ]struct{})
}
ct.mutex.Unlock()
if oldChanges == nil {
return
}
// Encode and POST outside the lock so BlockChanged stays cheap.
go func() {
changes := make([]quantizedXZ, len(oldChanges))
i := 0
for change := range oldChanges {
changes[i] = change
i++
}
var err error
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
if err = encoder.Encode(changes); err != nil {
log.Printf("WARN: encode changes to JSON failed: %s\n", err)
return
}
var resp *http.Response
resp, err = http.Post(
url, "application/json", bytes.NewBuffer(buf.Bytes()))
// resp can be non-nil even on error; always close the body if set.
if resp != nil {
defer resp.Body.Close()
}
if err != nil {
log.Printf("WARN: posting changes to %s failed: %s\n", url, err)
}
}()
}

View File

@ -0,0 +1,241 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"bufio"
"log"
"net"
"strconv"
)
// Pre-rendered RESP (Redis serialization protocol) replies shared by
// all connections.
var (
redisOk = []byte("+OK\r\n")
redisPong = []byte("+PONG\r\n")
redisError = []byte("-ERR\r\n")
redisNoSuchBlock = []byte("$-1\r\n")
redisCrnl = []byte("\r\n")
redisEmptyArray = []byte("*0\r\n")
redisQueued = []byte("+QUEUED\r\n")
redisTrue = []byte(":1\r\n")
redisFalse = []byte(":0\r\n")
)
// Connection handles one Redis-protocol client on top of a database
// session. boolArray queues the per-command results gathered inside a
// MULTI/EXEC transaction.
type Connection struct {
conn net.Conn
session Session
maxBulkStringSize int64
boolArray []bool
}
// NewConnection wraps a freshly accepted client socket together with
// its database session and the bulk-string size limit.
func NewConnection(conn net.Conn, session Session, maxBulkStringSize int64) *Connection {
	c := &Connection{
		conn:              conn,
		session:           session,
		maxBulkStringSize: maxBulkStringSize,
		boolArray:         []bool{},
	}
	return c
}
// Run drives the RESP parser over the client socket until the client
// disconnects, then closes session and socket (in that order).
func (c *Connection) Run() {
defer func() {
c.session.Close()
c.conn.Close()
}()
r := bufio.NewReaderSize(c.conn, 8*1024)
parser := NewRedisParser(r, c, c.maxBulkStringSize)
parser.Parse()
log.Println("client disconnected")
}
func logError(err error) bool {
if err != nil {
log.Printf("ERROR: %s\n", err)
return false
}
return true
}
// Hdel deletes the block stored under key and reports the outcome to the
// client as a RESP boolean, or an error reply if the backend failed.
func (c *Connection) Hdel(hash, key []byte) bool {
	success, err := c.session.Del(hash, key)
	if err == nil {
		return c.writeBool(success)
	}
	return c.writeError(err)
}
// Hget fetches the block stored under key and writes it to the client as
// a bulk string; a missing block becomes a RESP null bulk string.
func (c *Connection) Hget(hash, key []byte) bool {
	data, err := c.session.Fetch(hash, key)
	if err != nil {
		return c.writeError(err)
	}
	return c.writeBlock(data)
}
// Hset stores data under key. Inside a MULTI transaction the per-key
// "existed before" flag is queued and +QUEUED answered immediately;
// otherwise the flag is written as a RESP boolean right away.
func (c *Connection) Hset(hash, key, data []byte) bool {
	var err error
	var exists bool
	if exists, err = c.session.Store(hash, key, data); err != nil {
		return c.writeError(err)
	}
	if c.session.InTransaction() {
		c.boolArray = append(c.boolArray, exists)
		return c.writeQueued()
	}
	return c.writeBool(exists)
}
// Multi starts a transaction. A nested MULTI is not an error: it is
// logged and the already-running transaction is kept.
func (c *Connection) Multi() bool {
	if c.session.InTransaction() {
		log.Println("WARN: Already running transaction.")
	} else {
		if err := c.session.BeginTransaction(); err != nil {
			return c.writeError(err)
		}
	}
	return c.writeOk()
}
// Exec commits the running transaction and replies with the array of
// queued HSET results. Without an open transaction it answers *0.
func (c *Connection) Exec() bool {
	if !c.session.InTransaction() {
		return c.writeEmptyArray()
	}
	// Detach the queued results before committing so the slice is reset
	// even if the commit fails.
	arr := c.boolArray
	c.boolArray = []bool{}
	if err := c.session.CommitTransaction(); err != nil {
		return c.writeError(err)
	}
	return c.writeBoolArray(arr)
}
// Hkeys streams all block keys to the client as a RESP array. The key
// count is needed up front for the array header, so AllKeys reports it
// along with the channel. Closing done (via defer) stops the backend
// producer if writing to the client fails midway.
func (c *Connection) Hkeys(hash []byte) bool {
	var (
		err  error
		n    int
		keys <-chan []byte
		done = make(chan struct{})
	)
	defer close(done)
	if keys, n, err = c.session.AllKeys(hash, done); err != nil {
		return c.writeError(err)
	}
	if n == 0 {
		return c.writeEmptyArray()
	}
	if _, err := c.conn.Write(redisLength('*', n)); err != nil {
		return logError(err)
	}
	for key := range keys {
		if err = c.writeBulkString(key); err != nil {
			return logError(err)
		}
	}
	return true
}
// Ping answers the protocol liveness check with +PONG.
func (c *Connection) Ping() bool {
	return c.writeMessage(redisPong)
}
// HSpatial streams all blocks inside the box spanned by the keys first
// and second as alternating key/data bulk strings, terminated by a null
// bulk string. Closing done stops the backend producer early.
func (c *Connection) HSpatial(hash, first, second []byte) bool {
	var (
		err    error
		blocks <-chan Block
		done   = make(chan struct{})
	)
	defer close(done)
	if blocks, err = c.session.SpatialQuery(hash, first, second, done); err != nil {
		return c.writeError(err)
	}
	for block := range blocks {
		if err = c.writeBulkString(block.Key); err != nil {
			return logError(err)
		}
		if err = c.writeBulkString(block.Data); err != nil {
			return logError(err)
		}
	}
	// A null bulk string marks the end of the stream.
	return logError(c.writeBulkString(nil))
}
// writeError logs err and sends a generic -ERR reply to the client.
func (c *Connection) writeError(err error) bool {
	logError(err)
	return c.writeMessage(redisError)
}

// writeEmptyArray sends an empty RESP array (*0).
func (c *Connection) writeEmptyArray() bool {
	return c.writeMessage(redisEmptyArray)
}

// writeBool sends a RESP integer reply: :1 for true, :0 for false.
func (c *Connection) writeBool(b bool) bool {
	if b {
		return c.writeMessage(redisTrue)
	}
	return c.writeMessage(redisFalse)
}
// redisLength renders a RESP length header: the prefix byte (e.g. '*' or
// '$'), the decimal value of s, and a trailing CRLF.
func redisLength(prefix byte, s int) []byte {
	out := make([]byte, 0, 16)
	out = append(out, prefix)
	out = strconv.AppendInt(out, int64(s), 10)
	return append(out, '\r', '\n')
}
// writeBoolArray sends arr as a RESP array of integer replies; used for
// the result of EXEC.
func (c *Connection) writeBoolArray(arr []bool) bool {
	if _, err := c.conn.Write(redisLength('*', len(arr))); err != nil {
		return logError(err)
	}
	for _, b := range arr {
		if !c.writeBool(b) {
			return false
		}
	}
	return true
}
// writeMessage writes a canned reply and reports success.
func (c *Connection) writeMessage(msg []byte) bool {
	_, err := c.conn.Write(msg)
	return logError(err)
}

// writeOk sends +OK.
func (c *Connection) writeOk() bool {
	return c.writeMessage(redisOk)
}

// writeQueued sends +QUEUED (reply to commands inside MULTI).
func (c *Connection) writeQueued() bool {
	return c.writeMessage(redisQueued)
}

// writeBlock sends data as a bulk string, logging any write error.
func (c *Connection) writeBlock(data []byte) bool {
	return logError(c.writeBulkString(data))
}
// writeBulkString writes data as a RESP bulk string
// ($<len>\r\n<data>\r\n). A nil slice is encoded as the null bulk string
// ($-1), used for "no such block" and as the end-of-stream marker.
func (c *Connection) writeBulkString(data []byte) (err error) {
	con := c.conn
	if data == nil {
		_, err = con.Write(redisNoSuchBlock)
	} else {
		if _, err = con.Write(redisLength('$', len(data))); err != nil {
			return
		}
		if _, err = con.Write(data); err != nil {
			return
		}
		_, err = con.Write(redisCrnl)
	}
	return
}

429
cmd/mtredisalize/leveldb.go Normal file
View File

@ -0,0 +1,429 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"log"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
leveldb "github.com/jmhodges/levigo"
)
// LevelDBBackend stores map blocks in a LevelDB database. Depending on
// the key layout it either works directly on interleaved (Z-order) keys
// or keeps a 3D coverage index for plain keys to speed up spatial
// queries.
type LevelDBBackend struct {
	cache         *leveldb.Cache
	db            *leveldb.DB
	interleaved   bool // true: keys are Z-order interleaved, no coverage index
	coverage      *common.Coverage3D
	encoder       common.KeyTranscoder // database key -> wire key
	decoder       common.KeyTranscoder // wire key -> database key
	changeTracker *changeTracker
}

// LevelDBSession is a per-client handle on the backend; tx holds the
// open write batch while a transaction is running, else nil.
type LevelDBSession struct {
	backend *LevelDBBackend
	tx      *leveldb.WriteBatch
}
// NewLeveDBBackend opens (or creates) the LevelDB database at path and
// wires up key transcoders, the optional LRU cache (cacheSize in MB)
// and, for non-interleaved databases, the coverage index.
// NOTE(review): the name is missing an 'l' ("LeveDB"), but callers
// already depend on it, so it is kept for compatibility.
func NewLeveDBBackend(
	path string,
	changeTracker *changeTracker,
	interleaved bool,
	cacheSize int) (ldb *LevelDBBackend, err error) {
	opts := leveldb.NewOptions()
	var cache *leveldb.Cache
	if cacheSize > 0 {
		cache = leveldb.NewLRUCache(cacheSize * 1024 * 1024)
		opts.SetCache(cache)
	}
	opts.SetCreateIfMissing(true)
	var db *leveldb.DB
	if db, err = leveldb.Open(path, opts); err != nil {
		// Opening failed: release the cache before bailing out.
		if cache != nil {
			cache.Close()
		}
		return
	}
	var (
		encoder common.KeyTranscoder
		decoder common.KeyTranscoder
	)
	if interleaved {
		encoder = common.TranscodeInterleavedToPlain
		decoder = common.TranscodePlainToInterleaved
	} else {
		encoder = common.IdentityTranscoder
		decoder = common.IdentityTranscoder
	}
	ldb = &LevelDBBackend{
		cache:         cache,
		db:            db,
		interleaved:   interleaved,
		encoder:       encoder,
		decoder:       decoder,
		changeTracker: changeTracker,
	}
	if !interleaved {
		// Plain keys need the coverage index for spatial queries.
		if err = ldb.buildCoverage(); err != nil {
			ldb.Shutdown()
			ldb = nil
			return
		}
	}
	return
}
// buildCoverage scans every key in the database once and inserts its
// decoded coordinate into a fresh Coverage3D index. It runs at startup
// and may take a while on big worlds. The index is only assigned to the
// backend after a fully successful scan.
func (ldb *LevelDBBackend) buildCoverage() error {
	log.Println("INFO: Start building coverage index (this may take some time)...")
	coverage := common.NewCoverage3D()
	ro := leveldb.NewReadOptions()
	defer ro.Close()
	// Keep this one-off full scan out of the block cache.
	ro.SetFillCache(false)
	it := ldb.db.NewIterator(ro)
	// Fix: the iterator was never closed before, leaking its underlying
	// LevelDB resources (levigo iterators must be closed explicitly).
	defer it.Close()
	it.SeekToFirst()
	for ; it.Valid(); it.Next() {
		c, err := common.DecodeStringBytesToCoord(it.Key())
		if err != nil {
			return err
		}
		coverage.Insert(c)
	}
	if err := it.GetError(); err != nil {
		return err
	}
	ldb.coverage = coverage
	log.Println("INFO: Finished building coverage index.")
	return nil
}
// NewSession hands out a new session; LevelDB needs no per-session state
// beyond the backend pointer, so this cannot fail.
func (ldb *LevelDBBackend) NewSession() (Session, error) {
	return &LevelDBSession{ldb, nil}, nil
}

// Close releases a still-open write batch of an uncommitted transaction.
func (ldbs *LevelDBSession) Close() error {
	if ldbs.tx != nil {
		ldbs.tx.Close()
	}
	return nil
}
// Shutdown closes the database and, if present, the block cache.
func (ldb *LevelDBBackend) Shutdown() error {
	ldb.db.Close()
	if ldb.cache != nil {
		ldb.cache.Close()
	}
	return nil
}
// Del removes the block under key. It first fetches the key to learn
// whether it exists, so the caller gets the Redis-style "was deleted"
// flag; note the read and the delete are not atomic.
func (ldbs *LevelDBSession) Del(hash, key []byte) (success bool, err error) {
	if key, err = ldbs.backend.decoder(key); err != nil {
		return
	}
	ro := leveldb.NewReadOptions()
	defer ro.Close()
	var data []byte
	data, err = ldbs.backend.db.Get(ro, key)
	if err != nil {
		return
	}
	// levigo returns nil data (and nil error) for a missing key.
	if data == nil {
		success = false
		return
	}
	success = true
	wo := leveldb.NewWriteOptions()
	defer wo.Close()
	err = ldbs.backend.db.Delete(wo, key)
	return
}
// Fetch returns the block stored under key, or nil value (with nil
// error) when the key does not exist.
func (ldbs *LevelDBSession) Fetch(hash, key []byte) (value []byte, err error) {
	if key, err = ldbs.backend.decoder(key); err != nil {
		return
	}
	ro := leveldb.NewReadOptions()
	value, err = ldbs.backend.db.Get(ro, key)
	//if err != nil {
	//	log.Printf("Fetch key '%s' failed.\n", key)
	//} else {
	//	log.Printf("Fetch key = '%s' len(value) = %d\n", key, len(value))
	//}
	ro.Close()
	return
}
// InTransaction reports whether a write batch is currently open.
func (ldbs *LevelDBSession) InTransaction() bool {
	return ldbs.tx != nil
}

// keyExists probes for key by fetching its value; levigo returns nil
// data (and nil error) for a missing key.
func keyExists(db *leveldb.DB, key []byte) (exists bool, err error) {
	ro := leveldb.NewReadOptions()
	defer ro.Close()
	var data []byte
	if data, err = db.Get(ro, key); err != nil {
		return
	}
	exists = data != nil
	return
}
// Store writes value under key, either buffered in the open write batch
// or directly to the database. It reports whether the key already
// existed and notifies the coverage index and change tracker.
func (ldbs *LevelDBSession) Store(hash, key, value []byte) (exists bool, err error) {
	origKey := key
	if key, err = ldbs.backend.decoder(key); err != nil {
		return
	}
	if exists, err = keyExists(ldbs.backend.db, key); err != nil {
		return
	}
	if ldbs.tx != nil {
		ldbs.tx.Put(key, value)
	} else {
		wo := leveldb.NewWriteOptions()
		err = ldbs.backend.db.Put(wo, key, value)
		wo.Close()
		if err != nil {
			return
		}
	}
	// This is technically too early: inside a transaction the write is
	// only committed (and may still fail) later.
	if ldbs.backend.changeTracker != nil || ldbs.backend.coverage != nil {
		c, err := common.DecodeStringBytesToCoord(origKey)
		if err != nil {
			return exists, err
		}
		if ldbs.backend.coverage != nil && !exists {
			ldbs.backend.coverage.Insert(c)
		}
		if ldbs.backend.changeTracker != nil {
			ldbs.backend.changeTracker.BlockChanged(c)
		}
	}
	return
}
// BeginTransaction opens a write batch that buffers Puts until commit.
func (ldbs *LevelDBSession) BeginTransaction() error {
	ldbs.tx = leveldb.NewWriteBatch()
	return nil
}

// CommitTransaction writes the buffered batch synchronously to disk.
// Without an open transaction it only logs a warning.
func (ldbs *LevelDBSession) CommitTransaction() (err error) {
	tx := ldbs.tx
	if tx == nil {
		log.Println("WARN: No transaction running.")
		return
	}
	ldbs.tx = nil
	wo := leveldb.NewWriteOptions()
	wo.SetSync(true)
	err = ldbs.backend.db.Write(wo, tx)
	wo.Close()
	tx.Close()
	return
}
// AllKeys returns a channel of all (encoded) keys plus their total
// count. The count requires a full first pass over the database because
// the RESP array header needs the length up front; a second pass then
// streams the keys. Closing done aborts the stream.
func (ldbs *LevelDBSession) AllKeys(
	hash []byte,
	done <-chan struct{}) (<-chan []byte, int, error) {
	ro := leveldb.NewReadOptions()
	// Do not pollute the block cache with this full scan.
	ro.SetFillCache(false)
	it := ldbs.backend.db.NewIterator(ro)
	it.SeekToFirst()
	var n int
	for ; it.Valid(); it.Next() {
		n++
	}
	if err := it.GetError(); err != nil {
		it.Close()
		ro.Close()
		return nil, n, err
	}
	keys := make(chan []byte)
	go func() {
		defer ro.Close()
		defer close(keys)
		defer it.Close()
		it.SeekToFirst()
		encoder := ldbs.backend.encoder
		for ; it.Valid(); it.Next() {
			if key, err := encoder(it.Key()); err == nil {
				select {
				case keys <- key:
				case <-done:
					return
				}
			} else {
				log.Printf("WARN: %s\n", err)
				return
			}
		}
		if err := it.GetError(); err != nil {
			log.Printf("WARN: %s\n", err)
		}
	}()
	return keys, n, nil
}
// SpatialQuery dispatches a box query to the strategy matching the key
// layout of the underlying database.
func (ldbs *LevelDBSession) SpatialQuery(
	hash, first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	if ldbs.backend.interleaved {
		return ldbs.interleavedSpatialQuery(first, second, done)
	}
	return ldbs.plainSpatialQuery(first, second, done)
}
// plainSpatialQuery streams all blocks inside the box spanned by the two
// plain-encoded corner keys. It walks the coverage index slice by slice
// and fetches every candidate key individually with Get.
func (ldbs *LevelDBSession) plainSpatialQuery(
	first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	var (
		firstKey  int64
		secondKey int64
		err       error
	)
	if firstKey, err = common.DecodeStringFromBytes(first); err != nil {
		return nil, err
	}
	if secondKey, err = common.DecodeStringFromBytes(second); err != nil {
		return nil, err
	}
	// Normalize the two corners to min/max order.
	c1 := common.PlainToCoord(firstKey)
	c2 := common.PlainToCoord(secondKey)
	c1, c2 = common.MinCoord(c1, c2), common.MaxCoord(c1, c2)
	blocks := make(chan Block)
	go func() {
		defer close(blocks)
		ro := leveldb.NewReadOptions()
		defer ro.Close()
		var a, b common.Coord
		for _, r := range ldbs.backend.coverage.Query(c1, c2) {
			a.Z, b.Z = int16(r.Z), int16(r.Z)
			a.X, b.X = int16(r.X1), int16(r.X2)
			for a.Y = r.Y2; a.Y >= r.Y1; a.Y-- {
				b.Y = a.Y
				// The keys in the database are stored and ordered as strings
				// "1", "10", ..., "19", "2", "20", "21" so you cannot use
				// an iterator and assume it is numerical ordered.
				// Each block is fetched with a Get instead.
				for f, t := common.CoordToPlain(a), common.CoordToPlain(b); f <= t; f++ {
					key := common.StringToBytes(f)
					value, err := ldbs.backend.db.Get(ro, key)
					if err != nil {
						log.Printf("get failed: %s\n", err)
						return
					}
					if value != nil {
						select {
						case blocks <- Block{Key: key, Data: value}:
						case <-done:
							return
						}
					}
				}
			}
		}
	}()
	return blocks, nil
}
// interleavedSpatialQuery streams all blocks inside the box spanned by
// the two corner keys, exploiting the Z-order (interleaved) key layout:
// it iterates from zmin towards zmax and, whenever the current code
// falls outside the box, seeks ahead to the BigMin successor to skip
// runs of keys that cannot intersect the box.
func (ldbs *LevelDBSession) interleavedSpatialQuery(
	first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	var (
		firstKey  int64
		secondKey int64
		err       error
	)
	if firstKey, err = common.DecodeStringFromBytes(first); err != nil {
		return nil, err
	}
	if secondKey, err = common.DecodeStringFromBytes(second); err != nil {
		return nil, err
	}
	// Normalize the two corners to min/max order.
	c1 := common.ClipCoord(common.PlainToCoord(firstKey))
	c2 := common.ClipCoord(common.PlainToCoord(secondKey))
	c1, c2 = common.MinCoord(c1, c2), common.MaxCoord(c1, c2)
	blocks := make(chan Block)
	go func() {
		defer close(blocks)
		ro := leveldb.NewReadOptions()
		defer ro.Close()
		ro.SetFillCache(false)
		it := ldbs.backend.db.NewIterator(ro)
		defer it.Close()
		zmin, zmax := common.CoordToInterleaved(c1), common.CoordToInterleaved(c2)
		// Should not be necessary.
		zmin, zmax = common.Order64(zmin, zmax)
		var (
			cub        = common.Cuboid{P1: c1, P2: c2}
			err        error
			encodedKey []byte
		)
		//log.Printf("seeking to: %d\n", zmin)
		it.Seek(common.ToBigEndian(zmin))
		for it.Valid() {
			zcode := common.FromBigEndian(it.Key())
			if zcode > zmax {
				break
			}
			if c := common.InterleavedToCoord(zcode); cub.Contains(c) {
				// Re-encode to the plain key format the client expects.
				if encodedKey, err = common.EncodeStringToBytes(common.CoordToPlain(c)); err != nil {
					log.Printf("error encoding key: %s\n", err)
					return
				}
				select {
				case blocks <- Block{Key: encodedKey, Data: it.Value()}:
				case <-done:
					return
				}
				it.Next()
			} else {
				// Outside the box: jump to the next Z-code that can be inside.
				next := common.BigMin(zmin, zmax, zcode)
				//log.Printf("seeking to: %d\n", next)
				it.Seek(common.ToBigEndian(next))
				//log.Printf("seeking done: %d\n", next)
			}
		}
		//log.Println("iterating done")
		if err = it.GetError(); err != nil {
			log.Printf("error while iterating: %s\n", err)
			return
		}
	}()
	return blocks, nil
}

177
cmd/mtredisalize/main.go Normal file
View File

@ -0,0 +1,177 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"log"
"net"
"os"
"os/signal"
"runtime"
"strings"
"time"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// Defaults for the tunable command line options.
const (
	defaultMaxBulkStringSize = 32 * 1024 * 1024
	defaultGCDuration        = "24h"
	defaultChangeDuration    = "30s"
)

// usage prints the command line help to stderr.
func usage() {
	fmt.Fprintf(os.Stderr,
		"Usage: %s [<options>] <database>\n", os.Args[0])
	fmt.Fprintln(os.Stderr, "Options:")
	flag.PrintDefaults()
}
// main wires up the whole server: it parses the command line, opens the
// chosen backend (LevelDB or SQLite), listens on TCP or a unix socket,
// and multiplexes client accepts, the shutdown signal, periodic forced
// GC and change flushing in one select loop.
func main() {
	var (
		port              int
		host              string
		driver            string
		cacheSize         int
		version           bool
		interleaved       bool
		changeURL         string
		gcDuration        string
		changeDuration    string
		maxBulkStringSize int64
	)
	flag.Usage = usage
	flag.IntVar(&port, "port", 6379, "port to bind")
	flag.StringVar(&driver, "driver", "leveldb", "type of database (leveldb, sqlite)")
	flag.StringVar(&host, "host", "", "host to bind")
	flag.IntVar(&cacheSize, "cache", 32, "cache size in MB")
	flag.BoolVar(&version, "version", false, "Print version and exit.")
	flag.BoolVar(&interleaved,
		"interleaved", false, "Backend stores key in interleaved form.")
	flag.StringVar(&gcDuration,
		"gc-duration", defaultGCDuration, "Duration between forced GCs.")
	flag.StringVar(&changeDuration,
		"change-duration", defaultChangeDuration, "Duration to aggregate changes.")
	flag.StringVar(&changeURL, "change-url", "", "URL to send changes to.")
	flag.Int64Var(&maxBulkStringSize, "max-bulk-string-size", defaultMaxBulkStringSize,
		"max size of a bulk string to be accepted as input (in bytes).")
	flag.Parse()
	if version {
		common.PrintVersionAndExit()
	}
	if flag.NArg() < 1 {
		log.Fatal("Missing path to world")
	}
	var (
		err           error
		backend       Backend
		gcDur         time.Duration
		chDur         time.Duration
		changeTracker *changeTracker
	)
	if gcDur, err = time.ParseDuration(gcDuration); err != nil {
		log.Fatal(err)
	}
	// Setup the change listening stuff.
	var changeChan <-chan time.Time
	useChangeNotification := changeURL != ""
	if useChangeNotification {
		if chDur, err = time.ParseDuration(changeDuration); err != nil {
			log.Fatal(err)
		}
		changeChan = time.Tick(chDur)
		changeTracker = newChangeTracker()
	} else {
		// We will never receive ticks on this.
		changeChan = make(<-chan time.Time)
	}
	path := flag.Arg(0)
	if driver == "sqlite" {
		if backend, err = NewSQLiteBackend(path, changeTracker, interleaved); err != nil {
			log.Fatal(err)
		}
	} else {
		if backend, err = NewLeveDBBackend(
			path, changeTracker, interleaved, cacheSize); err != nil {
			log.Fatal(err)
		}
	}
	defer backend.Shutdown()
	// A host containing '/' is treated as a unix domain socket path.
	var listener net.Listener
	var proto, address string
	if strings.ContainsRune(host, '/') {
		proto, address = "unix", host
	} else {
		proto, address = "tcp", fmt.Sprintf("%s:%d", host, port)
	}
	listener, err = net.Listen(proto, address)
	if err != nil {
		log.Fatal(err)
	}
	defer listener.Close()
	log.Printf("Server started at %s\n", listener.Addr())
	connChan := make(chan net.Conn)
	defer close(connChan)
	sigChan := make(chan os.Signal, 1)
	// NOTE(review): os.Kill (SIGKILL) cannot be trapped on POSIX
	// systems; consider syscall.SIGTERM instead — confirm intent.
	signal.Notify(sigChan, os.Interrupt, os.Kill)
	// Accept clients in the background and feed them to the main loop.
	go func() {
		for {
			conn, err := listener.Accept()
			if err != nil {
				log.Fatal(err)
			}
			log.Printf("Client accepted from: %s\n", conn.RemoteAddr())
			connChan <- conn
		}
	}()
	log.Printf("Doing garbage collection every: %s\n", gcDur)
	gcChan := time.Tick(gcDur)
	for {
		select {
		case conn := <-connChan:
			var session Session
			if session, err = backend.NewSession(); err != nil {
				log.Printf("Cannot create session: %s\n", err)
				conn.Close()
			} else {
				// One goroutine per client connection.
				go NewConnection(conn, session, maxBulkStringSize).Run()
			}
		case <-sigChan:
			log.Println("Shutting down")
			return
		case <-gcChan:
			log.Println("Starting garbage collection.")
			runtime.GC()
			log.Println("Garbage collection done.")
		case <-changeChan:
			if changeTracker != nil {
				changeTracker.FlushChanges(changeURL)
			}
		}
	}
}

View File

@ -0,0 +1,273 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"bufio"
"bytes"
"fmt"
"io"
"log"
"strconv"
"strings"
)
// RedisCommands is the dispatch interface the parser drives; each method
// handles one supported command and returns false to end the session.
type RedisCommands interface {
	Hdel(hash, key []byte) bool
	Hget(hash, key []byte) bool
	Hset(hash, key, block []byte) bool
	Multi() bool
	Exec() bool
	Hkeys(hash []byte) bool
	HSpatial(hash, first, second []byte) bool
	Ping() bool
}

// RedisParser reads RESP from reader and feeds complete commands to
// commands. missing counts the array elements still expected for the
// current command; args collects the elements received so far.
type RedisParser struct {
	reader            *bufio.Reader
	commands          RedisCommands
	missing           int64
	args              []interface{}
	maxBulkStringSize int64
}

// NewRedisParser creates a parser bound to one client connection.
func NewRedisParser(reader *bufio.Reader,
	commands RedisCommands,
	maxBulkStringSize int64) *RedisParser {
	return &RedisParser{
		reader:            reader,
		commands:          commands,
		maxBulkStringSize: maxBulkStringSize}
}
// Parse runs the read/dispatch loop until the connection ends or a
// handler signals termination by returning false.
func (rp *RedisParser) Parse() {
	for line := rp.nextLine(); line != nil && rp.dispatch(line); line = rp.nextLine() {
	}
}

// nextLine reads one protocol line and strips the trailing CR/LF;
// it returns nil on EOF or on a read error.
func (rp *RedisParser) nextLine() []byte {
	line, err := rp.reader.ReadBytes('\n')
	if err != nil {
		if err != io.EOF {
			rp.consumeError(err)
		}
		return nil
	}
	return bytes.TrimRight(line, "\r\n")
}
// dispatch routes one line by its RESP type byte. Error replies from the
// client are deliberately ignored; unknown type bytes are skipped.
func (rp *RedisParser) dispatch(line []byte) bool {
	if len(line) < 1 {
		return false
	}
	switch line[0] {
	case '-':
		return true // ignore errors
	case ':':
		return rp.integer(line)
	case '+':
		return rp.simpleString(line)
	case '$':
		return rp.bulkString(line)
	case '*':
		return rp.array(line)
	}
	return true
}
// simpleString handles a "+..." line; the payload is the raw text.
func (rp *RedisParser) simpleString(line []byte) bool {
	return rp.consumeSimpleString(string(line[1:]))
}

// integer handles a ":<n>" line by parsing the decimal payload.
func (rp *RedisParser) integer(line []byte) bool {
	i, err := strconv.ParseInt(string(line[1:]), 10, 64)
	if err != nil {
		return rp.consumeError(err)
	}
	return rp.consumeInteger(i)
}
// bulkString parses a RESP bulk string header ("$<len>") and then reads
// the payload of that many bytes plus the trailing CRLF. A negative
// length yields a nil (null) bulk string, zero an empty one. Lengths
// above maxBulkStringSize are rejected so a client cannot force a huge
// allocation.
func (rp *RedisParser) bulkString(line []byte) bool {
	i, err := strconv.ParseInt(string(line[1:]), 10, 64)
	if err != nil {
		return rp.consumeError(err)
	}
	switch {
	case i < 0:
		return rp.consumeBulkString(nil)
	case i == 0:
		return rp.consumeBulkString([]byte{})
	default:
		if i > rp.maxBulkStringSize { // prevent denial of service.
			return rp.consumeError(
				fmt.Errorf("Bulk string too large (%d bytes).\n", i))
		}
		data := make([]byte, i)
		// io.ReadFull loops over short reads and reports a premature
		// EOF as ErrUnexpectedEOF, replacing the hand-rolled read loop.
		if _, err = io.ReadFull(rp.reader, data); err != nil {
			return rp.consumeError(err)
		}
		// Consume the CRLF that terminates the payload.
		if _, err = rp.reader.ReadBytes('\n'); err != nil {
			return rp.consumeError(err)
		}
		return rp.consumeBulkString(data)
	}
}
// array handles a "*<n>" header announcing n command arguments.
func (rp *RedisParser) array(line []byte) bool {
	var i int64
	var err error
	i, err = strconv.ParseInt(string(line[1:]), 10, 64)
	if err != nil {
		return rp.consumeError(err)
	}
	return rp.consumeArray(i)
}
// push appends one parsed argument; once the announced array is complete
// the collected command is executed and the argument list reset.
func (rp *RedisParser) push(i interface{}) bool {
	rp.args = append(rp.args, i)
	rp.missing--
	if rp.missing <= 0 {
		rp.missing = 0
		res := rp.execute()
		rp.args = []interface{}{}
		return res
	}
	return true
}
// asString converts a parsed argument to a string: strings pass through,
// byte slices are converted, anything else is formatted with %s.
func asString(i interface{}) string {
	if s, ok := i.(string); ok {
		return s
	}
	if b, ok := i.([]byte); ok {
		return string(b)
	}
	return fmt.Sprintf("%s", i)
}
// execute interprets the collected arguments as one command and invokes
// the matching RedisCommands handler. It returns false (ending the
// session) on malformed or unknown commands.
// Fix: the HDEL branch used to log "Missing argments for HGET." — wrong
// command name; the "argments"/"Too less" typos are corrected as well.
func (rp *RedisParser) execute() bool {
	l := len(rp.args)
	if l < 1 {
		log.Println("WARN: Too few arguments for command.")
		return false
	}
	cmd := strings.ToUpper(asString(rp.args[0]))
	switch cmd {
	case "HDEL":
		if l < 3 {
			log.Println("WARN: Missing arguments for HDEL.")
			return false
		}
		hash, ok1 := rp.args[1].([]byte)
		key, ok2 := rp.args[2].([]byte)
		if !ok1 || !ok2 {
			log.Println("WARN: HDEL data are not byte slices.")
			return false
		}
		return rp.commands.Hdel(hash, key)
	case "HGET":
		if l < 3 {
			log.Println("WARN: Missing arguments for HGET.")
			return false
		}
		hash, ok1 := rp.args[1].([]byte)
		key, ok2 := rp.args[2].([]byte)
		if !ok1 || !ok2 {
			log.Println("WARN: HGET data are not byte slices.")
			return false
		}
		return rp.commands.Hget(hash, key)
	case "HSET":
		if l < 4 {
			log.Println("WARN: Missing arguments for HSET.")
			return false
		}
		hash, ok1 := rp.args[1].([]byte)
		key, ok2 := rp.args[2].([]byte)
		value, ok3 := rp.args[3].([]byte)
		if !ok1 || !ok2 || !ok3 {
			log.Println("WARN: HSET data are not byte slices.")
			return false
		}
		return rp.commands.Hset(hash, key, value)
	case "MULTI":
		return rp.commands.Multi()
	case "EXEC":
		return rp.commands.Exec()
	case "HKEYS":
		if l < 2 {
			log.Println("WARN: Missing arguments for HKEYS.")
			return false
		}
		hash, ok := rp.args[1].([]byte)
		if !ok {
			log.Println("WARN: HKEYS data are not byte slices.")
			return false
		}
		return rp.commands.Hkeys(hash)
	case "HSPATIAL":
		if l < 4 {
			log.Println("WARN: Missing arguments for HSPATIAL.")
			return false
		}
		hash, ok1 := rp.args[1].([]byte)
		first, ok2 := rp.args[2].([]byte)
		second, ok3 := rp.args[3].([]byte)
		if !ok1 || !ok2 || !ok3 {
			log.Println("WARN: HSPATIAL data are not byte slices.")
			return false
		}
		return rp.commands.HSpatial(hash, first, second)
	case "PING":
		return rp.commands.Ping()
	}
	log.Printf("WARN: unknown command: '%s'\n", cmd)
	return false
}
// consumeSimpleString feeds a simple string argument to the command.
func (rp *RedisParser) consumeSimpleString(s string) bool {
	return rp.push(s)
}

// consumeBulkString feeds a bulk string argument (nil for null strings).
func (rp *RedisParser) consumeBulkString(data []byte) bool {
	return rp.push(data)
}

// consumeInteger feeds an integer argument.
func (rp *RedisParser) consumeInteger(i int64) bool {
	return rp.push(i)
}

// consumeError logs a protocol error but keeps the session alive.
func (rp *RedisParser) consumeError(err error) bool {
	log.Printf("error: %s\n", err)
	return true
}

// consumeArray records how many elements the announced command array
// has. Nested and null arrays are outside the supported protocol subset.
func (rp *RedisParser) consumeArray(i int64) bool {
	if rp.missing > 0 {
		log.Println("WARN: Nested arrays are not supported!")
		return false
	}
	if i < 0 {
		log.Println("Null arrays are not supported")
		return false
	}
	rp.missing = i
	return true
}

543
cmd/mtredisalize/sqlite.go Normal file
View File

@ -0,0 +1,543 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"database/sql"
"log"
"sync"
_ "github.com/mattn/go-sqlite3"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// globalLock serializes all access to the SQLite database across
// sessions: writes take the write lock, reads the read lock.
var globalLock sync.RWMutex

// SQL statements prepared once at backend startup.
const (
	deleteSQL = "DELETE FROM blocks WHERE pos = ?"
	fetchSQL  = "SELECT data FROM blocks WHERE pos = ?"
	existsSQL = "SELECT 1 FROM blocks WHERE pos = ?"
	updateSQL = "UPDATE blocks SET data = ? WHERE pos = ?"
	insertSQL = "INSERT INTO blocks (pos, data) VALUES (?, ?)"
	countSQL  = "SELECT count(*) FROM blocks"
	keysSQL   = "SELECT pos FROM blocks"
	rangeSQL  = "SELECT pos, data FROM blocks WHERE pos BETWEEN ? AND ? ORDER BY pos"
)
// SQLiteBackend stores map blocks in a blocks(pos, data) SQLite table
// and keeps all statements prepared. For plain (non-interleaved) keys a
// Coverage3D index accelerates spatial queries.
type SQLiteBackend struct {
	db            *sql.DB
	encoder       common.KeyEncoder // int64 position -> wire key
	decoder       common.KeyDecoder // wire key -> int64 position
	changeTracker *changeTracker
	interleaved   bool
	coverage      *common.Coverage3D
	existsStmt    *sql.Stmt
	deleteStmt    *sql.Stmt
	fetchStmt     *sql.Stmt
	insertStmt    *sql.Stmt
	updateStmt    *sql.Stmt
	countStmt     *sql.Stmt
	keysStmt      *sql.Stmt
	rangeStmt     *sql.Stmt
}

// SQLiteSession is a per-client handle; tx holds the open database
// transaction while MULTI is active, else nil.
type SQLiteSession struct {
	backend *SQLiteBackend
	tx      *sql.Tx
}
// NewSession hands out a new session on the shared backend.
func (sqlb *SQLiteBackend) NewSession() (Session, error) {
	return &SQLiteSession{sqlb, nil}, nil
}

// Close rolls back a transaction that was never committed.
func (ss *SQLiteSession) Close() error {
	t := ss.tx
	if t != nil {
		ss.tx = nil
		return t.Rollback()
	}
	return nil
}
// NewSQLiteBackend opens the SQLite database at path, prepares all
// statements and, for plain keys, builds the coverage index. On any
// error all partially acquired resources are released again.
func NewSQLiteBackend(
	path string,
	changeTracker *changeTracker, interleaved bool) (sqlb *SQLiteBackend, err error) {
	res := SQLiteBackend{interleaved: interleaved, changeTracker: changeTracker}
	if res.db, err = sql.Open("sqlite3", path); err != nil {
		return
	}
	if res.existsStmt, err = res.db.Prepare(existsSQL); err != nil {
		res.closeAll()
		return
	}
	if res.fetchStmt, err = res.db.Prepare(fetchSQL); err != nil {
		res.closeAll()
		return
	}
	if res.deleteStmt, err = res.db.Prepare(deleteSQL); err != nil {
		res.closeAll()
		return
	}
	if res.insertStmt, err = res.db.Prepare(insertSQL); err != nil {
		res.closeAll()
		return
	}
	if res.updateStmt, err = res.db.Prepare(updateSQL); err != nil {
		res.closeAll()
		return
	}
	if res.countStmt, err = res.db.Prepare(countSQL); err != nil {
		res.closeAll()
		return
	}
	if res.keysStmt, err = res.db.Prepare(keysSQL); err != nil {
		res.closeAll()
		return
	}
	if res.rangeStmt, err = res.db.Prepare(rangeSQL); err != nil {
		res.closeAll()
		return
	}
	if interleaved {
		res.encoder = common.EncodeStringToBytesFromInterleaved
		res.decoder = common.DecodeStringFromBytesToInterleaved
	} else {
		res.encoder = common.EncodeStringToBytes
		res.decoder = common.DecodeStringFromBytes
	}
	if !interleaved {
		// Plain keys need the coverage index for spatial queries.
		if err = res.buildCoverage(); err != nil {
			// Fix: this error path used to leak the database handle and
			// all prepared statements.
			res.closeAll()
			return
		}
	}
	sqlb = &res
	return
}
// buildCoverage scans all block positions once and fills the Coverage3D
// index used by plain spatial queries. It runs at backend startup.
func (sqlb *SQLiteBackend) buildCoverage() (err error) {
	log.Println("INFO: Start building coverage index (this may take some time)...")
	sqlb.coverage = common.NewCoverage3D()
	var rows *sql.Rows
	if rows, err = sqlb.keysStmt.Query(); err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		var key int64
		if err = rows.Scan(&key); err != nil {
			return
		}
		sqlb.coverage.Insert(common.PlainToCoord(key))
	}
	err = rows.Err()
	log.Println("INFO: Finished building coverage index.")
	return
}
func closeStmt(stmt **sql.Stmt) error {
s := *stmt
if s != nil {
*stmt = nil
return s.Close()
}
return nil
}
func closeDB(db **sql.DB) error {
d := *db
if d != nil {
*db = nil
return d.Close()
}
return nil
}
// closeAll closes every prepared statement and finally the database;
// the returned error is the database close error.
func (sqlb *SQLiteBackend) closeAll() error {
	closeStmt(&sqlb.deleteStmt)
	closeStmt(&sqlb.fetchStmt)
	closeStmt(&sqlb.insertStmt)
	closeStmt(&sqlb.updateStmt)
	closeStmt(&sqlb.existsStmt)
	closeStmt(&sqlb.countStmt)
	closeStmt(&sqlb.keysStmt)
	closeStmt(&sqlb.rangeStmt)
	return closeDB(&sqlb.db)
}

// Shutdown closes the backend under the write lock so no session is in
// the middle of an operation.
func (sqlb *SQLiteBackend) Shutdown() error {
	globalLock.Lock()
	defer globalLock.Unlock()
	return sqlb.closeAll()
}
// txStmt returns the transaction-bound variant of stmt while a
// transaction is open, otherwise the shared prepared statement.
func (ss *SQLiteSession) txStmt(stmt *sql.Stmt) *sql.Stmt {
	if ss.tx != nil {
		return ss.tx.Stmt(stmt)
	}
	return stmt
}
// Del deletes the block at the decoded position. Existence probe and
// delete run under the global write lock, so together they are atomic
// with respect to other sessions.
func (ss *SQLiteSession) Del(hash, key []byte) (success bool, err error) {
	var pos int64
	if pos, err = ss.backend.decoder(key); err != nil {
		return
	}
	globalLock.Lock()
	defer globalLock.Unlock()
	existsStmt := ss.txStmt(ss.backend.existsStmt)
	var x int
	err2 := existsStmt.QueryRow(pos).Scan(&x)
	if err2 == sql.ErrNoRows {
		success = false
		return
	}
	if err2 != nil {
		err = err2
		return
	}
	success = true
	deleteStmt := ss.txStmt(ss.backend.deleteStmt)
	_, err = deleteStmt.Exec(pos)
	return
}
// Fetch returns the block at the decoded position; a missing row yields
// nil data and no error.
func (ss *SQLiteSession) Fetch(hash, key []byte) (data []byte, err error) {
	var pos int64
	if pos, err = ss.backend.decoder(key); err != nil {
		return
	}
	globalLock.RLock()
	defer globalLock.RUnlock()
	fetchStmt := ss.txStmt(ss.backend.fetchStmt)
	err2 := fetchStmt.QueryRow(pos).Scan(&data)
	if err2 == sql.ErrNoRows {
		return
	}
	err = err2
	return
}
// InTransaction reports whether a database transaction is open.
func (ss *SQLiteSession) InTransaction() bool {
	return ss.tx != nil
}
// Store inserts or updates the block at the decoded position and reports
// whether it already existed; the coverage index and change tracker are
// notified afterwards.
func (ss *SQLiteSession) Store(hash, key, value []byte) (exists bool, err error) {
	var pos int64
	if pos, err = ss.backend.decoder(key); err != nil {
		return
	}
	globalLock.Lock()
	defer globalLock.Unlock()
	existsStmt := ss.txStmt(ss.backend.existsStmt)
	var x int
	err2 := existsStmt.QueryRow(pos).Scan(&x)
	if err2 == sql.ErrNoRows {
		exists = false
	} else if err2 != nil {
		err = err2
		return
	} else {
		exists = true
	}
	if exists {
		updateStmt := ss.txStmt(ss.backend.updateStmt)
		_, err = updateStmt.Exec(value, pos)
	} else {
		insertStmt := ss.txStmt(ss.backend.insertStmt)
		_, err = insertStmt.Exec(pos, value)
	}
	if err != nil {
		return
	}
	// This is technically too early: inside a transaction the write is
	// only committed (and may still fail) later.
	if ss.backend.changeTracker != nil || ss.backend.coverage != nil {
		c := common.PlainToCoord(pos)
		if ss.backend.coverage != nil && !exists {
			ss.backend.coverage.Insert(c)
		}
		if ss.backend.changeTracker != nil {
			ss.backend.changeTracker.BlockChanged(c)
		}
	}
	return
}
// BeginTransaction opens a database transaction; a nested call only logs
// a warning and keeps the current one.
func (ss *SQLiteSession) BeginTransaction() (err error) {
	if ss.tx != nil {
		log.Println("WARN: Already running transaction.")
		return nil
	}
	globalLock.Lock()
	defer globalLock.Unlock()
	ss.tx, err = ss.backend.db.Begin()
	return
}

// CommitTransaction commits the open transaction; without one it only
// logs a warning.
func (ss *SQLiteSession) CommitTransaction() error {
	tx := ss.tx
	if tx == nil {
		log.Println("WARN: No transaction running.")
		return nil
	}
	globalLock.Lock()
	defer globalLock.Unlock()
	ss.tx = nil
	return tx.Commit()
}
// AllKeys returns a channel of all encoded keys plus their count (the
// RESP array header needs the length up front). The read lock is held
// until the streaming goroutine finishes, so consumers must drain the
// channel or close done to release it.
func (ss *SQLiteSession) AllKeys(
	hash []byte,
	done <-chan struct{}) (<-chan []byte, int, error) {
	globalLock.RLock()
	countStmt := ss.txStmt(ss.backend.countStmt)
	var n int
	var err error
	if err = countStmt.QueryRow().Scan(&n); err != nil {
		if err == sql.ErrNoRows {
			err = nil
		}
		globalLock.RUnlock()
		return nil, n, err
	}
	keysStmt := ss.txStmt(ss.backend.keysStmt)
	var rows *sql.Rows
	if rows, err = keysStmt.Query(); err != nil {
		globalLock.RUnlock()
		return nil, n, err
	}
	keys := make(chan []byte)
	go func() {
		defer globalLock.RUnlock()
		defer rows.Close()
		defer close(keys)
		var err error
		for rows.Next() {
			var key int64
			if err = rows.Scan(&key); err != nil {
				log.Printf("WARN: %s\n", err)
				break
			}
			var encoded []byte
			if encoded, err = ss.backend.encoder(key); err != nil {
				log.Printf("Cannot encode key: %d %s\n", key, err)
				break
			}
			select {
			case keys <- encoded:
			case <-done:
				return
			}
		}
	}()
	return keys, n, nil
}
// SpatialQuery dispatches a box query to the strategy matching the key
// layout of the underlying database.
func (ss *SQLiteSession) SpatialQuery(
	hash, first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	if ss.backend.interleaved {
		return ss.interleavedSpatialQuery(first, second, done)
	}
	return ss.plainSpatialQuery(first, second, done)
}
// interleavedSpatialQuery streams all blocks inside the box spanned by
// the two corner keys using the Z-order (interleaved) key layout. It
// range-scans from the current Z-code up to zmax and, as soon as a row
// leaves the box, restarts the scan at the BigMin successor code to skip
// runs of keys that cannot intersect the box.
func (ss *SQLiteSession) interleavedSpatialQuery(
	first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	var (
		firstKey  int64
		secondKey int64
		err       error
	)
	if firstKey, err = common.DecodeStringFromBytes(first); err != nil {
		return nil, err
	}
	if secondKey, err = common.DecodeStringFromBytes(second); err != nil {
		return nil, err
	}
	// Normalize the two corners to min/max order.
	c1 := common.ClipCoord(common.PlainToCoord(firstKey))
	c2 := common.ClipCoord(common.PlainToCoord(secondKey))
	c1, c2 = common.MinCoord(c1, c2), common.MaxCoord(c1, c2)
	blocks := make(chan Block)
	// The read lock is released by the goroutine when it finishes.
	globalLock.RLock()
	go func() {
		defer close(blocks)
		defer globalLock.RUnlock()
		zmin, zmax := common.CoordToInterleaved(c1), common.CoordToInterleaved(c2)
		// Should not be necessary.
		zmin, zmax = common.Order64(zmin, zmax)
		cub := common.Cuboid{P1: c1, P2: c2}
		rangeStmt := ss.txStmt(ss.backend.rangeStmt)
		zcode := zmin
	loop:
		rows, err := rangeStmt.Query(zcode, zmax)
		if err != nil {
			log.Printf("error: fetching range failed: %s\n", err)
			return
		}
		for rows.Next() {
			var data []byte
			if err = rows.Scan(&zcode, &data); err != nil {
				rows.Close()
				log.Printf("error: scanning row failed: %s\n", err)
				return
			}
			c := common.InterleavedToCoord(zcode)
			if cub.Contains(c) {
				key := common.StringToBytes(common.CoordToPlain(c))
				//fmt.Printf("sending: %q\n", c)
				select {
				case blocks <- Block{Key: key, Data: data}:
				case <-done:
					return
				}
			} else {
				// Left the box: close this result set and restart the
				// range scan at the BigMin successor of zcode.
				if err = rows.Close(); err != nil {
					log.Printf("error: closing range failed: %s\n", err)
					return
				}
				zcode = common.BigMin(zmin, zmax, zcode)
				goto loop
			}
		}
		if err = rows.Err(); err != nil {
			log.Printf("error: iterating range failed: %s\n", err)
		}
		if err = rows.Close(); err != nil {
			log.Printf("error: closing range failed: %s\n", err)
		}
	}()
	return blocks, nil
}
// plainSpatialQuery streams all blocks inside the box spanned by the two
// plain-encoded corner keys. It walks the coverage index per Z slice and
// issues one range query per Y row.
func (ss *SQLiteSession) plainSpatialQuery(
	first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	var (
		firstKey  int64
		secondKey int64
		err       error
	)
	if firstKey, err = common.DecodeStringFromBytes(first); err != nil {
		return nil, err
	}
	if secondKey, err = common.DecodeStringFromBytes(second); err != nil {
		return nil, err
	}
	// Normalize the two corners to min/max order.
	c1 := common.PlainToCoord(firstKey)
	c2 := common.PlainToCoord(secondKey)
	c1, c2 = common.MinCoord(c1, c2), common.MaxCoord(c1, c2)
	blocks := make(chan Block)
	// The read lock is released by the goroutine when it finishes.
	globalLock.RLock()
	go func() {
		defer globalLock.RUnlock()
		defer close(blocks)
		rangeStmt := ss.txStmt(ss.backend.rangeStmt)
		// send forwards all rows of one range query to the channel;
		// it returns false when the producer should stop.
		send := func(rows *sql.Rows, err error) bool {
			if err != nil {
				log.Printf("Error in range query: %s\n", err)
				return false
			}
			defer rows.Close()
			for rows.Next() {
				var key int64
				var data []byte
				if err = rows.Scan(&key, &data); err != nil {
					log.Printf("Error in scanning row: %s\n", err)
					return false
				}
				var encodedKey []byte
				if encodedKey, err = common.EncodeStringToBytes(key); err != nil {
					log.Printf("Key encoding failed: %s\n", err)
					return false
				}
				select {
				case blocks <- Block{Key: encodedKey, Data: data}:
				case <-done:
					return false
				}
			}
			if err = rows.Err(); err != nil {
				log.Printf("Error in range query: %s\n", err)
				return false
			}
			return true
		}
		var a, b common.Coord
		for _, r := range ss.backend.coverage.Query(c1, c2) {
			a.Z, b.Z = int16(r.Z), int16(r.Z)
			a.X, b.X = int16(r.X1), int16(r.X2)
			// log.Printf("y1 y2 x1 x2 z: %d %d, %d %d, %d\n", r.Y1, r.Y2, r.X1, r.X2, r.Z)
			for a.Y = r.Y2; a.Y >= r.Y1; a.Y-- {
				b.Y = a.Y
				from, to := common.CoordToPlain(a), common.CoordToPlain(b)
				if !send(rangeStmt.Query(from, to)) {
					return
				}
			}
		}
	}()
	return blocks, nil
}

122
cmd/mtseeder/baselevel.go Normal file
View File

@ -0,0 +1,122 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"image/color"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
const (
baseLevelDir = "8"
)
// blockPos describes one base-tile render job: the block coordinate
// (x, z) of the tile's origin and its (i, j) index in the output
// directory tree.
type blockPos struct {
x, z int16
j, i int
}
// createTiles is one render worker: it consumes block positions from
// 'jobs', renders each base tile area and hands the finished tile to a
// background goroutine that writes it to disk. 'done' is signalled
// once the jobs channel is drained.
func createTiles(
btc *common.BaseTileCreator,
jobs chan blockPos,
done *sync.WaitGroup) {
wFns := make(chan func() (bool, error))
// Writing already rendered tiles to disk can be done in background.
go func() {
for wfn := range wFns {
if _, err := wfn(); err != nil {
log.Printf("WARN: writing file failed: %v.\n", err)
}
}
}()
defer func() {
// Closing wFns stops the writer goroutine once it drained its queue.
close(wFns)
btc.Close()
done.Done()
}()
for job := range jobs {
// Rendering starts one block early in x/z.
// NOTE(review): presumably to include neighbor overlap needed for
// shading -- confirm against common.BaseTileCreator.RenderArea.
if err := btc.RenderArea(job.x-1, job.z-1); err != nil {
log.Printf("WARN: rendering failed: %v.\n", err)
continue
}
wFns <- btc.WriteFunc(job.i, job.j, nil)
}
}
// createBaseLevel renders the base zoom level of the tile pyramid by
// querying a mtredisalize server at the given address. The area
// [xMin, xMax] x [zMin, zMax] is walked in 16 block steps and fed to
// numWorkers parallel render workers over a job channel.
//
// address:        TCP "host:port" or a unix domain socket path (any
//                 address containing '/' is treated as a socket path).
// transparentDim: extra dimming fraction for transparent nodes.
// Returns the first error encountered during setup; rendering errors
// are only logged by the workers.
func createBaseLevel(
	address string,
	xMin, yMin, zMin, xMax, yMax, zMax int,
	transparent bool, transparentDim float32,
	colorsFile string, bg color.RGBA, outDir string,
	numWorkers int) (err error) {

	var colors *common.Colors
	if colors, err = common.ParseColors(colorsFile); err != nil {
		return
	}
	colors.TransparentDim = transparentDim

	baseDir := filepath.Join(outDir, baseLevelDir)
	if err = os.MkdirAll(baseDir, os.ModePerm); err != nil {
		return
	}

	jobs := make(chan blockPos)
	var done sync.WaitGroup

	// A path containing a slash is interpreted as a unix domain socket.
	var proto string
	if strings.ContainsRune(address, '/') {
		proto = "unix"
	} else {
		proto = "tcp"
	}

	for i := 0; i < numWorkers; i++ {
		var client *common.RedisClient
		if client, err = common.NewRedisClient(proto, address); err != nil {
			return
		}
		done.Add(1)
		btc := common.NewBaseTileCreator(
			client, colors, bg,
			int16(yMin), int16(yMax),
			transparent, baseDir)
		go createTiles(btc, jobs, &done)
	}

	// Normalize the bounds so both loops below make progress even when
	// min/max were passed swapped. FIX: previously only the z axis was
	// ordered; a swapped x range silently produced no tiles at all.
	xMin, xMax = common.Order(xMin, xMax)
	zMin, zMax = common.Order(zMin, zMax)

	for x, i := int16(xMin), 0; x <= int16(xMax); x += 16 {
		xDir := filepath.Join(baseDir, strconv.Itoa(i))
		log.Printf("creating dir: %s\n", xDir)
		if err = os.MkdirAll(xDir, os.ModePerm); err != nil {
			// Keep the original fail-fast behavior: abort the process.
			log.Fatalf("Cannot create directory '%s': %s\n", xDir, err)
		}
		for z, j := int16(zMin), 0; z <= int16(zMax); z += 16 {
			jobs <- blockPos{x: x, z: z, i: i, j: j}
			j++
		}
		i++
	}

	close(jobs)
	done.Wait()
	return
}

97
cmd/mtseeder/main.go Normal file
View File

@ -0,0 +1,97 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"log"
"strings"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// main is the entry point of mtseeder: it pre-renders the base level
// tiles of the map from a mtredisalize backend and/or the zoomed-out
// pyramid levels above them, controlled by command line flags.
func main() {
	var (
		port             int
		host             string
		xMin, yMin, zMin int
		xMax, yMax, zMax int
		colorsFile       string
		bgColor          string
		outDir           string
		numWorkers       int
		skipBaseLevel    bool
		skipPyramid      bool
		transparent      bool
		transparentDim   float64
		version          bool
	)

	defaultBgColor := common.ColorToHex(common.BackgroundColor)

	// FIX: flag descriptions: "port to of" -> "port of",
	// "Extra fimming" -> "Extra dimming".
	flag.IntVar(&port, "port", 6379, "port of mtredisalize server")
	flag.IntVar(&port, "p", 6379, "port of mtredisalize server (shorthand)")
	flag.StringVar(&host, "host", "localhost", "host to mtredisalize server")
	flag.IntVar(&xMin, "xmin", -1933, "x min of the area to tile")
	flag.IntVar(&xMax, "xmax", 1932, "x max of the area to tile")
	flag.IntVar(&yMin, "ymin", common.MinHeight, "Minimum y in blocks.")
	flag.IntVar(&yMax, "ymax", common.MaxHeight, "Maximum y in blocks.")
	flag.IntVar(&zMin, "zmin", -1933, "z min of the area to tile")
	flag.IntVar(&zMax, "zmax", 1932, "z max of the area to tile")
	flag.StringVar(&colorsFile, "colors", "colors.txt", "definition of colors")
	flag.StringVar(&bgColor, "background", defaultBgColor, "background color")
	flag.StringVar(&bgColor, "bg", defaultBgColor, "background color (shorthand)")
	flag.StringVar(&outDir, "output-dir", "map", "directory with the resulting image tree")
	flag.StringVar(&outDir, "o", "map", "directory with the resulting image tree")
	flag.IntVar(&numWorkers, "workers", 1, "number of workers")
	flag.IntVar(&numWorkers, "w", 1, "number of workers (shorthand)")
	flag.BoolVar(&skipBaseLevel, "skip-base-level", false, "Do not generate base level tiles")
	flag.BoolVar(&skipBaseLevel, "sb", false, "Do not generate base level tiles (shorthand)")
	flag.BoolVar(&skipPyramid, "skip-pyramid", false, "Do not generate pyramid tiles")
	flag.BoolVar(&skipPyramid, "sp", false, "Do not generate pyramid tiles (shorthand)")
	flag.BoolVar(&transparent, "transparent", false, "Render transparent blocks.")
	flag.BoolVar(&transparent, "t", false, "Render transparent blocks (shorthand).")
	flag.Float64Var(&transparentDim,
		"transparent-dim", common.DefaultTransparentDim*100.0,
		"Extra dimming of transparent nodes each depth meter in percent.")
	flag.Float64Var(&transparentDim,
		"td", common.DefaultTransparentDim*100.0,
		"Extra dimming of transparent nodes each depth meter in percent. (shorthand)")
	flag.BoolVar(&version, "version", false, "Print version and exit.")
	flag.Parse()

	if version {
		common.PrintVersionAndExit()
	}

	bg := common.ParseColorDefault(bgColor, common.BackgroundColor)

	if !skipBaseLevel {
		// The flag is given in percent; clamp to the internal [0, 1] fraction.
		td := common.Clamp32f(float32(transparentDim/100.0), 0.0, 1.0)
		// A host containing '/' is taken as a unix domain socket path.
		var address string
		if strings.ContainsRune(host, '/') {
			address = host
		} else {
			address = fmt.Sprintf("%s:%d", host, port)
		}
		if err := createBaseLevel(
			address,
			xMin, yMin, zMin, xMax, yMax, zMax,
			transparent, td,
			colorsFile, bg,
			outDir,
			numWorkers); err != nil {
			log.Fatalf("Creating base level tiles failed: %s", err)
		}
	}

	if !skipPyramid {
		pc := pyramidCreator{numWorkers: numWorkers, outDir: outDir, bg: bg}
		if err := pc.create(); err != nil {
			log.Fatalf("Creating pyramid tiles failed: %s", err)
		}
	}
}

247
cmd/mtseeder/pyramid.go Normal file
View File

@ -0,0 +1,247 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"image"
"image/color"
"image/draw"
"io/ioutil"
"log"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
"github.com/bamiaux/rez"
)
// pyramidCreator builds the zoomed-out pyramid levels above the base
// tiles in outDir, using numWorkers parallel fuse workers; bg is the
// fill color used when loading tiles.
type pyramidCreator struct {
numWorkers int
outDir string
bg color.RGBA
}
func findMaxDir(files []os.FileInfo) (min, max int) {
min, max = math.MaxInt32, math.MinInt32
for _, file := range files {
if !file.Mode().IsDir() {
continue
}
if x, err := strconv.Atoi(file.Name()); err == nil {
if x > max {
max = x
}
if x < min {
min = x
}
}
}
return
}
func findMaxFile(files []os.FileInfo) (min, max int) {
min, max = math.MaxInt32, math.MinInt32
for _, file := range files {
if !file.Mode().IsRegular() {
continue
}
name := file.Name()
name = strings.TrimSuffix(name, filepath.Ext(name))
if x, err := strconv.Atoi(name); err == nil {
if x > max {
max = x
}
if x < min {
min = x
}
}
}
return
}
// pyramidJob names the four child tile files (src) to be fused into
// one parent tile file (dst).
type pyramidJob struct {
src [4]string
dst string
}
// createParentLevel generates the next smaller zoom level (oldLevel-1)
// from the tiles in oldDir, emitting one fuse job per 2x2 group of
// child tiles. It returns the directory of the newly created level,
// or "" once level 0 has been reached (the recursion anchor for
// pyramidCreator.create).
func (pc *pyramidCreator) createParentLevel(
oldDir string,
jobs chan pyramidJob) (newDir string, err error) {
oldName := filepath.Base(oldDir)
var oldLevel int
if oldLevel, err = strconv.Atoi(oldName); err != nil {
return
}
if oldLevel <= 0 {
// Top of the pyramid: signal termination with newDir == "".
return
}
var files []os.FileInfo
if files, err = ioutil.ReadDir(oldDir); err != nil {
return
}
xMin, xMax := findMaxDir(files)
if xMax == math.MinInt32 {
// No numbered sub directories at all -> nothing to do.
return
}
newLevel := oldLevel - 1
log.Printf("Generating tiles of level %d\n", newLevel)
parentDir := filepath.Dir(oldDir)
newDir = filepath.Join(parentDir, strconv.Itoa(newLevel))
if err = os.MkdirAll(newDir, os.ModePerm); err != nil {
return
}
// Each parent tile (nx, nz) is fused from the 2x2 child block
// (ox, ox+1) x (oz, oz+1).
for ox, nx := xMin, xMin; ox <= xMax; ox += 2 {
ox1Dir := filepath.Join(oldDir, strconv.Itoa(ox))
ox2Dir := filepath.Join(oldDir, strconv.Itoa(ox+1))
if files, err = ioutil.ReadDir(ox1Dir); err != nil {
return
}
zMin, zMax := findMaxFile(files)
if zMax == math.MinInt32 {
nx++
continue
}
nxDir := filepath.Join(newDir, strconv.Itoa(nx))
if err = os.MkdirAll(nxDir, os.ModePerm); err != nil {
return
}
for oz, nz := zMin, zMin; oz <= zMax; oz += 2 {
oz1 := strconv.Itoa(oz) + ".png"
oz2 := strconv.Itoa(oz+1) + ".png"
s1 := filepath.Join(ox1Dir, oz1)
s2 := filepath.Join(ox1Dir, oz2)
s3 := filepath.Join(ox2Dir, oz1)
s4 := filepath.Join(ox2Dir, oz2)
d := filepath.Join(nxDir, strconv.Itoa(nz)+".png")
jobs <- pyramidJob{src: [4]string{s1, s2, s3, s4}, dst: d}
nz++
}
nx++
}
return
}
// clip8 clamps x into the pixel range [0, 256].
func clip8(x int) int {
	if x < 0 {
		return 0
	}
	if x > 256 {
		return 256
	}
	return x
}
// clipRect clamps both corners of r into the [0, 256] pixel square.
func clipRect(r image.Rectangle) image.Rectangle {
	lo := image.Point{X: clip8(r.Min.X), Y: clip8(r.Min.Y)}
	hi := image.Point{X: clip8(r.Max.X), Y: clip8(r.Max.Y)}
	return image.Rectangle{Min: lo, Max: hi}
}
// dps are the paste offsets of the four source tiles inside the
// 512x512 scratch image, indexed in the same order as pyramidJob.src.
var dps = [4]image.Point{
image.Pt(0, 256),
image.Pt(0, 0),
image.Pt(256, 256),
image.Pt(256, 0)}
// fuseTile pastes the four child tiles of job into the 512x512 scratch
// image, downscales it into the 256x256 resized image via conv and
// writes the result to job.dst.
// NOTE(review): common.LoadPNG takes pc.bg -- presumably it falls back
// to the background color for missing/unreadable tiles; confirm.
func (pc *pyramidCreator) fuseTile(
scratch, resized *image.RGBA,
conv rez.Converter,
job *pyramidJob) error {
for i, path := range job.src {
img := common.LoadPNG(path, pc.bg)
// Clamp to 256x256 and translate to this tile's quadrant.
sr := clipRect(img.Bounds())
r := sr.Sub(sr.Min).Add(dps[i])
draw.Draw(scratch, r, img, sr.Min, draw.Src)
}
if err := conv.Convert(resized, scratch); err != nil {
return err
}
log.Printf("Writing pyramid tile '%s'.\n", job.dst)
return common.SaveAsPNG(job.dst, resized)
}
// fuseTiles is one pyramid worker: it owns a 512x512 scratch and a
// 256x256 result image plus a prepared rez converter (both reused for
// every job) and processes fuse jobs until the channel is closed.
func (pc *pyramidCreator) fuseTiles(jobs chan pyramidJob, done *sync.WaitGroup) {
defer done.Done()
scratch := image.NewRGBA(image.Rect(0, 0, 512, 512))
resized := image.NewRGBA(image.Rect(0, 0, 256, 256))
cfg, err := rez.PrepareConversion(resized, scratch)
if err != nil {
log.Printf("WARN: cannot prepare rescaling: %s\n", err)
return
}
conv, err := rez.NewConverter(cfg, common.ResizeFilter)
if err != nil {
log.Printf("WARN: Cannot create image converter: %s\n", err)
return
}
for job := range jobs {
if err := pc.fuseTile(scratch, resized, conv, &job); err != nil {
log.Printf("WARN: Writing image failed: %s\n", err)
}
}
}
// create builds all pyramid levels above the base level, working from
// the base level directory upwards until createLevel reports the top
// of the pyramid with an empty directory name.
func (pc *pyramidCreator) create() (err error) {
	dir := filepath.Join(pc.outDir, baseLevelDir)
	for dir != "" {
		dir, err = pc.createLevel(dir)
		if err != nil {
			return
		}
	}
	return
}
// createLevel spins up the fuse workers, feeds them one pyramid level
// via createParentLevel and waits for them to finish. It returns the
// directory of the generated level ("" once the top is reached).
func (pc *pyramidCreator) createLevel(oldDir string) (string, error) {
jobs := make(chan pyramidJob)
var done sync.WaitGroup
for i := 0; i < pc.numWorkers; i++ {
done.Add(1)
go pc.fuseTiles(jobs, &done)
}
newDir, err := pc.createParentLevel(oldDir, jobs)
// Close even on error so the workers terminate.
close(jobs)
if err != nil {
return newDir, err
}
done.Wait()
return newDir, err
}

168
cmd/mttilemapper/main.go Normal file
View File

@ -0,0 +1,168 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"image"
"log"
"os"
"runtime/pprof"
"strings"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// main renders a single map tile from a mtredisalize backend: the
// cuboid given on the command line is queried in 8 block thick y slabs
// from top to bottom (stopping early once the image is fully opaque)
// and the rendered result is written to the output PNG.
func main() {
	var (
		port                 int
		host                 string
		x, y, z              int
		width, height, depth int
		colorsfile           string
		bgColor              string
		outfile              string
		shaded               bool
		transparent          bool
		cpuProfile           string
		transparentDim       float64
		version              bool
	)

	defaultBgColor := common.ColorToHex(common.BackgroundColor)

	// FIX: flag descriptions: "port to of" -> "port of".
	flag.IntVar(&port, "port", 6379, "port of mtredisalize server")
	flag.IntVar(&port, "p", 6379, "port of mtredisalize server (shorthand)")
	flag.StringVar(&host, "host", "localhost", "host to mtredisalize server")
	flag.IntVar(&x, "x", 0, "x of query cuboid")
	flag.IntVar(&y, "y", -75, "y of query cuboid")
	flag.IntVar(&z, "z", 0, "z of query cuboid")
	flag.IntVar(&width, "width", 16, "width of query cuboid")
	flag.IntVar(&height, "height", 16, "height of query cuboid")
	flag.IntVar(&depth, "depth", 150, "depth of query cuboid")
	flag.IntVar(&width, "w", 16, "width of query cuboid (shorthand)")
	flag.IntVar(&height, "h", 16, "height of query cuboid (shorthand)")
	flag.IntVar(&depth, "d", 150, "depth of query cuboid (shorthand)")
	flag.StringVar(&colorsfile, "colors", "colors.txt", "definition of colors")
	flag.StringVar(&bgColor, "background", defaultBgColor, "background color")
	flag.StringVar(&bgColor, "bg", defaultBgColor, "background color (shorthand)")
	flag.StringVar(&outfile, "output", "out.png", "image file of result")
	flag.StringVar(&outfile, "o", "out.png", "image file of result (shorthand)")
	flag.BoolVar(&shaded, "shaded", true, "draw relief")
	flag.BoolVar(&transparent, "transparent", false, "render transparent blocks")
	flag.Float64Var(
		&transparentDim, "transparent-dim", common.DefaultTransparentDim*100,
		"Extra dimming of transparent nodes every depth meter in percent (0-100).")
	flag.StringVar(&cpuProfile, "cpuprofile", "", "write cpu profile to file")
	flag.BoolVar(&version, "version", false, "Print version and exit.")
	flag.Parse()

	if version {
		common.PrintVersionAndExit()
	}

	bg := common.ParseColorDefault(bgColor, common.BackgroundColor)

	if cpuProfile != "" {
		f, err := os.Create(cpuProfile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	var colors *common.Colors
	var err error
	if colors, err = common.ParseColors(colorsfile); err != nil {
		log.Fatalf("Cannot open color file: %s", err)
	}
	// The flag is in percent; internally a [0, 1] fraction is used.
	// FIX: the upper clamp bound was 100.0, letting out-of-range
	// fractions > 1 through unclamped (mtseeder clamps the identical
	// quantity to 1.0).
	colors.TransparentDim = common.Clamp32f(
		float32(transparentDim/100.0), 0.0, 1.0)

	// A host containing '/' is taken as a unix domain socket path.
	var proto, address string
	if strings.ContainsRune(host, '/') {
		proto, address = "unix", host
	} else {
		proto, address = "tcp", fmt.Sprintf("%s:%d", host, port)
	}

	var client *common.RedisClient
	if client, err = common.NewRedisClient(proto, address); err != nil {
		log.Fatalf("Cannot connect to '%s': %s", address, err)
	}
	defer client.Close()

	if shaded {
		// Relief shading samples neighbors: widen the query by one
		// block on each side.
		width += 2
		height += 2
		x--
		z--
	}

	q1x, q1y, q1z := int16(x), int16(y), int16(z)
	q2x, q2y, q2z := q1x+int16(width)-1, q1y+int16(depth)-1, q1z+int16(height)-1

	renderer := common.NewRenderer(width, height, transparent)
	renderer.SetPos(q1x, q1z)

	renderFn := func(block *common.Block) error {
		return renderer.RenderBlock(block, colors)
	}
	yOrder := common.NewYOrder(renderFn, 512)

	numBlocks := 0
	drawBlock := func(block *common.Block) *common.Block {
		block, err := yOrder.RenderBlock(block)
		if err != nil {
			log.Printf("WARN: rendering block failed: %s\n", err)
		}
		numBlocks++
		return block
	}

	// Query in 8 block thick y slabs from top to bottom; stop as soon
	// as every pixel of the image is opaque.
	c1 := common.Coord{X: q1x, Z: q1z}
	c2 := common.Coord{X: q2x, Z: q2z}
	for c2.Y = q2y; c2.Y > q1y; c2.Y -= 8 {
		c1.Y = c2.Y - 7
		if c1.Y < q1y {
			c1.Y = q1y
		}
		cuboid := common.Cuboid{P1: common.MinCoord(c1, c2), P2: common.MaxCoord(c1, c2)}
		if _, err = client.QueryCuboid(cuboid, drawBlock); err != nil {
			log.Fatalf("query failed: %s", err)
		}
		if err = yOrder.Drain(); err != nil {
			log.Printf("WARN: rendering block failed: %s\n", err)
		}
		if renderer.IsFilled() {
			break
		}
	}

	// Renamed from 'image' which shadowed the image package.
	var img image.Image
	if shaded {
		img = renderer.CreateShadedImage(
			16, 16, (width-2)*16, (height-2)*16,
			colors, bg)
	} else {
		img = renderer.CreateImage(colors.Colors, bg)
	}

	if err = common.SaveAsPNG(outfile, img); err != nil {
		log.Fatalf("writing image failed: %s", err)
	}

	log.Printf("num blocks: %d\n", numBlocks)
	log.Printf("rejected blocks: %d\n", renderer.RejectedBlocks)
	log.Printf("transparent blocks: %d\n", renderer.TransparentBlocks)
	log.Printf("solid blocks: %d\n", renderer.SolidBlocks)
}

View File

@ -0,0 +1,125 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"bytes"
"encoding/json"
"log"
"net/http"
"github.com/gorilla/websocket"
)
// websocketForwarder broadcasts tile and player updates to all
// connected websocket clients; all state is owned by its run loop.
type websocketForwarder struct {
upgrader *websocket.Upgrader
register chan *connection
unregister chan *connection
broadcast chan msg
connections map[*connection]bool
}
// connection is one websocket client with its outgoing message queue.
type connection struct {
ws *websocket.Conn
send chan []byte
}
// msg is one broadcast payload: changed base tiles and/or player states.
type msg struct {
tiles []xz
pls []*player
}
// newWebsocketForwarder creates a forwarder with unbuffered control
// channels; call run in its own goroutine before using it.
func newWebsocketForwarder() *websocketForwarder {
	wsf := &websocketForwarder{
		upgrader:    &websocket.Upgrader{ReadBufferSize: 512, WriteBufferSize: 2048},
		register:    make(chan *connection),
		unregister:  make(chan *connection),
		broadcast:   make(chan msg),
		connections: make(map[*connection]bool),
	}
	return wsf
}
// run is the forwarder's event loop. It must run in its own goroutine:
// registration, unregistration and broadcasting are serialized over
// channels so the connections map is only ever touched here.
func (wsf *websocketForwarder) run() {
for {
select {
case c := <-wsf.register:
wsf.connections[c] = true
case c := <-wsf.unregister:
if _, ok := wsf.connections[c]; ok {
delete(wsf.connections, c)
close(c.send)
}
case message := <-wsf.broadcast:
if len(wsf.connections) == 0 {
continue
}
// Encode once, send the same bytes to every client.
encMsg := map[string]interface{}{}
if message.tiles != nil {
encMsg["tiles"] = message.tiles
}
if message.pls != nil {
encMsg["players"] = message.pls
}
var buf bytes.Buffer
encoder := json.NewEncoder(&buf)
if err := encoder.Encode(encMsg); err != nil {
log.Printf("encoding changes failed: %s\n", err)
continue
}
m := buf.Bytes()
for c := range wsf.connections {
select {
case c.send <- m:
default:
// Client cannot keep up: drop it. (Deleting while
// ranging over a map is allowed in Go.)
delete(wsf.connections, c)
close(c.send)
}
}
}
}
}
// ServeHTTP upgrades the request to a websocket, registers the new
// connection and pumps messages until the client disconnects.
func (wsf *websocketForwarder) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
ws, err := wsf.upgrader.Upgrade(rw, r, nil)
if err != nil {
log.Printf("Cannot upgrade to websocket: %s\n", err)
return
}
c := &connection{ws: ws, send: make(chan []byte, 8)}
wsf.register <- c
defer func() { wsf.unregister <- c }()
go c.writer()
// reader blocks until the peer goes away, keeping this handler alive.
c.reader()
}
// BaseTilesUpdated implements the baseTilesUpdates interface: forward
// the changed base tile coordinates to all websocket clients.
func (wsf *websocketForwarder) BaseTilesUpdated(changes []xz) {
wsf.broadcast <- msg{tiles: changes}
}
// BroadcastPlayers pushes the current player list to all clients.
func (wsf *websocketForwarder) BroadcastPlayers(pls []*player) {
wsf.broadcast <- msg{pls: pls}
}
// writer pumps queued messages to the peer until the send channel is
// closed or a write fails; either way the socket is closed on return.
func (c *connection) writer() {
	defer c.ws.Close()
	for payload := range c.send {
		if err := c.ws.WriteMessage(websocket.TextMessage, payload); err != nil {
			return
		}
	}
}
// reader drains incoming client messages, ignoring their content, and
// returns (closing the socket) as soon as the connection errors out.
func (c *connection) reader() {
defer c.ws.Close()
for {
// Just read the message and ignore it.
if _, _, err := c.ws.NextReader(); err != nil {
break
}
}
}

149
cmd/mtwebmapper/main.go Normal file
View File

@ -0,0 +1,149 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"log"
"net"
"net/http"
"strings"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
"github.com/gorilla/mux"
)
// main wires up and starts the mtwebmapper web server: static files,
// sub-tile serving from the pre-rendered pyramid and -- optionally --
// live tile updates (with a Redis backend), websocket change
// notifications and player tracking from a FIFO.
func main() {
	var (
		webPort        int
		webHost        string
		webDir         string
		mapDir         string
		redisPort      int
		redisHost      string
		colorsFile     string
		bgColor        string
		workers        int
		transparent    bool
		transparentDim float64
		updateHosts    string
		websockets     bool
		playersFIFO    string
		version        bool
		yMin           int
		yMax           int
	)

	defaultBgColor := common.ColorToHex(common.BackgroundColor)

	flag.IntVar(&webPort, "web-port", 8808, "port of the web server")
	flag.IntVar(&webPort, "p", 8808, "port of the web server (shorthand)")
	flag.StringVar(&webHost, "web-host", "localhost", "address to bind web server")
	flag.StringVar(&webHost, "h", "localhost", "address to bind web server(shorthand)")
	flag.StringVar(&webDir, "web", "web", "static served web files.")
	flag.StringVar(&webDir, "w", "web", "static served web files (shorthand)")
	flag.StringVar(&mapDir, "map", "map", "directory of prerendered tiles")
	flag.StringVar(&mapDir, "m", "map", "directory of prerendered tiles (shorthand)")
	flag.StringVar(&updateHosts, "update-hosts", "localhost",
		"';' separated list of hosts which are allowed to send map update requests")
	flag.StringVar(&updateHosts, "u", "localhost",
		"';' separated list of hosts which are allowed to send map update requests (shorthand)")
	flag.StringVar(&redisHost, "redis-host", "", "address of the backend Redis server")
	flag.StringVar(&redisHost, "rh", "", "address of the backend Redis server (shorthand)")
	flag.IntVar(&redisPort, "redis-port", 6379, "port of the backend Redis server")
	flag.IntVar(&redisPort, "rp", 6379, "port of the backend Redis server (shorthand)")
	flag.IntVar(&workers, "workers", 1, "number of workers to render tiles")
	flag.StringVar(&colorsFile, "colors", "colors.txt", "colors used to render map tiles.")
	flag.StringVar(&colorsFile, "c", "colors.txt", "colors used to render map tiles (shorthand).")
	flag.StringVar(&bgColor, "background", defaultBgColor, "background color")
	flag.StringVar(&bgColor, "bg", defaultBgColor, "background color (shorthand)")
	flag.BoolVar(&transparent, "transparent", false, "Render transparent blocks.")
	flag.BoolVar(&transparent, "t", false, "Render transparent blocks (shorthand).")
	flag.Float64Var(&transparentDim,
		"transparent-dim", common.DefaultTransparentDim*100.0,
		"Extra dimming of transparent nodes each depth meter in percent.")
	// FIX: description typo "Extra fimming" -> "Extra dimming".
	flag.Float64Var(&transparentDim,
		"td", common.DefaultTransparentDim*100.0,
		"Extra dimming of transparent nodes each depth meter in percent. (shorthand)")
	flag.BoolVar(&websockets, "websockets", false, "Forward tile changes to clients via websockets.")
	flag.BoolVar(&websockets, "ws", false, "Forward tile changes to clients via websockets (shorthand).")
	flag.StringVar(&playersFIFO, "players", "", "Path to FIFO file to read active players from.")
	flag.StringVar(&playersFIFO, "ps", "", "Path to FIFO file to read active players from (shorthand).")
	flag.IntVar(&yMin, "ymin", common.MinHeight, "Minimum y in blocks.")
	flag.IntVar(&yMax, "ymax", common.MaxHeight, "Maximum y in blocks.")
	flag.BoolVar(&version, "version", false, "Print version and exit.")
	flag.Parse()

	if version {
		common.PrintVersionAndExit()
	}

	bg := common.ParseColorDefault(bgColor, common.BackgroundColor)

	router := mux.NewRouter()

	// Renamed from 'subBaseLine' which shadowed the type of the same name.
	sb := newSubBaseLine(mapDir, bg)
	router.Path("/map/{z:[0-9]+}/{x:[0-9]+}/{y:[0-9]+}.png").Handler(sb)

	var btu baseTilesUpdates
	var wsf *websocketForwarder

	if websockets {
		wsf = newWebsocketForwarder()
		go wsf.run()
		router.Path("/ws").Methods("GET").Handler(wsf)
		btu = wsf
	}

	if playersFIFO != "" {
		plys := newPlayers(playersFIFO, wsf)
		go plys.run()
		router.Path("/players").Methods("GET").Handler(plys)
	}

	// Without a Redis backend only pre-rendered tiles are served.
	if redisHost != "" {
		var colors *common.Colors
		var err error
		if colors, err = common.ParseColors(colorsFile); err != nil {
			log.Fatalf("ERROR: problem loading colors: %s", err)
		}
		// Percent -> fraction. FIX: the upper clamp bound was 100.0;
		// the value is a fraction, so clamp to 1.0 (as mtseeder does).
		colors.TransparentDim = common.Clamp32f(
			float32(transparentDim/100.0), 0.0, 1.0)
		var redisAddress string
		if strings.ContainsRune(redisHost, '/') {
			redisAddress = redisHost
		} else {
			redisAddress = fmt.Sprintf("%s:%d", redisHost, redisPort)
		}
		var allowedUpdateIps []net.IP
		if allowedUpdateIps, err = ipsFromHosts(updateHosts); err != nil {
			log.Fatalf("ERROR: name resolving problem: %s", err)
		}
		tu := newTileUpdater(
			mapDir,
			redisAddress,
			allowedUpdateIps,
			colors, bg,
			yMin, yMax,
			transparent,
			workers,
			btu)
		go tu.doUpdates()
		router.Path("/update").Methods("POST").Handler(tu)
	}

	router.PathPrefix("/").Handler(http.FileServer(http.Dir(webDir)))
	http.Handle("/", router)

	addr := fmt.Sprintf("%s:%d", webHost, webPort)
	if err := http.ListenAndServe(addr, nil); err != nil {
		log.Fatalf("Starting server failed: %s\n", err)
	}
}

29
cmd/mtwebmapper/misc.go Normal file
View File

@ -0,0 +1,29 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"net"
"strings"
)
// ipsFromHosts resolves a ';' separated host list into IP addresses.
// An empty list yields an empty (non-nil) slice, meaning "allow all".
// The first resolution failure aborts with that error.
func ipsFromHosts(hosts string) ([]net.IP, error) {
	ips := []net.IP{}
	if hosts == "" { // Empty list: allow all hosts.
		return ips, nil
	}
	for _, h := range strings.Split(hosts, ";") {
		resolved, err := net.LookupIP(h)
		if err != nil {
			return nil, err
		}
		ips = append(ips, resolved...)
	}
	return ips, nil
}

148
cmd/mtwebmapper/players.go Normal file
View File

@ -0,0 +1,148 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"bufio"
"bytes"
"encoding/json"
"html/template"
"log"
"math"
"net/http"
"os"
"sort"
"sync"
"time"
)
// sleepInterval is the back-off pause between attempts when the
// players FIFO cannot be read.
const sleepInterval = time.Second * 5
// geoJSONTmpl renders one player as a GeoJSON Feature.
// NOTE(review): coordinates are emitted in [Z, X] order -- presumably
// intentional for the web map's axis mapping; confirm against the
// client code.
var geoJSONTmpl = template.Must(template.New("geojson").Parse(
`{ "type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [{{.Z}}, {{.X}}]
},
"properties": {
"name": "{{.Name | html }}"
}
}`))
// player is one active player position as decoded from the game's
// players FIFO.
type player struct {
X float64 `json:"x"`
Y float64 `json:"y"`
Z float64 `json:"z"`
Name string `json:"name"`
}
// players periodically reads the current player list from a FIFO and
// serves it over HTTP; pls is guarded by mu, wsf (may be nil) is
// notified on changes.
type players struct {
fifo string
wsf *websocketForwarder
pls []*player
mu sync.RWMutex
}
// newPlayers creates a players tracker reading from the given FIFO;
// wsf may be nil if websocket notification is not wanted.
func newPlayers(fifo string, wsf *websocketForwarder) *players {
	ps := &players{fifo: fifo, wsf: wsf, pls: []*player{}}
	return ps
}
// MarshalJSON serializes a player as a GeoJSON Feature by executing
// geoJSONTmpl, so encoding a []*player yields a feature list.
func (p *player) MarshalJSON() ([]byte, error) {
var buf bytes.Buffer
if err := geoJSONTmpl.Execute(&buf, p); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// same reports whether o describes the same player state: equal name
// and coordinates within a small epsilon.
func (p *player) same(o *player) bool {
	const eps = 0.000001
	if p.Name != o.Name {
		return false
	}
	return math.Abs(p.X-o.X) < eps &&
		math.Abs(p.Y-o.Y) < eps &&
		math.Abs(p.Z-o.Z) < eps
}
// sortPlayersByName implements sort.Interface, ordering players
// lexicographically by name.
type sortPlayersByName []*player
func (pls sortPlayersByName) Len() int {
return len(pls)
}
func (pls sortPlayersByName) Less(i, j int) bool {
return pls[i].Name < pls[j].Name
}
func (pls sortPlayersByName) Swap(i, j int) {
pls[i], pls[j] = pls[j], pls[i]
}
// readFromFIFO opens the FIFO, decodes one JSON array of players and
// returns it.
// NOTE(review): opening a FIFO for reading typically blocks until the
// game opens it for writing, which paces the polling loop in run --
// this is OS dependent behavior, confirm for the target platform.
func (ps *players) readFromFIFO() ([]*player, error) {
file, err := os.Open(ps.fifo)
if err != nil {
return nil, err
}
defer file.Close()
reader := bufio.NewReader(file)
decoder := json.NewDecoder(reader)
var pls []*player
if err = decoder.Decode(&pls); err != nil {
return nil, err
}
return pls, nil
}
// samePlayers reports whether the two player lists are element-wise
// identical (same length, same state in the same order).
func samePlayers(a, b []*player) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !a[i].same(b[i]) {
			return false
		}
	}
	return true
}
// run is the FIFO polling loop: read the player list, sort it by name
// and -- if it differs from the published one -- swap it in (under mu)
// and notify the websocket clients. Must run in its own goroutine.
func (ps *players) run() {
for {
pls, err := ps.readFromFIFO()
if err != nil {
//log.Printf("err: %s\n", err)
// Back off; the FIFO is probably missing or not writable yet.
time.Sleep(sleepInterval)
continue
}
if pls == nil {
//log.Println("no players")
continue
}
//log.Printf("%+q\n", pls)
// Sorting makes the change comparison below order independent.
sort.Sort(sortPlayersByName(pls))
var change bool
ps.mu.Lock()
//log.Printf("%+q\n", pls)
//log.Printf("%+q\n", ps.pls)
if change = !samePlayers(pls, ps.pls); change {
ps.pls = pls
}
ps.mu.Unlock()
if change && ps.wsf != nil {
// TODO: Throttle this!
ps.wsf.BroadcastPlayers(pls)
}
}
}
// ServeHTTP answers GET /players with the current player list encoded
// as JSON (a GeoJSON feature list via player.MarshalJSON).
func (ps *players) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", "application/json")
var pls []*player
// Snapshot the slice header under the read lock; run never mutates a
// published slice, it only replaces it.
ps.mu.RLock()
pls = ps.pls
ps.mu.RUnlock()
encoder := json.NewEncoder(rw)
if err := encoder.Encode(pls); err != nil {
log.Printf("error: sending JSON failed: %s\n", err)
}
}

View File

@ -0,0 +1,227 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"fmt"
"image"
"image/color"
"image/png"
"log"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
"github.com/gorilla/mux"
)
// subBaseLine serves map tiles from the pre-rendered pyramid in
// mapDir; bg is the background color used when loading base tiles.
type subBaseLine struct {
mapDir string
bg color.RGBA
}
// newSubBaseLine creates a tile handler serving from mapDir with the
// given background color.
func newSubBaseLine(mapDir string, bg color.RGBA) *subBaseLine {
	sb := subBaseLine{mapDir: mapDir, bg: bg}
	return &sb
}
// ServeHTTP serves one map tile. Zoom levels below 9 come straight
// from the pre-rendered pyramid on disk; deeper levels are cut out of
// the containing level-8 base tile and scaled up to 256x256.
func (sb *subBaseLine) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Cache-Control", "max-age=0, no-cache, no-store")
vars := mux.Vars(r)
xs := vars["x"]
ys := vars["y"]
zs := vars["z"]
x, y, z := toUint(xs), toUint(ys), toUint(zs)
if z < 9 {
http.ServeFile(rw, r, filepath.Join(sb.mapDir,
strconv.Itoa(int(z)),
strconv.Itoa(int(x)),
strconv.Itoa(int(y))+".png"))
return
}
if z > 16 {
z = 16
}
// Level-8 tile containing the requested sub tile.
tx := x >> (z - 8)
ty := y >> (z - 8)
baseTile := filepath.Join(
sb.mapDir,
"8",
strconv.Itoa(int(tx)),
strconv.Itoa(int(ty))+".png")
var err error
var fi os.FileInfo
if fi, err = os.Stat(baseTile); err != nil {
http.NotFound(rw, r)
return
}
// Honor HTTP caching headers before doing any image work.
if checkLastModified(rw, r, fi.ModTime()) || checkETag(rw, r, fi) {
return
}
// Position of the requested sub square inside the base tile and its
// edge length w in base tile pixels; ry is inverted (parts - 1 - ry)
// relative to the tile's pixel rows.
rx := x & ^(^uint(0) << (z - 8))
ry := y & ^(^uint(0) << (z - 8))
parts := uint(1) << (z - 8)
w := uint(256) / parts
xo := w * rx
yo := w * (parts - 1 - ry)
img := common.LoadPNG(baseTile, sb.bg)
type subImage interface {
SubImage(image.Rectangle) image.Image
}
if si, ok := img.(subImage); ok {
img = si.SubImage(image.Rect(int(xo), int(yo), int(xo+w), int(yo+w)))
} else {
// Should not happen.
http.Error(rw,
http.StatusText(http.StatusInternalServerError),
http.StatusInternalServerError)
return
}
img = blowUp(img)
rw.Header().Set("Content-Type", "image/png")
if err = png.Encode(rw, img); err != nil {
log.Printf("WARN: encoding image failed: %s\n", err)
}
}
// blowUp scales src up to a 256x256 RGBA image with nearest neighbor
// sampling, stepping through the source in 8.8 fixed point.
// NOTE(review): uint8(xr) keeps the low byte of the 16 bit channel
// value returned by RGBA(); for 8 bit sources (v * 0x101) low and high
// byte coincide, so this is correct there -- confirm no >8 bit sources.
func blowUp(src image.Image) *image.RGBA {
// Fast path for RGBA -> RGBA
if rgba, ok := src.(*image.RGBA); ok {
return blowUpRGBA(rgba)
}
// Fallback
dst := image.NewRGBA(image.Rect(0, 0, 256, 256))
// fix point numbers x:8
dx, dy := src.Bounds().Dx(), src.Bounds().Dy()
bx, by := src.Bounds().Min.X<<8, src.Bounds().Min.Y<<8
//start := time.Now()
pix := dst.Pix
lineOfs := dst.PixOffset(0, 0) // Should be 0.
py := by
var r, g, b, a uint8
for y := 0; y < 256; y++ {
sy := (py >> 8) & 0xff
ox := -1
px := bx
ofs := lineOfs // Should not really b needed
lineOfs += dst.Stride
for x := 0; x < 256; x++ {
sx := (px >> 8) & 0xff
if sx != ox { // Minimize interface indirection access.
ox = sx
xr, xg, xb, xa := src.At(sx, sy).RGBA()
r, g, b, a = uint8(xr), uint8(xg), uint8(xb), uint8(xa)
}
pix[ofs] = r
pix[ofs+1] = g
pix[ofs+2] = b
pix[ofs+3] = a
ofs += 4
px += dx
}
py += dy
}
//log.Printf("Rendering took: %s\n", time.Since(start))
return dst
}
// blowUpRGBA is the RGBA fast path of blowUp: the same nearest
// neighbor 8.8 fixed point upscale to 256x256, but reading the source
// pixel buffer directly instead of through the image.Image interface.
func blowUpRGBA(src *image.RGBA) *image.RGBA {
dst := image.NewRGBA(image.Rect(0, 0, 256, 256))
// fix point numbers x:8
dx, dy := src.Bounds().Dx(), src.Bounds().Dy()
bx, by := src.Bounds().Min.X<<8, src.Bounds().Min.Y<<8
//start := time.Now()
sPix := src.Pix
dPix := dst.Pix
py := by
// Assuming memory layout is packed 256*256*4 with stride of 4*256.
// for dLineOfs, dEnd := dst.PixOffset(0, 0), dst.PixOffset(0, 256); dLineOfs < dEnd; dLineOfs += dst.Stride {
for ofs := 0; ofs < 256*256*4; {
sy := (py >> 8) & 0xff
sLineOfs := src.PixOffset(0, sy)
px := bx
// ofs := dLineOfs
for end := ofs + 4*256; ofs < end; ofs += 4 {
// (px>>6)&0x3fc == 4*((px>>8)&0xff): source x pixel * 4 bytes.
sOfs := sLineOfs + ((px >> 6) & 0x3fc)
px += dx
dPix[ofs] = sPix[sOfs]
dPix[ofs+1] = sPix[sOfs+1]
dPix[ofs+2] = sPix[sOfs+2]
dPix[ofs+3] = sPix[sOfs+3]
}
py += dy
}
//log.Printf("Rendering took: %s\n", time.Since(start))
return dst
}
func checkETag(w http.ResponseWriter, r *http.Request, fi os.FileInfo) bool {
etag := fmt.Sprintf("%x-%x", fi.ModTime().Unix(), fi.Size())
if ifNoneMatch := r.Header.Get("If-None-Match"); ifNoneMatch == etag {
w.WriteHeader(http.StatusNotModified)
return true
}
w.Header().Set("ETag", etag)
return false
}
func checkLastModified(w http.ResponseWriter, r *http.Request, modtime time.Time) bool {
if modtime.IsZero() {
return false
}
// The Date-Modified header truncates sub-second precision, so
// use mtime < t+1s instead of mtime <= t to check for unmodified.
if t, err := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modtime.Before(t.Add(1*time.Second)) {
w.WriteHeader(http.StatusNotModified)
return true
}
w.Header().Set("Last-Modified", modtime.UTC().Format(http.TimeFormat))
return false
}
// toUint parses a decimal tile coordinate. Invalid input is logged and
// mapped to 0. FIX: the previous Atoi + uint(x) conversion silently
// wrapped negative input into a huge uint; ParseUint now rejects
// negatives like any other malformed value.
func toUint(s string) uint {
	x, err := strconv.ParseUint(s, 10, 0)
	if err != nil {
		log.Printf("WARN: Cannot convert to int: %s\n", err)
		return 0
	}
	return uint(x)
}

View File

@ -0,0 +1,379 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"encoding/json"
"image"
"image/color"
"image/draw"
"log"
"net"
"net/http"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/bamiaux/rez"
"bytes"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// Number of check sums to keep in memory.
// NOTE(review): maxHashedTiles is not referenced in this part of the
// file; its use is presumably elsewhere in the package -- confirm.
const maxHashedTiles = 256
// baseTilesUpdates is implemented by consumers that want to be told
// which base tiles changed (e.g. the websocket forwarder).
type baseTilesUpdates interface {
BaseTilesUpdated([]xz)
}
// tileUpdater collects tile change requests arriving over HTTP and
// re-renders the affected base tiles. 'changes' is the pending,
// deduplicated set of base tile coordinates, guarded by mu; cond
// wakes the update goroutine when new work arrives.
type tileUpdater struct {
changes map[xz]struct{}
btu baseTilesUpdates
mapDir string
redisAddress string
ips []net.IP
colors *common.Colors
bg color.RGBA
yMin, yMax int16
workers int
transparent bool
cond *sync.Cond
mu sync.Mutex
}
// xz is a 2D block/tile coordinate (the map is viewed top-down).
type xz struct {
	X int16
	Z int16
}

// xzc is a coordinate plus a cancellation flag for pending jobs.
type xzc struct {
	xz
	canceled bool
}

// xzm is a coordinate plus a quadrant bit mask (see parent).
type xzm struct {
	xz
	Mask uint16
}

// quantize maps a block coordinate to the coordinate of the 16x16
// base tile containing it.
func (c xz) quantize() xz {
	return xz{X: (c.X + 1933) / 16, Z: (c.Z + 1933) / 16}
}

// dequantize maps a base tile coordinate back to its block origin.
func (c xz) dequantize() xz {
	return xz{X: c.X*16 - 1933, Z: c.Z*16 - 1933}
}

// parent returns the coordinate one pyramid level up together with a
// bit mask identifying which of the four child quadrants c occupies.
func (c xz) parent() xzm {
	px := c.X >> 1
	pz := c.Z >> 1
	bit := uint16(c.Z&1)<<1 | uint16(c.X&1)
	return xzm{xz{X: px, Z: pz}, 1 << bit}
}
// newTileUpdater wires up a tileUpdater with the given rendering
// parameters. The returned updater is ready to accept changes, but
// its worker loop (doUpdates) has to be started by the caller.
func newTileUpdater(
	mapDir, redisAddress string,
	ips []net.IP,
	colors *common.Colors,
	bg color.RGBA,
	yMin, yMax int,
	transparent bool,
	workers int,
	btu baseTilesUpdates) *tileUpdater {

	tu := &tileUpdater{
		btu:          btu,
		mapDir:       mapDir,
		redisAddress: redisAddress,
		ips:          ips,
		changes:      map[xz]struct{}{},
		colors:       colors,
		bg:           bg,
		yMin:         int16(yMin),
		yMax:         int16(yMax),
		transparent:  transparent,
		workers:      workers,
	}
	// The condition variable shares the updater's own mutex.
	tu.cond = sync.NewCond(&tu.mu)
	return tu
}
// checkIP reports whether the sender of r is allowed to trigger tile
// updates. An empty white list allows everybody.
func (tu *tileUpdater) checkIP(r *http.Request) bool {
	if len(tu.ips) == 0 {
		return true
	}

	idx := strings.LastIndex(r.RemoteAddr, ":")
	if idx < 0 {
		log.Printf("WARN: cannot extract host from '%s'.\n", r.RemoteAddr)
		return false
	}
	// Strip the port and the brackets of IPv6 literals.
	host := strings.Trim(r.RemoteAddr[:idx], "[]")
	ip := net.ParseIP(host)
	if ip == nil {
		log.Printf("WARN: cannot get IP for host '%s'.\n", host)
		return false
	}
	for i := range tu.ips {
		// Normalize both addresses to their 16-byte form before
		// comparing: the same IPv4 address can be stored as either a
		// 4-byte or a 16-byte net.IP, which a raw byte comparison of
		// the slices would wrongly treat as different.
		if bytes.Equal(tu.ips[i].To16(), ip.To16()) {
			return true
		}
	}
	return false
}
// ServeHTTP accepts a JSON array of changed block coordinates (e.g.
// [{"X":1,"Z":2}, ...]), quantizes them to tile coordinates and
// queues them for the update loop. Requests from hosts that are not
// white-listed are rejected with 401 Unauthorized; broken JSON is
// answered with 400 Bad Request.
func (tu *tileUpdater) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
	if !tu.checkIP(r) {
		log.Printf("WARN: Unauthorized update request from '%s'\n", r.RemoteAddr)
		http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
		return
	}
	var err error
	var newChanges []xz
	decoder := json.NewDecoder(r.Body)
	if err = decoder.Decode(&newChanges); err != nil {
		log.Printf("WARN: JSON document broken: %s\n", err)
		http.Error(rw, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		return
	}
	if len(newChanges) > 0 {
		// Merge into the pending set under the lock; the map
		// deduplicates tiles that were reported several times.
		tu.cond.L.Lock()
		for _, c := range newChanges {
			tu.changes[c.quantize()] = struct{}{}
		}
		tu.cond.L.Unlock()
		// Wake up the doUpdates loop.
		tu.cond.Signal()
	}
	rw.WriteHeader(http.StatusOK)
}
// extractChanges copies the keys of the pending-changes set into a
// freshly allocated slice of cancelable jobs.
func extractChanges(changes map[xz]struct{}) []xzc {
	jobs := make([]xzc, 0, len(changes))
	for c := range changes {
		jobs = append(jobs, xzc{xz: c})
	}
	return jobs
}
// activeChanges filters out the canceled jobs and returns the
// coordinates of the tiles that were really updated.
func activeChanges(changes []xzc) []xz {
	active := make([]xz, 0, len(changes))
	for _, ch := range changes {
		if ch.canceled {
			continue
		}
		active = append(active, ch.xz)
	}
	return active
}
// doUpdates is the endless worker loop of the tileUpdater. It waits
// for queued changes, re-renders the affected base tiles (zoom level
// 8) with a pool of workers, and then propagates the changes up the
// tile pyramid through levels 7..0. Registered listeners are
// notified about the base tiles that really changed.
func (tu *tileUpdater) doUpdates() {
	// Check-sum cache to avoid rewriting tiles with unchanged content.
	bth := common.NewBaseTileHash(maxHashedTiles)
	baseDir := filepath.Join(tu.mapDir, "8")
	for {
		// Block until ServeHTTP queued at least one change, then take
		// ownership of the whole batch.
		tu.cond.L.Lock()
		for len(tu.changes) == 0 {
			tu.cond.Wait()
		}
		changes := extractChanges(tu.changes)
		tu.changes = map[xz]struct{}{}
		tu.cond.L.Unlock()
		jobs := make(chan *xzc)
		var done sync.WaitGroup
		// A slash in the address means a unix domain socket.
		var proto string
		if strings.ContainsRune(tu.redisAddress, '/') {
			proto = "unix"
		} else {
			proto = "tcp"
		}
		// Start at most tu.workers base tile workers, each with its
		// own redis connection.
		for i, n := 0, common.Min(tu.workers, len(changes)); i < n; i++ {
			var client *common.RedisClient
			var err error
			if client, err = common.NewRedisClient(proto, tu.redisAddress); err != nil {
				log.Printf("WARN: Cannot connect to redis server: %s\n", err)
				continue
			}
			btc := common.NewBaseTileCreator(
				client, tu.colors, tu.bg,
				tu.yMin, tu.yMax,
				tu.transparent, baseDir)
			done.Add(1)
			go tu.updateBaseTiles(jobs, btc, &done, bth.Update)
		}
		for i := range changes {
			jobs <- &changes[i]
		}
		close(jobs)
		done.Wait()
		// Workers cancel jobs whose tile content did not change; only
		// the remaining ones have to bubble up the pyramid.
		actChs := activeChanges(changes)
		if len(actChs) == 0 {
			continue
		}
		// Aggregate the changed tiles into their parents, remembering
		// in the mask which quadrants of each parent are affected.
		parentJobs := make(map[xz]uint16)
		for i := range actChs {
			pxz := actChs[i].parent()
			parentJobs[pxz.xz] |= pxz.Mask
		}
		// Walk up the pyramid: re-render the dirty tiles of each level
		// and collect their parents for the next round.
		for level := 7; level >= 0; level-- {
			pJobs := make(chan xzm)
			for i, n := 0, common.Min(len(parentJobs), tu.workers); i < n; i++ {
				done.Add(1)
				go tu.updatePyramidTiles(level, pJobs, &done)
			}
			ppJobs := make(map[xz]uint16)
			for c, mask := range parentJobs {
				pJobs <- xzm{c, mask}
				pxz := c.parent()
				ppJobs[pxz.xz] |= pxz.Mask
			}
			close(pJobs)
			done.Wait()
			parentJobs = ppJobs
		}
		// Tell the listener (if any) which base tiles were updated.
		if tu.btu != nil {
			tu.btu.BaseTilesUpdated(actChs)
		}
	}
}
// updatePyramidTiles is a pyramid tile worker loop: it re-renders one
// tile per job received from the channel. The two image buffers are
// allocated once and reused across jobs to avoid per-tile
// allocations.
func (tu *tileUpdater) updatePyramidTiles(
	level int, jobs chan xzm, done *sync.WaitGroup) {
	defer done.Done()
	scratch := image.NewRGBA(image.Rect(0, 0, 256, 256))
	resized := image.NewRGBA(image.Rect(0, 0, 128, 128))
	for job := range jobs {
		if err := tu.updatePyramidTile(scratch, resized, level, job); err != nil {
			log.Printf("Updating pyramid tile failed: %s\n", err)
		}
	}
}
// dps holds the destination offsets of the four child quadrants
// inside the 256x256 scratch image; index i corresponds to the child
// whose bit i is set in xzm.Mask. Layout of the quadrants:
//
//	(0, 0)    (128, 0)
//	(0, 128)  (128, 128)
var dps = [4]image.Point{
	image.Pt(0, 128),
	image.Pt(128, 128),
	image.Pt(0, 0),
	image.Pt(128, 0),
}

// ofs maps child index i to the (x, z) offset of that child tile
// relative to twice the parent coordinate (see updatePyramidTile).
var ofs = [4][2]int{
	{0, 0},
	{1, 0},
	{0, 1},
	{1, 1}}

// windowSize is the pixel size of one quadrant of a tile.
var windowSize = image.Pt(128, 128)
// updatePyramidTile rebuilds the pyramid tile at level/(j.X, j.Z).
// For every quadrant whose bit is set in j.Mask the corresponding
// child tile from level+1 is loaded and scaled down to 128x128; the
// other quadrants are copied over from the previous version of this
// tile, which is loaded lazily (only if at least one quadrant is
// unchanged). The result is written back atomically.
func (tu *tileUpdater) updatePyramidTile(scratch, resized *image.RGBA, level int, j xzm) error {
	var orig image.Image
	// Path of the tile to (re-)generate.
	origPath := filepath.Join(
		tu.mapDir,
		strconv.Itoa(level),
		strconv.Itoa(int(j.X)),
		strconv.Itoa(int(j.Z))+".png")
	sr := resized.Bounds()
	levelDir := strconv.Itoa(level + 1)
	for i := uint16(0); i < 4; i++ {
		if j.Mask&(1<<i) != 0 {
			// Quadrant was modified: downscale the child tile into
			// the matching quadrant of the scratch image.
			//log.Printf("level %d: modified %d\n", level, i)
			o := ofs[i]
			bx, bz := int(2*j.X), int(2*j.Z)
			path := filepath.Join(
				tu.mapDir,
				levelDir,
				strconv.Itoa(bx+o[0]),
				strconv.Itoa(bz+o[1])+".png")
			img := common.LoadPNG(path, tu.bg)
			if err := rez.Convert(resized, img, common.ResizeFilter); err != nil {
				return err
			}
			r := sr.Sub(sr.Min).Add(dps[i])
			draw.Draw(scratch, r, resized, sr.Min, draw.Src)
		} else {
			// Quadrant unchanged: keep the pixels of the old tile.
			// Load lazy
			if orig == nil {
				orig = common.LoadPNG(origPath, tu.bg)
			}
			//log.Printf("level %d: copied %d\n", level, i)
			min := orig.Bounds().Min.Add(dps[i])
			r := image.Rectangle{min, min.Add(windowSize)}
			draw.Draw(scratch, r, orig, min, draw.Src)
		}
	}
	return common.SaveAsPNGAtomic(origPath, scratch)
}
// updateBaseTiles is a base tile worker. It renders the world area of
// every job it receives with the given BaseTileCreator and hands the
// resulting image write over to a dedicated writer goroutine. Jobs
// whose rendering fails or whose written image turns out to be
// unchanged (decided by the update check-sum callback) are marked
// canceled so the pyramid update can skip them.
func (tu *tileUpdater) updateBaseTiles(
	jobs chan *xzc,
	btc *common.BaseTileCreator,
	done *sync.WaitGroup,
	update common.BaseTileUpdateFunc) {

	// jobWriter couples a rendered job with the deferred write of its
	// image so writing happens off the render path.
	type jobWriter struct {
		job *xzc
		wFn func() (bool, error)
	}

	jWs := make(chan jobWriter)

	// The writer goroutine serializes the disk writes of this worker.
	// Track its completion so the deferred cleanup below can wait for
	// it: the original code called done.Done() right after close(jWs),
	// which let doUpdates read job.canceled (and close the creator)
	// while the last tile could still be in flight here.
	var writerDone sync.WaitGroup
	writerDone.Add(1)
	go func() {
		defer writerDone.Done()
		for jw := range jWs {
			updated, err := jw.wFn()
			if err != nil {
				log.Printf("WARN: writing tile failed: %v.\n", err)
			}
			if !updated {
				// Tile content identical to the version on disk.
				jw.job.canceled = true
			}
		}
	}()

	defer func() {
		close(jWs)
		// Drain all pending writes before releasing the creator and
		// signaling completion to doUpdates.
		writerDone.Wait()
		btc.Close()
		done.Done()
	}()

	for job := range jobs {
		xz := job.dequantize()
		if err := btc.RenderArea(xz.X-1, xz.Z-1); err != nil {
			log.Printf("WARN: rendering tile failed: %v.\n", err)
			job.canceled = true
			continue
		}
		jWs <- jobWriter{job, btc.WriteFunc(int(job.X), int(job.Z), update)}
	}
}

View File

@ -0,0 +1 @@
.leaflet-control-coordinates{background-color:#D8D8D8;background-color:rgba(255,255,255,.8);cursor:pointer}.leaflet-control-coordinates,.leaflet-control-coordinates .uiElement input{-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px}.leaflet-control-coordinates .uiElement{margin:4px}.leaflet-control-coordinates .uiElement .labelFirst{margin-right:4px}.leaflet-control-coordinates .uiHidden{display:none}

1338
cmd/mtwebmapper/web/css/font-awesome.css vendored Normal file

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 535 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 65 KiB

View File

@ -0,0 +1,124 @@
/*
Author: L. Voogdt
License: MIT
Version: 1.0
*/
/* Marker setup */
.awesome-marker {
background: url('images/markers-soft.png') no-repeat 0 0;
width: 35px;
height: 46px;
position:absolute;
left:0;
top:0;
display: block;
text-align: center;
}
.awesome-marker-shadow {
background: url('images/markers-shadow.png') no-repeat 0 0;
width: 36px;
height: 16px;
}
/* Retina displays */
@media (min--moz-device-pixel-ratio: 1.5),(-o-min-device-pixel-ratio: 3/2),
(-webkit-min-device-pixel-ratio: 1.5),(min-device-pixel-ratio: 1.5),(min-resolution: 1.5dppx) {
.awesome-marker {
background-image: url('images/markers-soft@2x.png');
background-size: 720px 46px;
}
.awesome-marker-shadow {
background-image: url('images/markers-shadow@2x.png');
background-size: 35px 16px;
}
}
.awesome-marker i {
color: #333;
margin-top: 10px;
display: inline-block;
font-size: 14px;
}
.awesome-marker .icon-white {
color: #fff;
}
/* Colors */
.awesome-marker-icon-red {
background-position: 0 0;
}
.awesome-marker-icon-darkred {
background-position: -180px 0;
}
.awesome-marker-icon-lightred {
background-position: -360px 0;
}
.awesome-marker-icon-orange {
background-position: -36px 0;
}
.awesome-marker-icon-beige {
background-position: -396px 0;
}
.awesome-marker-icon-green {
background-position: -72px 0;
}
.awesome-marker-icon-darkgreen {
background-position: -252px 0;
}
.awesome-marker-icon-lightgreen {
background-position: -432px 0;
}
.awesome-marker-icon-blue {
background-position: -108px 0;
}
.awesome-marker-icon-darkblue {
background-position: -216px 0;
}
.awesome-marker-icon-lightblue {
background-position: -468px 0;
}
.awesome-marker-icon-purple {
background-position: -144px 0;
}
.awesome-marker-icon-darkpurple {
background-position: -288px 0;
}
.awesome-marker-icon-pink {
background-position: -504px 0;
}
.awesome-marker-icon-cadetblue {
background-position: -324px 0;
}
.awesome-marker-icon-white {
background-position: -574px 0;
}
.awesome-marker-icon-gray {
background-position: -648px 0;
}
.awesome-marker-icon-lightgray {
background-position: -612px 0;
}
.awesome-marker-icon-black {
background-position: -682px 0;
}

View File

@ -0,0 +1,478 @@
/* required styles */
.leaflet-map-pane,
.leaflet-tile,
.leaflet-marker-icon,
.leaflet-marker-shadow,
.leaflet-tile-pane,
.leaflet-tile-container,
.leaflet-overlay-pane,
.leaflet-shadow-pane,
.leaflet-marker-pane,
.leaflet-popup-pane,
.leaflet-overlay-pane svg,
.leaflet-zoom-box,
.leaflet-image-layer,
.leaflet-layer {
position: absolute;
left: 0;
top: 0;
}
.leaflet-container {
overflow: hidden;
-ms-touch-action: none;
}
.leaflet-tile,
.leaflet-marker-icon,
.leaflet-marker-shadow {
-webkit-user-select: none;
-moz-user-select: none;
user-select: none;
-webkit-user-drag: none;
}
.leaflet-marker-icon,
.leaflet-marker-shadow {
display: block;
}
/* map is broken in FF if you have max-width: 100% on tiles */
.leaflet-container img {
max-width: none !important;
}
/* stupid Android 2 doesn't understand "max-width: none" properly */
.leaflet-container img.leaflet-image-layer {
max-width: 15000px !important;
}
.leaflet-tile {
filter: inherit;
visibility: hidden;
}
.leaflet-tile-loaded {
visibility: inherit;
}
.leaflet-zoom-box {
width: 0;
height: 0;
}
/* workaround for https://bugzilla.mozilla.org/show_bug.cgi?id=888319 */
.leaflet-overlay-pane svg {
-moz-user-select: none;
}
.leaflet-tile-pane { z-index: 2; }
.leaflet-objects-pane { z-index: 3; }
.leaflet-overlay-pane { z-index: 4; }
.leaflet-shadow-pane { z-index: 5; }
.leaflet-marker-pane { z-index: 6; }
.leaflet-popup-pane { z-index: 7; }
.leaflet-vml-shape {
width: 1px;
height: 1px;
}
.lvml {
behavior: url(#default#VML);
display: inline-block;
position: absolute;
}
/* control positioning */
.leaflet-control {
position: relative;
z-index: 7;
pointer-events: auto;
}
.leaflet-top,
.leaflet-bottom {
position: absolute;
z-index: 1000;
pointer-events: none;
}
.leaflet-top {
top: 0;
}
.leaflet-right {
right: 0;
}
.leaflet-bottom {
bottom: 0;
}
.leaflet-left {
left: 0;
}
.leaflet-control {
float: left;
clear: both;
}
.leaflet-right .leaflet-control {
float: right;
}
.leaflet-top .leaflet-control {
margin-top: 10px;
}
.leaflet-bottom .leaflet-control {
margin-bottom: 10px;
}
.leaflet-left .leaflet-control {
margin-left: 10px;
}
.leaflet-right .leaflet-control {
margin-right: 10px;
}
/* zoom and fade animations */
.leaflet-fade-anim .leaflet-tile,
.leaflet-fade-anim .leaflet-popup {
opacity: 0;
-webkit-transition: opacity 0.2s linear;
-moz-transition: opacity 0.2s linear;
-o-transition: opacity 0.2s linear;
transition: opacity 0.2s linear;
}
.leaflet-fade-anim .leaflet-tile-loaded,
.leaflet-fade-anim .leaflet-map-pane .leaflet-popup {
opacity: 1;
}
.leaflet-zoom-anim .leaflet-zoom-animated {
-webkit-transition: -webkit-transform 0.25s cubic-bezier(0,0,0.25,1);
-moz-transition: -moz-transform 0.25s cubic-bezier(0,0,0.25,1);
-o-transition: -o-transform 0.25s cubic-bezier(0,0,0.25,1);
transition: transform 0.25s cubic-bezier(0,0,0.25,1);
}
.leaflet-zoom-anim .leaflet-tile,
.leaflet-pan-anim .leaflet-tile,
.leaflet-touching .leaflet-zoom-animated {
-webkit-transition: none;
-moz-transition: none;
-o-transition: none;
transition: none;
}
.leaflet-zoom-anim .leaflet-zoom-hide {
visibility: hidden;
}
/* cursors */
.leaflet-clickable {
cursor: pointer;
}
.leaflet-container {
cursor: -webkit-grab;
cursor: -moz-grab;
}
.leaflet-popup-pane,
.leaflet-control {
cursor: auto;
}
.leaflet-dragging .leaflet-container,
.leaflet-dragging .leaflet-clickable {
cursor: move;
cursor: -webkit-grabbing;
cursor: -moz-grabbing;
}
/* visual tweaks */
.leaflet-container {
background: #ddd;
outline: 0;
}
.leaflet-container a {
color: #0078A8;
}
.leaflet-container a.leaflet-active {
outline: 2px solid orange;
}
.leaflet-zoom-box {
border: 2px dotted #38f;
background: rgba(255,255,255,0.5);
}
/* general typography */
.leaflet-container {
font: 12px/1.5 "Helvetica Neue", Arial, Helvetica, sans-serif;
}
/* general toolbar styles */
.leaflet-bar {
box-shadow: 0 1px 5px rgba(0,0,0,0.65);
border-radius: 4px;
}
.leaflet-bar a,
.leaflet-bar a:hover {
background-color: #fff;
border-bottom: 1px solid #ccc;
width: 26px;
height: 26px;
line-height: 26px;
display: block;
text-align: center;
text-decoration: none;
color: black;
}
.leaflet-bar a,
.leaflet-control-layers-toggle {
background-position: 50% 50%;
background-repeat: no-repeat;
display: block;
}
.leaflet-bar a:hover {
background-color: #f4f4f4;
}
.leaflet-bar a:first-child {
border-top-left-radius: 4px;
border-top-right-radius: 4px;
}
.leaflet-bar a:last-child {
border-bottom-left-radius: 4px;
border-bottom-right-radius: 4px;
border-bottom: none;
}
.leaflet-bar a.leaflet-disabled {
cursor: default;
background-color: #f4f4f4;
color: #bbb;
}
.leaflet-touch .leaflet-bar a {
width: 30px;
height: 30px;
line-height: 30px;
}
/* zoom control */
.leaflet-control-zoom-in,
.leaflet-control-zoom-out {
font: bold 18px 'Lucida Console', Monaco, monospace;
text-indent: 1px;
}
.leaflet-control-zoom-out {
font-size: 20px;
}
.leaflet-touch .leaflet-control-zoom-in {
font-size: 22px;
}
.leaflet-touch .leaflet-control-zoom-out {
font-size: 24px;
}
/* layers control */
.leaflet-control-layers {
box-shadow: 0 1px 5px rgba(0,0,0,0.4);
background: #fff;
border-radius: 5px;
}
.leaflet-control-layers-toggle {
background-image: url(images/layers.png);
width: 36px;
height: 36px;
}
.leaflet-retina .leaflet-control-layers-toggle {
background-image: url(images/layers-2x.png);
background-size: 26px 26px;
}
.leaflet-touch .leaflet-control-layers-toggle {
width: 44px;
height: 44px;
}
.leaflet-control-layers .leaflet-control-layers-list,
.leaflet-control-layers-expanded .leaflet-control-layers-toggle {
display: none;
}
.leaflet-control-layers-expanded .leaflet-control-layers-list {
display: block;
position: relative;
}
.leaflet-control-layers-expanded {
padding: 6px 10px 6px 6px;
color: #333;
background: #fff;
}
.leaflet-control-layers-selector {
margin-top: 2px;
position: relative;
top: 1px;
}
.leaflet-control-layers label {
display: block;
}
.leaflet-control-layers-separator {
height: 0;
border-top: 1px solid #ddd;
margin: 5px -10px 5px -6px;
}
/* attribution and scale controls */
.leaflet-container .leaflet-control-attribution {
background: #fff;
background: rgba(255, 255, 255, 0.7);
margin: 0;
}
.leaflet-control-attribution,
.leaflet-control-scale-line {
padding: 0 5px;
color: #333;
}
.leaflet-control-attribution a {
text-decoration: none;
}
.leaflet-control-attribution a:hover {
text-decoration: underline;
}
.leaflet-container .leaflet-control-attribution,
.leaflet-container .leaflet-control-scale {
font-size: 11px;
}
.leaflet-left .leaflet-control-scale {
margin-left: 5px;
}
.leaflet-bottom .leaflet-control-scale {
margin-bottom: 5px;
}
.leaflet-control-scale-line {
border: 2px solid #777;
border-top: none;
line-height: 1.1;
padding: 2px 5px 1px;
font-size: 11px;
white-space: nowrap;
overflow: hidden;
-moz-box-sizing: content-box;
box-sizing: content-box;
background: #fff;
background: rgba(255, 255, 255, 0.5);
}
.leaflet-control-scale-line:not(:first-child) {
border-top: 2px solid #777;
border-bottom: none;
margin-top: -2px;
}
.leaflet-control-scale-line:not(:first-child):not(:last-child) {
border-bottom: 2px solid #777;
}
.leaflet-touch .leaflet-control-attribution,
.leaflet-touch .leaflet-control-layers,
.leaflet-touch .leaflet-bar {
box-shadow: none;
}
.leaflet-touch .leaflet-control-layers,
.leaflet-touch .leaflet-bar {
border: 2px solid rgba(0,0,0,0.2);
background-clip: padding-box;
}
/* popup */
.leaflet-popup {
position: absolute;
text-align: center;
}
.leaflet-popup-content-wrapper {
padding: 1px;
text-align: left;
border-radius: 12px;
}
.leaflet-popup-content {
margin: 13px 19px;
line-height: 1.4;
}
.leaflet-popup-content p {
margin: 18px 0;
}
.leaflet-popup-tip-container {
margin: 0 auto;
width: 40px;
height: 20px;
position: relative;
overflow: hidden;
}
.leaflet-popup-tip {
width: 17px;
height: 17px;
padding: 1px;
margin: -10px auto 0;
-webkit-transform: rotate(45deg);
-moz-transform: rotate(45deg);
-ms-transform: rotate(45deg);
-o-transform: rotate(45deg);
transform: rotate(45deg);
}
.leaflet-popup-content-wrapper,
.leaflet-popup-tip {
background: white;
box-shadow: 0 3px 14px rgba(0,0,0,0.4);
}
.leaflet-container a.leaflet-popup-close-button {
position: absolute;
top: 0;
right: 0;
padding: 4px 4px 0 0;
text-align: center;
width: 18px;
height: 14px;
font: 16px/14px Tahoma, Verdana, sans-serif;
color: #c3c3c3;
text-decoration: none;
font-weight: bold;
background: transparent;
}
.leaflet-container a.leaflet-popup-close-button:hover {
color: #999;
}
.leaflet-popup-scrolled {
overflow: auto;
border-bottom: 1px solid #ddd;
border-top: 1px solid #ddd;
}
.leaflet-oldie .leaflet-popup-content-wrapper {
zoom: 1;
}
.leaflet-oldie .leaflet-popup-tip {
width: 24px;
margin: 0 auto;
-ms-filter: "progid:DXImageTransform.Microsoft.Matrix(M11=0.70710678, M12=0.70710678, M21=-0.70710678, M22=0.70710678)";
filter: progid:DXImageTransform.Microsoft.Matrix(M11=0.70710678, M12=0.70710678, M21=-0.70710678, M22=0.70710678);
}
.leaflet-oldie .leaflet-popup-tip-container {
margin-top: -1px;
}
.leaflet-oldie .leaflet-control-zoom,
.leaflet-oldie .leaflet-control-layers,
.leaflet-oldie .leaflet-popup-content-wrapper,
.leaflet-oldie .leaflet-popup-tip {
border: 1px solid #999;
}
/* div icon */
.leaflet-div-icon {
background: #fff;
border: 1px solid #666;
}

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,152 @@
<!DOCTYPE html>
<html>
<head>
<title>Minetest demo map</title>
<meta charset="utf-8" />
<!-- Leaflet core and plugin style sheets -->
<link rel="stylesheet" href="css/leaflet.css" />
<link rel="stylesheet" href="css/Leaflet.Coordinates-0.1.4.css" />
<link rel="stylesheet" href="css/font-awesome.css" />
<link rel="stylesheet" href="css/leaflet.awesome-markers.css" />
<style type="text/css">
/* Let the map fill the whole viewport. */
body {
height: 100%;
}
#map {
display: block;
position: absolute;
top: 0;
left: 0;
width: 100%;
height: 100%;
background-color: #111111;
}
.leaflet-container {
cursor: crosshair;
}
.leaflet-control-coordinates,
.leaflet-control-layers {
box-shadow: 0 1px 3px rgba(0,0,0,0.3);
background-color:rgba(255,255,255,.85);
}
.awesome-marker i {
font-size: 18px;
margin-left: -1px;
}
</style>
</head>
<body>
<div id="map"></div>
<script src="js/leaflet.js"></script>
<script src="js/Leaflet.Coordinates-0.1.4.min.js"></script>
<script src="js/easy-button.js"></script>
<script src="js/auto-update.js"></script>
<script type="text/javascript" src="js/leaflet-hash.js"></script>
<script type="text/javascript" src="js/leaflet.ajax.js"></script>
<script type="text/javascript" src="js/leaflet.awesome-markers.js"></script>
<script>
var useWebsocket = true; // Set to true if you want websocket support
// Identity projection: map world coordinates straight to pixels
// instead of wrapping them around a globe.
L.Projection.NoWrap = {
project: function (latlng) {
return new L.Point(latlng.lat, latlng.lng);
},
unproject: function (point, unbounded) {
return new L.LatLng(point.x, point.y, true);
}
};
// CRS using the identity projection with a fixed scale/offset.
L.CRS.Direct = L.Util.extend({}, L.CRS, {
code: 'Direct',
projection: L.Projection.NoWrap,
transformation: new L.Transformation(1.0/65536, 30928.0/65536, -1.0/65536, 34608.0/65536)
});
// Tile layer served by mtwebmapper under /map.
var world = new L.tileLayer('map/{z}/{x}/{y}.png', {
minZoom: 0,
maxZoom: 16,
attribution: 'Demo world',
continuousWorld: false,
noWrap: true,
tms: true,
unloadInvisibleTiles: true
});
// Player positions as GeoJSON markers, fetched from /players.
var players = L.geoJson.ajax('/players', {
pointToLayer: function(feature, latlng) {
return L.marker(latlng, {
icon: L.AwesomeMarkers.icon({
icon: 'male',
iconColor: 'black',
prefix: 'fa',
markerColor: 'orange'
}),
title: feature.properties.name
})
}
});
var rasterMaps = {
"A demo world": world,
};
var latest = world
var overlayMaps = {'Players': players};
var map = L.map('map', {
center: [0,0],
zoom: 3,
layers: [latest],
worldCopyJump: false,
crs: L.CRS.Direct});
// Coordinate read-out in the top right corner.
L.control.coordinates({
position:"topright", //optional default "bootomright"
decimals:0, //optional default 4
decimalSeperator:".", //optional default "."
labelTemplateLat:"X: {y}", //optional default "Lat: {y}"
labelTemplateLng:"Y: {x}", //optional default "Lng: {x}"
enableUserInput:false, //optional default true
useDMS:false, //optional default false
useLatLngOrder: true //ordering of labels, default false-> lng-lat
}).addTo(map);
var manualUpdateControl;
// If websockets are available, add the auto-update toggle; it hides
// the manual refresh button while auto updating is active.
if (useWebsocket && 'WebSocket' in window) {
L.autoUpdate('autoUpdate', function(pressed) {
var styleDec = manualUpdateControl.getContainer().style;
styleDec.visibility = pressed ? 'hidden' : 'visible';
},
players);
}
var layersControl = new L.Control.Layers(rasterMaps, overlayMaps, {collapsed: false});
map.addControl(layersControl);
// Manual refresh: force-reload all visible tiles by appending a
// random URL fragment, then re-fetch the player layer.
manualUpdateControl = L.easyButton('fa-refresh',
function (){
var tiles = document.getElementsByTagName("img");
for (var i = 0; i < tiles.length; i++) {
var img = tiles[i];
var cl = img.getAttribute("class");
if (cl.indexOf("leaflet-tile-loaded") >= 0) {
var src = img.src;
var idx = src.lastIndexOf("#");
if (idx >= 0) {
src = src.substring(0, idx);
}
img.src = src + "#" + Math.random();
}
}
//map._resetView(map.getCenter(), map.getZoom(), false);
players.refresh("/players");
},
'Update view'
);
// Keep zoom/center in the URL fragment for shareable links.
var hash = new L.Hash(map)
</script>
</body>
</html>

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,187 @@
/*
 * Leaflet control that toggles automatic map updates over a
 * websocket connection to the server endpoint "/ws". While active it
 * refreshes the configured player layer and forces a reload of the
 * tiles the server reports as changed.
 */
L.Control.AutoUpdate = L.Control.extend({
	options: {
		position: 'topleft',
		label: 'Automatic update',
		layer: undefined
	},

	// true while automatic updating is switched OFF (button shows the
	// "play" icon).
	pressed: true,

	// Builds the button DOM and hooks up the click handler.
	onAdd: function() {
		var container = L.DomUtil.create('div', 'leaflet-bar leaflet-control');
		this.link = L.DomUtil.create('a', 'leaflet-bar-part', container);
		this.iconStart = L.DomUtil.create('i', 'fa fa-play', this.link);
		this.link.href = '#';
		L.DomEvent.on(this.link, 'click', this.cbClick, this);
		return container;
	},

	// Flips between play (stopped) and pause (auto updating) state.
	switchButtons: function() {
		if (this.pressed) {
			this.pressed = false;
			this.iconStart.setAttribute('class', 'fa fa-pause');
			this.autoUpdate();
		} else {
			this.pressed = true;
			this.iconStart.setAttribute('class', 'fa fa-play');
			this.stopUpdate();
		}
	},

	cbClick: function (e) {
		L.DomEvent.stopPropagation(e);
		this.intendedFunction(this.pressed);
		this.switchButtons();
	},

	// Replaced by the user callback via L.autoUpdate().
	intendedFunction: function() {
		alert('no function selected');
	},

	// Closes the websocket, if one is open.
	stopUpdate: function() {
		if (this.socket) {
			var s = this.socket;
			this.socket = null;
			s.close();
		}
	},

	// Opens the websocket and processes server update messages.
	autoUpdate: function() {
		var me = this;
		this.socket = new WebSocket('ws://' + window.location.host + '/ws');

		this.socket.onerror = function(evt) {
			me.stopUpdate();
			me.switchButtons();
		};

		this.socket.onclose = function(evt) {
			// BUGFIX: inside this handler 'this' is the websocket,
			// not the control. Clear the control's reference through
			// the captured 'me' so the heartbeat stops pinging a
			// closed socket.
			me.socket = null;
		};

		this.socket.onopen = function(evt) {
			// Sending pings every 8 secs to keep connection alive.
			var heartbeat = function() {
				if (heartbeat && me.socket) {
					me.socket.send("PING");
					setTimeout(heartbeat, 8000);
				} else {
					// Prevent sending pings to re-opened sockets.
					heartbeat = null;
				}
			};
			setTimeout(heartbeat, 8000);
		};

		this.socket.onmessage = function(evt) {
			var json = evt.data;
			if (!(typeof json === "string")) {
				return;
			}
			var msg;
			try {
				msg = JSON.parse(json);
			}
			catch (err) {
				return;
			}
			// Refresh the player markers.
			if (msg.players) {
				me.options.layer.clearLayers();
				me.options.layer.addData(msg.players);
			}
			var tilesData = msg.tiles;
			if (!tilesData) {
				return;
			}
			// Build a lookup pyramid (zoom 8 down to 0) of the changed
			// base tiles so any currently displayed tile can be tested
			// quickly for invalidation.
			var invalidate = function(td) {
				var pyramid = new Array(9);
				var last = new Object();
				pyramid[8] = last;
				for (var i = 0; i < td.length; i++) {
					var xz = td[i];
					last[xz.X + "#" + xz.Z] = xz;
				}
				for (var p = 7; p >= 0; p--) {
					var prev = pyramid[p+1];
					var curr = new Object();
					pyramid[p] = curr;
					for (var k in prev) {
						if (prev.hasOwnProperty(k)) {
							var oxz = prev[k];
							var nxz = { X: oxz.X >> 1, Z: oxz.Z >> 1 };
							curr[nxz.X + "#" + nxz.Z] = nxz;
						}
					}
				}
				return function(x, y, z) {
					if (y > 8) {
						x >>= y - 8;
						z >>= y - 8;
						y = 8;
					}
					var level = pyramid[y];
					var k = x + "#" + z;
					return level.hasOwnProperty(k);
				};
			} (tilesData);

			// Force a reload of all affected, currently loaded tiles
			// by appending a random fragment to their URLs.
			var tiles = document.getElementsByTagName('img');
			var re = /\/map\/([0-9]+)\/([0-9]+)\/([0-9]+).*/;
			for (var i = 0; i < tiles.length; i++) {
				var img = tiles[i];
				var cl = img.getAttribute('class');
				// Guard: getAttribute returns null if no class is set.
				if (!cl || cl.indexOf('leaflet-tile-loaded') < 0) {
					continue;
				}
				var src = img.src;
				var coord = src.match(re);
				if (coord == null) {
					continue;
				}
				var y = parseInt(coord[1]);
				var x = parseInt(coord[2]);
				var z = parseInt(coord[3]);
				if (invalidate(x, y, z)) {
					var idx = src.lastIndexOf('#');
					if (idx >= 0) {
						src = src.substring(0, idx);
					}
					img.src = src + '#' + Math.random();
				}
			}
		};
	}
});
/*
 * Factory for L.Control.AutoUpdate. cbLabel sets the button label,
 * cbFunc the click callback, layer the GeoJSON layer to refresh.
 * cbMap selects where to add the control: '' skips adding, a map
 * object adds it there, anything falsy defaults to the global 'map'.
 */
L.autoUpdate = function(cbLabel, cbFunc, layer, cbMap) {
	var control = new L.Control.AutoUpdate();

	if (cbLabel) {
		control.options.label = cbLabel;
	}
	if (cbFunc) {
		control.intendedFunction = cbFunc;
	}
	if (layer) {
		control.options.layer = layer;
	}

	// The empty string explicitly requests "do not add to any map".
	if (cbMap === '') {
		return control;
	}
	(cbMap || map).addControl(control);
	return control;
};

View File

@ -0,0 +1,48 @@
// Leaflet control: a single toolbar button showing a Font Awesome
// icon that invokes a user-supplied function when clicked.
L.Control.EasyButtons = L.Control.extend({
	options: {
		position: 'topleft',
		title: '',
		intentedIcon: 'fa-circle-o'
	},

	// Builds the anchor element with the configured icon and title.
	onAdd: function () {
		var container = L.DomUtil.create('div', 'leaflet-bar leaflet-control');
		this.link = L.DomUtil.create('a', 'leaflet-bar-part', container);
		L.DomUtil.create('i', 'fa fa-lg ' + this.options.intentedIcon , this.link);
		this.link.href = '#';
		L.DomEvent.on(this.link, 'click', this._click, this);
		this.link.title = this.options.title;
		return container;
	},

	// Replaced via L.easyButton(); default just warns.
	intendedFunction: function(){ alert('no function selected');},

	_click: function (e) {
		L.DomEvent.stopPropagation(e);
		L.DomEvent.preventDefault(e);
		this.intendedFunction();
	},
});

// Factory for L.Control.EasyButtons. btnMap selects where to add the
// control: '' skips adding, a map object adds it there, anything
// falsy defaults to the global 'map'.
L.easyButton = function( btnIcon , btnFunction , btnTitle , btnMap ) {
	var newControl = new L.Control.EasyButtons;
	if (btnIcon) newControl.options.intentedIcon = btnIcon;

	if ( typeof btnFunction === 'function'){
		newControl.intendedFunction = btnFunction;
	}

	if (btnTitle) newControl.options.title = btnTitle;

	if ( btnMap == '' ){
		// skip auto addition
	} else if ( btnMap ) {
		btnMap.addControl(newControl);
	} else {
		map.addControl(newControl);
	}
	return newControl;
};

View File

@ -0,0 +1,162 @@
(function(window) {
var HAS_HASHCHANGE = (function() {
var doc_mode = window.documentMode;
return ('onhashchange' in window) &&
(doc_mode === undefined || doc_mode > 7);
})();
L.Hash = function(map) {
this.onHashChange = L.Util.bind(this.onHashChange, this);
if (map) {
this.init(map);
}
};
L.Hash.parseHash = function(hash) {
if(hash.indexOf('#') === 0) {
hash = hash.substr(1);
}
var args = hash.split("/");
if (args.length == 3) {
var zoom = parseInt(args[0], 10),
lat = parseFloat(args[1]),
lon = parseFloat(args[2]);
if (isNaN(zoom) || isNaN(lat) || isNaN(lon)) {
return false;
} else {
return {
center: new L.LatLng(lat, lon),
zoom: zoom
};
}
} else {
return false;
}
};
L.Hash.formatHash = function(map) {
var center = map.getCenter(),
zoom = map.getZoom(),
precision = Math.max(0, Math.ceil(Math.log(zoom) / Math.LN2));
return "#" + [zoom,
center.lat.toFixed(precision),
center.lng.toFixed(precision)
].join("/");
},
L.Hash.prototype = {
map: null,
lastHash: null,
parseHash: L.Hash.parseHash,
formatHash: L.Hash.formatHash,
init: function(map) {
this.map = map;
// reset the hash
this.lastHash = null;
this.onHashChange();
if (!this.isListening) {
this.startListening();
}
},
removeFrom: function(map) {
if (this.changeTimeout) {
clearTimeout(this.changeTimeout);
}
if (this.isListening) {
this.stopListening();
}
this.map = null;
},
onMapMove: function() {
// bail if we're moving the map (updating from a hash),
// or if the map is not yet loaded
if (this.movingMap || !this.map._loaded) {
return false;
}
var hash = this.formatHash(this.map);
if (this.lastHash != hash) {
location.replace(hash);
this.lastHash = hash;
}
},
movingMap: false,
update: function() {
var hash = location.hash;
if (hash === this.lastHash) {
return;
}
var parsed = this.parseHash(hash);
if (parsed) {
this.movingMap = true;
this.map.setView(parsed.center, parsed.zoom);
this.movingMap = false;
} else {
this.onMapMove(this.map);
}
},
// defer hash change updates every 100ms
changeDefer: 100,
changeTimeout: null,
onHashChange: function() {
// throttle calls to update() so that they only happen every
// `changeDefer` ms
if (!this.changeTimeout) {
var that = this;
this.changeTimeout = setTimeout(function() {
that.update();
that.changeTimeout = null;
}, this.changeDefer);
}
},
isListening: false,
hashChangeInterval: null,
startListening: function() {
this.map.on("moveend", this.onMapMove, this);
if (HAS_HASHCHANGE) {
L.DomEvent.addListener(window, "hashchange", this.onHashChange);
} else {
clearInterval(this.hashChangeInterval);
this.hashChangeInterval = setInterval(this.onHashChange, 50);
}
this.isListening = true;
},
stopListening: function() {
this.map.off("moveend", this.onMapMove, this);
if (HAS_HASHCHANGE) {
L.DomEvent.removeListener(window, "hashchange", this.onHashChange);
} else {
clearInterval(this.hashChangeInterval);
}
this.isListening = false;
}
};
L.hash = function(map) {
return new L.Hash(map);
};
L.Map.prototype.addHash = function() {
this._hash = L.hash(this);
};
L.Map.prototype.removeHash = function() {
this._hash.removeFrom();
};
})(window);

View File

@ -0,0 +1,740 @@
;(function(){
/**
* Require the given path.
*
* @param {String} path
* @return {Object} exports
* @api public
*/
function require(path, parent, orig) {
var resolved = require.resolve(path);
// lookup failed
if (null == resolved) {
orig = orig || path;
parent = parent || 'root';
var err = new Error('Failed to require "' + orig + '" from "' + parent + '"');
err.path = orig;
err.parent = parent;
err.require = true;
throw err;
}
var module = require.modules[resolved];
// perform real require()
// by invoking the module's
// registered function
if (!module.exports) {
module.exports = {};
module.client = module.component = true;
module.call(this, module.exports, require.relative(resolved), module);
}
return module.exports;
}
/**
* Registered modules.
*/
require.modules = {};
/**
* Registered aliases.
*/
require.aliases = {};
/**
* Resolve `path`.
*
* Lookup:
*
* - PATH/index.js
* - PATH.js
* - PATH
*
* @param {String} path
* @return {String} path or null
* @api private
*/
require.resolve = function(path) {
if (path.charAt(0) === '/') path = path.slice(1);
var paths = [
path,
path + '.js',
path + '.json',
path + '/index.js',
path + '/index.json'
];
for (var i = 0; i < paths.length; i++) {
var path = paths[i];
if (require.modules.hasOwnProperty(path)) return path;
if (require.aliases.hasOwnProperty(path)) return require.aliases[path];
}
};
/**
* Normalize `path` relative to the current path.
*
* @param {String} curr
* @param {String} path
* @return {String}
* @api private
*/
require.normalize = function(curr, path) {
var segs = [];
if ('.' != path.charAt(0)) return path;
curr = curr.split('/');
path = path.split('/');
for (var i = 0; i < path.length; ++i) {
if ('..' == path[i]) {
curr.pop();
} else if ('.' != path[i] && '' != path[i]) {
segs.push(path[i]);
}
}
return curr.concat(segs).join('/');
};
/**
* Register module at `path` with callback `definition`.
*
* @param {String} path
* @param {Function} definition
* @api private
*/
require.register = function(path, definition) {
require.modules[path] = definition;
};
/**
* Alias a module definition.
*
* @param {String} from
* @param {String} to
* @api private
*/
require.alias = function(from, to) {
if (!require.modules.hasOwnProperty(from)) {
throw new Error('Failed to alias "' + from + '", it does not exist');
}
require.aliases[to] = from;
};
/**
 * Return a require function relative to the `parent` path.
 *
 * @param {String} parent
 * @return {Function}
 * @api private
 */
require.relative = function(parent) {
  // Base directory for resolving "./" requires: one level above `parent`.
  var p = require.normalize(parent, '..');

  /**
   * lastIndexOf helper.
   */
  function lastIndexOf(arr, obj) {
    var i = arr.length;
    while (i--) {
      if (arr[i] === obj) return i;
    }
    return -1;
  }

  /**
   * The relative require() itself.
   */
  function localRequire(path) {
    var resolved = localRequire.resolve(path);
    return require(resolved, parent, path);
  }

  /**
   * Resolve relative to the parent.
   */
  localRequire.resolve = function(path) {
    var c = path.charAt(0);
    // Absolute path: strip the leading slash, module ids are stored without it.
    if ('/' == c) return path.slice(1);
    // Relative path: normalize against the parent's directory.
    if ('.' == c) return require.normalize(p, path);

    // resolve deps by returning
    // the dep in the nearest "deps"
    // directory
    var segs = parent.split('/');
    var i = lastIndexOf(segs, 'deps') + 1;
    // NOTE(review): lastIndexOf already yields -1 -> i === 0 here,
    // so this guard is effectively a no-op kept for clarity.
    if (!i) i = 0;
    path = segs.slice(0, i + 1).join('/') + '/deps/' + path;
    return path;
  };

  /**
   * Check if module is defined at `path`.
   */
  localRequire.exists = function(path) {
    return require.modules.hasOwnProperty(localRequire.resolve(path));
  };

  return localRequire;
};
require.register("calvinmetcalf-setImmediate/lib/index.js", function(exports, require, module){
"use strict";

// Candidate async-scheduling strategies, ordered best-first.
// The first one whose test() passes is installed below.
var types = [
    require("./nextTick"),
    require("./mutation"),
    require("./postMessage"),
    require("./messageChannel"),
    require("./stateChange"),
    require("./timeout")
];

// FIFO queue of pending tasks, flushed in one batch per tick.
var handlerQueue = [];

// Run every queued task. Tasks queued while draining land in a fresh
// handlerQueue and run on the next tick instead.
function drainQueue() {
    var i = 0,
        task,
        innerQueue = handlerQueue;
    handlerQueue = [];
    /*jslint boss: true */
    while (task = innerQueue[i++]) {
        task();
    }
}

var nextTick;
// Pick the first supported strategy; install() returns the trigger.
types.some(function (obj) {
    var t = obj.test();
    if (t) {
        nextTick = obj.install(drainQueue);
    }
    return t;
});

// Public setImmediate-like entry point: queue `task` (optionally
// pre-bound to extra arguments) and return its 1-based queue handle.
var retFunc = function (task) {
    var len, args;
    if (arguments.length > 1 && typeof task === "function") {
        args = Array.prototype.slice.call(arguments, 1);
        // `undefined` placeholder keeps task's own `this` binding free.
        args.unshift(undefined);
        task = task.bind.apply(task, args);
    }
    // Only schedule a drain when the queue transitions from empty.
    if ((len = handlerQueue.push(task)) === 1) {
        nextTick(drainQueue);
    }
    return len;
};

// Cancel the task with handle `n` by replacing it with a no-op.
retFunc.clear = function (n) {
    if (n <= handlerQueue.length) {
        handlerQueue[n - 1] = function () {};
    }
    return this;
};

module.exports = retFunc;
});
require.register("calvinmetcalf-setImmediate/lib/nextTick.js", function(exports, require, module){
"use strict";
exports.test = function () {
// Don't get fooled by e.g. browserify environments.
return typeof process === "object" && Object.prototype.toString.call(process) === "[object process]";
};
exports.install = function () {
return process.nextTick;
};
});
require.register("calvinmetcalf-setImmediate/lib/postMessage.js", function(exports, require, module){
"use strict";
var globe = require("./global");

exports.test = function () {
    // The test against `importScripts` prevents this implementation from being installed inside a web worker,
    // where `global.postMessage` means something completely different and can"t be used for this purpose.
    if (!globe.postMessage || globe.importScripts) {
        return false;
    }
    // Some environments dispatch postMessage synchronously, which would
    // defeat the purpose; probe for that before accepting this strategy.
    var postMessageIsAsynchronous = true;
    var oldOnMessage = globe.onmessage;
    globe.onmessage = function () {
        postMessageIsAsynchronous = false;
    };
    globe.postMessage("", "*");
    globe.onmessage = oldOnMessage;
    return postMessageIsAsynchronous;
};

exports.install = function (func) {
    // Random token so we only react to our own messages.
    var codeWord = "com.calvinmetcalf.setImmediate" + Math.random();
    function globalMessage(event) {
        if (event.source === globe && event.data === codeWord) {
            func();
        }
    }
    if (globe.addEventListener) {
        globe.addEventListener("message", globalMessage, false);
    } else {
        // Legacy IE event attachment.
        globe.attachEvent("onmessage", globalMessage);
    }
    // The trigger: posting the token queues a call to func().
    return function () {
        globe.postMessage(codeWord, "*");
    };
};
});
require.register("calvinmetcalf-setImmediate/lib/messageChannel.js", function(exports, require, module){
"use strict";
var globe = require("./global");
exports.test = function () {
return !!globe.MessageChannel;
};
exports.install = function (func) {
var channel = new globe.MessageChannel();
channel.port1.onmessage = func;
return function () {
channel.port2.postMessage(0);
};
};
});
require.register("calvinmetcalf-setImmediate/lib/stateChange.js", function(exports, require, module){
"use strict";
var globe = require("./global");

// Works in browsers (notably old IE) where dynamically inserted
// <script> elements fire readystatechange asynchronously.
exports.test = function () {
    return "document" in globe && "onreadystatechange" in globe.document.createElement("script");
};

exports.install = function (handle) {
    return function () {
        // Create a <script> element; its readystatechange event will be fired asynchronously once it is inserted
        // into the document. Do so, thus queuing up the task. Remember to clean up once it's been called.
        var scriptEl = globe.document.createElement("script");
        scriptEl.onreadystatechange = function () {
            handle();
            // Detach the handler and remove the node to avoid leaks.
            scriptEl.onreadystatechange = null;
            scriptEl.parentNode.removeChild(scriptEl);
            scriptEl = null;
        };
        globe.document.documentElement.appendChild(scriptEl);
        return handle;
    };
};
});
require.register("calvinmetcalf-setImmediate/lib/timeout.js", function(exports, require, module){
"use strict";
exports.test = function () {
return true;
};
exports.install = function (t) {
return function () {
setTimeout(t, 0);
};
};
});
require.register("calvinmetcalf-setImmediate/lib/global.js", function(exports, require, module){
// Export the global object: `global` in CommonJS-style bundles,
// otherwise fall back to `this`.
var hasGlobal = typeof global === "object" && global;
module.exports = hasGlobal ? global : this;
});
require.register("calvinmetcalf-setImmediate/lib/mutation.js", function(exports, require, module){
"use strict";
//based off rsvp
//https://github.com/tildeio/rsvp.js/blob/master/lib/rsvp/async.js
var globe = require("./global");
var MutationObserver = globe.MutationObserver || globe.WebKitMutationObserver;

// NOTE: returns the observer constructor itself (truthy/falsy), not a
// strict boolean; the caller only checks truthiness.
exports.test = function () {
    return MutationObserver;
};

exports.install = function (handle) {
    // Observe attribute changes on a detached element; the trigger
    // below mutates an attribute, which queues `handle` asynchronously.
    var observer = new MutationObserver(handle);
    var element = globe.document.createElement("div");
    observer.observe(element, { attributes: true });

    // Chrome Memory Leak: https://bugs.webkit.org/show_bug.cgi?id=93661
    globe.addEventListener("unload", function () {
        observer.disconnect();
        observer = null;
    }, false);
    return function () {
        element.setAttribute("drainQueue", "drainQueue");
    };
};
});
require.register("lie/lie.js", function(exports, require, module){
var immediate = require('immediate');

// Creates a deferred: an object with a promise and corresponding resolve/reject methods
function Promise(resolver) {
  // Allow construction without `new`.
  if (!(this instanceof Promise)) {
    return new Promise(resolver);
  }
  // Callbacks registered while the promise is still pending.
  var queue = [];
  // false while pending; afterwards holds the settled .then handler.
  var resolved = false;
  // The `handler` variable points to the function that will
  // 1) handle a .then(onFulfilled, onRejected) call
  // 2) handle a .resolve or .reject call (if not fulfilled)
  // Before 2), `handler` holds a queue of callbacks.
  // After 2), `handler` is a simple .then handler.
  // We use only one function to save memory and complexity.

  // Case 1) handle a .then(onFulfilled, onRejected) call
  function pending(onFulfilled, onRejected){
    return Promise(function(resolver,rejecter){
      queue.push({
        resolve: onFulfilled,
        reject: onRejected,
        resolver:resolver,
        rejecter:rejecter
      });
    });
  }
  function then(onFulfilled, onRejected) {
    return resolved?resolved(onFulfilled, onRejected):pending(onFulfilled, onRejected);
  }
  // Case 2) handle a .resolve or .reject call
  // (`onFulfilled` acts as a sentinel)
  // The actual function signature is
  // .re[ject|solve](sentinel, success, value)
  function resolve(success, value){
    var action = success ? 'resolve' : 'reject';
    var queued;
    var callback;
    for (var i = 0, l = queue.length; i < l; i++) {
      queued = queue[i];
      callback = queued[action];
      if (typeof callback === 'function') {
        // Run user callbacks asynchronously, as Promises/A+ requires.
        immediate(execute,callback, value, queued.resolver, queued.rejecter);
      }else if(success){
        // No handler registered: pass the value through to the child.
        queued.resolver(value);
      }else{
        queued.rejecter(value);
      }
    }
    // Replace this handler with a simple resolved or rejected handler
    resolved = createHandler(then, value, success);
  }
  this.then = then;
  // Settlement entry points; both are no-ops once settled.
  function yes(value) {
    if (!resolved) {
      resolve(true, value);
    }
  }
  function no (reason) {
    if (!resolved) {
      resolve(false, reason);
    }
  }
  try{
    // Resolving with a thenable adopts its eventual state.
    resolver(function(a){
      if(a && typeof a.then==='function'){
        a.then(yes,no);
      }else{
        yes(a);
      }
    },no);
  }catch(e){
    // A throwing resolver rejects the promise.
    no(e);
  }
}

// Creates a fulfilled or rejected .then function
function createHandler(then, value, success) {
  return function(onFulfilled, onRejected) {
    var callback = success ? onFulfilled : onRejected;
    // Non-function handler: propagate the settled state unchanged.
    if (typeof callback !== 'function') {
      return Promise(function(resolve,reject){
        then(resolve,reject);
      });
    }
    return Promise(function(resolve,reject){
      immediate(execute,callback,value,resolve,reject);
    });
  };
}

// Executes the callback with the specified value,
// resolving or rejecting the deferred
function execute(callback, value, resolve, reject) {
  try {
    var result = callback(value);
    // Chain onto a returned thenable.
    if (result && typeof result.then === 'function') {
      result.then(resolve, reject);
    }
    else {
      resolve(result);
    }
  }
  catch (error) {
    reject(error);
  }
}
module.exports = Promise;
});
// Wire up module ids so that require("immediate") and the internal
// "lie/deps/immediate/*" paths all resolve to the setImmediate package.
require.alias("calvinmetcalf-setImmediate/lib/index.js", "lie/deps/immediate/lib/index.js");
require.alias("calvinmetcalf-setImmediate/lib/nextTick.js", "lie/deps/immediate/lib/nextTick.js");
require.alias("calvinmetcalf-setImmediate/lib/postMessage.js", "lie/deps/immediate/lib/postMessage.js");
require.alias("calvinmetcalf-setImmediate/lib/messageChannel.js", "lie/deps/immediate/lib/messageChannel.js");
require.alias("calvinmetcalf-setImmediate/lib/stateChange.js", "lie/deps/immediate/lib/stateChange.js");
require.alias("calvinmetcalf-setImmediate/lib/timeout.js", "lie/deps/immediate/lib/timeout.js");
require.alias("calvinmetcalf-setImmediate/lib/global.js", "lie/deps/immediate/lib/global.js");
require.alias("calvinmetcalf-setImmediate/lib/mutation.js", "lie/deps/immediate/lib/mutation.js");
require.alias("calvinmetcalf-setImmediate/lib/index.js", "lie/deps/immediate/index.js");
require.alias("calvinmetcalf-setImmediate/lib/index.js", "immediate/index.js");
require.alias("calvinmetcalf-setImmediate/lib/index.js", "calvinmetcalf-setImmediate/index.js");
require.alias("lie/lie.js", "lie/index.js");
// Expose the bundled Promise implementation on Leaflet's util namespace.
L.Util.Promise = require("lie");
})();
/**
 * Fetch JSON from `url` via XMLHttpRequest and return a promise for
 * the parsed body. Supported options:
 *   - jsonp: delegate to the JSONP transport instead
 *   - local: accept any status < 400 (file:// URLs report status 0)
 *   - evil:  allow eval()-based parsing when window.JSON is missing
 * The returned promise carries an `abort()` method that rejects it
 * and aborts the underlying request.
 */
L.Util.ajax = function(url, options) {
  'use strict';
  options = options || {};
  if (options.jsonp) {
    // BUGFIX: the JSONP transport is defined as L.Util.jsonp below;
    // the previous call to the undefined L.Util.ajax.jsonp threw a
    // TypeError whenever options.jsonp was set.
    return L.Util.jsonp(url, options);
  }
  var request;
  var cancel;
  var out = L.Util.Promise(function(resolve,reject){
    var Ajax;
    cancel=reject;
    // the following is from JavaScript: The Definitive Guide
    if (window.XMLHttpRequest === undefined) {
      // Legacy IE fallback via ActiveX.
      Ajax = function() {
        try {
          return new ActiveXObject('Microsoft.XMLHTTP.6.0');
        }
        catch (e1) {
          try {
            return new ActiveXObject('Microsoft.XMLHTTP.3.0');
          }
          catch (e2) {
            reject('XMLHttpRequest is not supported');
          }
        }
      };
    }
    else {
      Ajax = window.XMLHttpRequest;
    }
    var response;
    request = new Ajax();
    request.open('GET', url);
    request.onreadystatechange = function() {
      /*jslint evil: true */
      if (request.readyState === 4) {
        if((request.status < 400&&options.local)|| request.status===200) {
          if (window.JSON) {
            response = JSON.parse(request.responseText);
          } else if (options.evil) {
            // Opt-in eval() "parser" for ancient browsers without JSON.
            response = eval('(' + request.responseText + ')');
          }
          resolve(response);
        } else {
          // Status 0 usually means a blocked cross-origin request.
          if(!request.status){
            reject('Attempted cross origin request without CORS enabled');
          }else{
            reject(request.statusText);
          }
        }
      }
    };
    request.send();
  });
  // On rejection (including external abort) stop the network request.
  out.then(null,function(reason){
    request.abort();
    return reason;
  });
  out.abort = cancel;
  return out;
};
/**
 * Fetch JSON from `url` via a JSONP <script> tag and return a promise
 * for the payload. Supported options:
 *   - cbParam:      query parameter carrying the callback name ('callback')
 *   - callbackName: use a fixed, caller-managed global callback instead
 *                   of a generated one under L.Util.jsonp.cb
 * The returned promise carries an `abort()` method that rejects it
 * and removes the injected <script> element.
 */
L.Util.jsonp = function(url, options) {
  options = options || {};
  var head = document.getElementsByTagName('head')[0];
  var scriptNode = L.DomUtil.create('script', '', head);
  var cbName, ourl, cbSuffix, cancel;
  var out = L.Util.Promise(function(resolve, reject){
    cancel=reject;
    var cbParam = options.cbParam || 'callback';
    if (options.callbackName) {
      cbName = options.callbackName;
    }
    else {
      // Generate a unique global callback under L.Util.jsonp.cb.
      cbSuffix = '_' + ('' + Math.random()).slice(2);
      cbName = 'L.Util.jsonp.cb.' + cbSuffix;
    }
    scriptNode.type = 'text/javascript';
    if (cbSuffix) {
      L.Util.jsonp.cb[cbSuffix] = function(data) {
        // Success: clean up the script tag and the callback slot.
        head.removeChild(scriptNode);
        delete L.Util.jsonp.cb[cbSuffix];
        resolve(data);
      };
    }
    if (url.indexOf('?') === -1) {
      ourl = url + '?' + cbParam + '=' + cbName;
    }
    else {
      ourl = url + '&' + cbParam + '=' + cbName;
    }
    scriptNode.src = ourl;
  }).then(null,function(reason){
    head.removeChild(scriptNode);
    // BUGFIX: callbacks are registered in L.Util.jsonp.cb (see above);
    // the old code deleted from the non-existent L.Util.ajax.cb, which
    // threw and leaked the callback on every failed/aborted request.
    delete L.Util.jsonp.cb[cbSuffix];
    return reason;
  });
  out.abort = cancel;
  return out;
};
// Registry of generated JSONP callbacks, keyed by random suffix.
L.Util.jsonp.cb = {};
// GeoJSON layer that loads its features from one or more URLs via
// AJAX or JSONP. Fires 'data:loading', 'data:progress' (per URL) and
// 'data:loaded' (when all URLs finished).
L.GeoJSON.AJAX = L.GeoJSON.extend({
  defaultAJAXparams: {
    dataType: 'json',        // 'json' (XHR) or 'jsonp'
    callbackParam: 'callback',
    local:false,             // accept status < 400 (file:// URLs)
    // Hook to transform the raw payload before addData().
    middleware: function(f) {
      return f;
    }
  },

  // url may be a single URL string, an array of URLs, or omitted
  // (in which case the first argument is treated as options).
  initialize: function(url, options) {
    this.urls = [];
    if (url) {
      if (typeof url === 'string') {
        this.urls.push(url);
      }
      else if (typeof url.pop === 'function') {
        this.urls = this.urls.concat(url);
      }
      else {
        // First argument is actually the options object.
        options = url;
        url = undefined;
      }
    }
    // Copy only the recognized AJAX parameters out of options.
    var ajaxParams = L.Util.extend({}, this.defaultAJAXparams);
    for (var i in options) {
      if (this.defaultAJAXparams.hasOwnProperty(i)) {
        ajaxParams[i] = options[i];
      }
    }
    this.ajaxParams = ajaxParams;
    this._layers = {};
    L.Util.setOptions(this, options);
    // Re-apply any active filter once all data has arrived.
    this.on('data:loaded', function() {
      if (this.filter) {
        this.refilter(this.filter);
      }
    }, this);
    var self = this;
    // Defer the initial load one tick so callers can attach listeners.
    if (this.urls.length > 0) {
      L.Util.Promise(function(yes){
        yes();
      }).then(function(){
        self.addUrl();
      });
    }
  },

  // Drop all loaded features and forget the source URLs.
  clearLayers: function() {
    this.urls = [];
    L.GeoJSON.prototype.clearLayers.call(this);
    return this;
  },

  // Add url(s) to the list and (re)load every registered URL.
  addUrl: function(url) {
    var self = this;
    if (url) {
      if (typeof url === 'string') {
        self.urls.push(url);
      }
      else if (typeof url.pop === 'function') {
        self.urls = self.urls.concat(url);
      }
    }
    var loading = self.urls.length;
    var done = 0;
    self.fire('data:loading');
    self.urls.forEach(function(url) {
      if (self.ajaxParams.dataType.toLowerCase() === 'json') {
        L.Util.ajax(url,self.ajaxParams).then(function(d) {
          var data = self.ajaxParams.middleware(d);
          self.addData(data);
          self.fire('data:progress',data);
        },function(err){
          // Errors are reported through the same progress event.
          self.fire('data:progress',{error:err});
        });
      }
      else if (self.ajaxParams.dataType.toLowerCase() === 'jsonp') {
        L.Util.jsonp(url,self.ajaxParams).then(function(d) {
          var data = self.ajaxParams.middleware(d);
          self.addData(data);
          self.fire('data:progress',data);
        },function(err){
          self.fire('data:progress',{error:err});
        });
      }
    });
    // Fire 'data:loaded' once every URL has reported progress.
    // NOTE(review): a new listener is registered on every addUrl()
    // call and never removed — repeated refresh() calls accumulate
    // listeners; verify whether this double-fires 'data:loaded'.
    self.on('data:progress', function() {
      if (++done === loading) {
        self.fire('data:loaded');
      }
    });
  },

  // Reload the layer, optionally from new URL(s).
  refresh: function(url) {
    url = url || this.urls;
    this.clearLayers();
    this.addUrl(url);
  },

  // Apply (or with a non-function argument: clear) a feature filter by
  // toggling stroke/clickability of each layer instead of removing it.
  refilter: function(func) {
    if (typeof func !== 'function') {
      this.filter = false;
      this.eachLayer(function(a) {
        a.setStyle({
          stroke: true,
          clickable: true
        });
      });
    }
    else {
      this.filter = func;
      this.eachLayer(function(a) {
        if (func(a.feature)) {
          a.setStyle({
            stroke: true,
            clickable: true
          });
        }
        else {
          a.setStyle({
            stroke: false,
            clickable: false
          });
        }
      });
    }
  }
});
// Factory following the Leaflet lowercase convention.
L.geoJson.ajax = function(geojson, options) {
  return new L.GeoJSON.AJAX(geojson, options);
};

View File

@ -0,0 +1,125 @@
/*
Leaflet.AwesomeMarkers, a plugin that adds colorful iconic markers for Leaflet, based on the Font Awesome icons
(c) 2012-2013, Lennard Voogdt
http://leafletjs.com
https://github.com/lvoogdt
*/
/*global L*/
(function (window, document, undefined) {
  "use strict";
  /*
   * Leaflet.AwesomeMarkers assumes that you have already included the Leaflet library.
   */
  L.AwesomeMarkers = {};
  L.AwesomeMarkers.version = '2.0.1';

  // Marker icon rendered as a colored pin <div> containing an icon
  // font glyph (Font Awesome / Bootstrap glyphicons / ionicons).
  L.AwesomeMarkers.Icon = L.Icon.extend({
    options: {
      iconSize: [35, 45],
      iconAnchor: [17, 42],
      popupAnchor: [1, -32],
      shadowAnchor: [10, 12],
      shadowSize: [36, 16],
      className: 'awesome-marker',
      prefix: 'glyphicon',     // icon font CSS prefix, e.g. 'fa'
      spinClass: 'fa-spin',
      extraClasses: '',
      icon: 'home',
      markerColor: 'blue',     // pin color, mapped to CSS class icon-<color>
      iconColor: 'white'       // glyph color: named class or inline style
    },

    initialize: function (options) {
      options = L.Util.setOptions(this, options);
    },

    // Builds the pin element for Leaflet to place on the map.
    createIcon: function () {
      var div = document.createElement('div'),
        options = this.options;
      if (options.icon) {
        div.innerHTML = this._createInner();
      }
      if (options.bgPos) {
        div.style.backgroundPosition =
          (-options.bgPos.x) + 'px ' + (-options.bgPos.y) + 'px';
      }
      this._setIconStyles(div, 'icon-' + options.markerColor);
      return div;
    },

    // Builds the inner <i> glyph markup as an HTML string.
    _createInner: function() {
      var iconClass, iconSpinClass = "", iconColorClass = "", iconColorStyle = "", options = this.options;

      // Accept either a bare icon name ('home') or an already
      // prefixed one ('fa-home').
      if(options.icon.slice(0,options.prefix.length+1) === options.prefix + "-") {
        iconClass = options.icon;
      } else {
        iconClass = options.prefix + "-" + options.icon;
      }

      if(options.spin && typeof options.spinClass === "string") {
        iconSpinClass = options.spinClass;
      }

      if(options.iconColor) {
        // white/black have dedicated CSS classes; any other color
        // is applied as an inline style.
        if(options.iconColor === 'white' || options.iconColor === 'black') {
          iconColorClass = "icon-" + options.iconColor;
        } else {
          iconColorStyle = "style='color: " + options.iconColor + "' ";
        }
      }

      return "<i " + iconColorStyle + "class='" + options.extraClasses + " " + options.prefix + " " + iconClass + " " + iconSpinClass + " " + iconColorClass + "'></i>";
    },

    // Applies size/anchor CSS to the pin or its shadow element.
    // `name` is either 'shadow' or 'icon-<color>'.
    _setIconStyles: function (img, name) {
      var options = this.options,
        size = L.point(options[name === 'shadow' ? 'shadowSize' : 'iconSize']),
        anchor;

      if (name === 'shadow') {
        anchor = L.point(options.shadowAnchor || options.iconAnchor);
      } else {
        anchor = L.point(options.iconAnchor);
      }

      // Default anchor: center of the element.
      if (!anchor && size) {
        anchor = size.divideBy(2, true);
      }

      img.className = 'awesome-marker-' + name + ' ' + options.className;

      if (anchor) {
        img.style.marginLeft = (-anchor.x) + 'px';
        img.style.marginTop = (-anchor.y) + 'px';
      }

      if (size) {
        img.style.width = size.x + 'px';
        img.style.height = size.y + 'px';
      }
    },

    createShadow: function () {
      var div = document.createElement('div');
      this._setIconStyles(div, 'shadow');
      return div;
    }
  });

  // Factory following the Leaflet lowercase convention.
  L.AwesomeMarkers.icon = function (options) {
    return new L.AwesomeMarkers.Icon(options);
  };

}(this, document));

File diff suppressed because one or more lines are too long

Binary file not shown.

After

Width:  |  Height:  |  Size: 157 B

2140
colors.txt Normal file

File diff suppressed because it is too large Load Diff

102
common/area.go Normal file
View File

@ -0,0 +1,102 @@
// Copyright 2014, 2015, 2017 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"math"
)
// Area is an axis-aligned rectangle in the (x, z) block plane,
// given by its inclusive corners (X1, Z1) and (X2, Z2).
type Area struct {
	X1, Z1 int16
	X2, Z2 int16
}
// contains reports whether the point (x, z) lies inside the area,
// borders included.
func (a Area) contains(x, z int16) bool {
	if x < a.X1 || x > a.X2 {
		return false
	}
	return z >= a.Z1 && z <= a.Z2
}
// higher reports whether the area extends further in the z direction
// than in the x direction.
func (a Area) higher() bool {
	width := a.X2 - a.X1
	height := a.Z2 - a.Z1
	return height > width
}
// areasContain reports whether any area in the slice contains the
// point (x, z).
func areasContain(areas []Area, x, z int16) bool {
	for i := range areas {
		if areas[i].contains(x, z) {
			return true
		}
	}
	return false
}
// recalculate implements a greedy algorithm to figure out
// a list of disjunct areas of free regions in the domain
// to the (x, z) block plane.
// oldAreas are searched and found free areas are appended
// to newAreas which is returned.
// This is useful to spatially query only blocks from db
// that are not below already rendered blocks.
func (area Area) recalculate(r *Renderer, nareas []Area) []Area {
	yM := r.yMin
	// Bit flags for the two directions an area can still grow in.
	const ex = 1
	const ez = 2
	// Only areas appended by this call count as "new"; earlier entries
	// in nareas belong to other source areas and must not be checked.
	nas := len(nareas)
	for z := area.Z1; z <= area.Z2; z++ {
		row := z * int16(r.width)
		for x := area.X1; x <= area.X2; x++ {
			// Uncovered and not in list of new areas?
			// (yM > MinInt32 means a pixel was already rendered here.)
			if yM[row+x] > math.MinInt32 || areasContain(nareas[nas:], x, z) {
				continue
			}
			// Seed a 1x1 area at the free cell.
			a := Area{X1: x, Z1: z, X2: x, Z2: z}
			// Try to extend the area in x and/or z till no further extension is possible.
		ext:
			for extend := ex | ez; extend != 0; {
				// If we are extending in both directions and the current area
				// is higher than wide we gain more blocks if we extend
				// in the x direction first.
				if (extend == ex|ez && a.higher()) || extend&ex == ex { // check x
					nx := a.X2 + 1
					if nx > area.X2 { // reached border of area
						extend &= ^ex
						continue
					}
					// Check column right of the current area if its fully free.
					for nz := a.Z1; nz <= a.Z2; nz++ {
						if yM[nz*int16(r.width)+nx] > math.MinInt32 ||
							areasContain(nareas[nas:], nx, nz) {
							extend &= ^ex
							continue ext
						}
					}
					// free -> extend
					a.X2 = nx
				} else if extend&ez == ez { // check z
					nz := a.Z2 + 1
					if nz > area.Z2 {
						extend &= ^ez
						continue
					}
					// Check line right below the current area if its free.
					row2 := nz * int16(r.width)
					for nx := a.X1; nx <= a.X2; nx++ {
						if yM[row2+nx] > math.MinInt32 ||
							areasContain(nareas[nas:], nx, nz) {
							extend &= ^ez
							continue ext
						}
					}
					// free -> extend
					a.Z2 = nz
				}
			}
			// At this point the area is extended to max.
			nareas = append(nareas, a)
		}
	}
	return nareas
}

206
common/basetilecreator.go Normal file
View File

@ -0,0 +1,206 @@
// Copyright 2014 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"image/color"
"io/ioutil"
"log"
"path/filepath"
"strconv"
)
const (
	// Tile dimensions in map blocks. Apparently includes a one-block
	// border on each side that is cropped away when the shaded image
	// is created (see WriteFunc / blankImage) — TODO confirm.
	tileWidth  = 18
	tileHeight = 18
	// Capacity of the YOrder buffer used while rendering.
	yOrderCapacity = 512
)

// Vertical world limits (inclusive) considered for rendering.
const (
	MaxHeight = 1934
	MinHeight = -1934
)
// To scan the whole height in terms of the y coordinate
// the database is queried in height units defined in the tileDepths table.
// The slices are ordered from highest to lowest so rendering proceeds
// top-down and already covered areas can be skipped in deeper slices.
var tileDepths = [...][2]int16{
	{1024, MaxHeight},
	{256, 1023},
	{128, 255},
	{64, 127},
	{32, 63},
	{16, 31},
	{8, 15},
	{4, 7},
	{2, 3},
	{0, 1},
	{-1, 0},
	{-4, -2},
	{-8, -5},
	{-16, -9},
	{-32, -17},
	{-64, -33},
	{-128, -65},
	{-256, -129},
	{-1024, -257},
	{MinHeight, -1025}}

// BackgroundColor is the default map background (opaque white).
var BackgroundColor = color.RGBA{R: 0xff, G: 0xff, B: 0xff, A: 0xff}
// BaseTileUpdateFunc decides whether the tile at (x, y) with the given
// image hash needs to be (re-)written.
type BaseTileUpdateFunc func(x, y int, hash []byte) bool

// BaseTileCreator renders base tiles from map blocks fetched via a
// RedisClient and writes the resulting PNGs below baseDir.
type BaseTileCreator struct {
	client   *RedisClient // source of map blocks
	colors   *Colors      // node name -> color mapping
	renderer *Renderer
	yOrder   *YOrder
	yMin     int16 // lower rendering bound (inclusive)
	yMax     int16 // upper rendering bound (inclusive)
	baseDir  string
	// emptyImage caches the PNG encoding of an all-background tile.
	emptyImage []byte
	bg         color.RGBA
}
// NewBaseTileCreator prepares a creator for base tiles.
// client supplies the map blocks, colors maps node names to colors,
// bg is the background color, [yMin, yMax] limits the rendered height
// (swapped into ascending order if necessary), transparent enables
// transparent rendering and baseDir is the output directory.
func NewBaseTileCreator(
	client *RedisClient,
	colors *Colors,
	bg color.RGBA,
	yMin, yMax int16,
	transparent bool,
	baseDir string) *BaseTileCreator {
	renderer := NewRenderer(tileWidth, tileHeight, transparent)
	yMin, yMax = Order16(yMin, yMax)
	btc := &BaseTileCreator{
		client:   client,
		colors:   colors,
		bg:       bg,
		renderer: renderer,
		yMin:     yMin,
		yMax:     yMax,
		baseDir:  baseDir,
	}
	btc.yOrder = NewYOrder(btc.renderBlock, yOrderCapacity)
	return btc
}
// Close releases the creator's database client.
func (btc *BaseTileCreator) Close() error {
	return btc.client.Close()
}
// renderBlock is a callback to draw a block with a YOrder.
func (btc *BaseTileCreator) renderBlock(block *Block) error {
	return btc.renderer.RenderBlock(block, btc.colors)
}
// blockLoaded is a callback for RedisClient.QueryCuboid.
// Rendering failures are logged but do not abort the query; the
// (possibly recycled) block is handed back to the caller.
func (btc *BaseTileCreator) blockLoaded(block *Block) *Block {
	block, err := btc.yOrder.RenderBlock(block)
	if err != nil {
		log.Printf("WARN: rendering block failed: %s\n", err)
	}
	return block
}
// RenderArea renders the tile whose lower corner in block coordinates
// is (x, z). The y range [yMin, yMax] is scanned top-down in the
// slices defined by tileDepths; after each slice the set of still
// uncovered areas is recomputed so deeper queries only fetch blocks
// that can actually contribute pixels.
func (btc *BaseTileCreator) RenderArea(x, z int16) error {
	btc.renderer.Reset()
	btc.renderer.SetPos(x, z)
	btc.yOrder.Reset()

	var c1, c2 Coord

	nareas := make([]Area, 0, tileWidth*tileHeight/2)
	areas := make([]Area, 1, tileWidth*tileHeight/2)
	// Start with the whole tile as a single uncovered area.
	areas[0] = Area{
		X1: 0, Z1: 0,
		X2: int16(tileWidth) - 1, Z2: int16(tileHeight) - 1}

	for _, yRange := range tileDepths {
		// Skip slices fully outside the configured y window.
		if yRange[0] > btc.yMax || yRange[1] < btc.yMin {
			continue
		}
		c1.Y = max16(yRange[0], btc.yMin)
		c2.Y = min16(yRange[1], btc.yMax)
		for _, area := range areas {
			// Translate the tile-local area into world coordinates.
			c1.X = area.X1 + x
			c1.Z = area.Z1 + z
			c2.X = area.X2 + x
			c2.Z = area.Z2 + z
			query := Cuboid{P1: c1, P2: c2}
			var count int
			var err error
			if count, err = btc.client.QueryCuboid(query, btc.blockLoaded); err != nil {
				return err
			}
			if err = btc.yOrder.Drain(); err != nil {
				log.Printf("WARN: rendering block failed: %s\n", err)
			}

			// If there were loaded blocks in this area recalculate coverage.
			if count > 0 {
				nareas = area.recalculate(btc.renderer, nareas)
			} else {
				// Nothing found: the whole area stays uncovered.
				nareas = append(nareas, area)
			}
		}
		// Everything covered: no need to descend further.
		if len(nareas) == 0 {
			break
		}
		// Swap buffers: the freshly found free areas become the input
		// for the next (deeper) slice.
		areas, nareas = nareas, areas[:0]
	}
	return nil
}
// blankImage returns the PNG encoding of a tile consisting solely of
// the background color.
func (btc *BaseTileCreator) blankImage() []byte {
	// To avoid redundant encoding cache the resulting empty image.
	if btc.emptyImage == nil {
		m := BackgroundImage((tileWidth-2)*16, (tileHeight-2)*16, btc.bg)
		btc.emptyImage = EncodeToMem(m)
	}
	return btc.emptyImage
}
// WriteFunc returns a function intended to be run in background so
// the creation of the next tile with this creator can be done
// concurrently. The returned closure reports whether tile (i, j) was
// (re-)written plus any write error. If update is non-nil it decides,
// based on the image hash, whether writing is necessary at all.
func (btc *BaseTileCreator) WriteFunc(i, j int, update BaseTileUpdateFunc) func() (bool, error) {
	path := filepath.Join(btc.baseDir, strconv.Itoa(i), strconv.Itoa(j)+".png")
	// Empty images are likely to be produced during seeding.
	if update == nil && btc.renderer.IsEmpty() {
		return func() (bool, error) {
			//log.Printf("Writing empty (%d, %d) to file %s\n", x, z, path)
			return true, ioutil.WriteFile(path, btc.blankImage(), 0666)
		}
	}
	// The 16-pixel offset and the (tileWidth-2) sizing presumably crop
	// the one-block rendering border — TODO confirm against Renderer.
	image := btc.renderer.CreateShadedImage(
		16, 16, (tileWidth-2)*16, (tileHeight-2)*16,
		btc.colors, btc.bg)
	x, z := btc.renderer.GetPos()
	if update == nil {
		// Seeding without change tracking: always write.
		return func() (bool, error) {
			log.Printf("Writing (%d, %d) to file %s.\n", x, z, path)
			return true, SaveAsPNG(path, image)
		}
	}
	return func() (bool, error) {
		// Only write when the hash says the tile actually changed.
		if update(i, j, HashImage(image)) {
			log.Printf("Writing (%d, %d) to file %s.\n", x, z, path)
			return true, SaveAsPNGAtomic(path, image)
		}
		log.Printf("(%d, %d) is unchanged.\n", x, z)
		return false, nil
	}
}

92
common/basetilehash.go Normal file
View File

@ -0,0 +1,92 @@
// Copyright 2016 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"bytes"
"sync"
)
// btKey addresses a base tile by its x/y tile coordinates.
type btKey struct {
	x int
	y int
}

// btHashEntry is a node of the LRU ring: doubly linked and carrying
// the last known image hash for its tile.
type btHashEntry struct {
	prev *btHashEntry
	next *btHashEntry
	hash []byte
	btKey
}

// BaseTileHash is a bounded, mutex-protected LRU cache mapping tile
// coordinates to image hashes. root is the ring sentinel: root.next is
// the most recently used entry, root.prev the least recently used.
type BaseTileHash struct {
	hashes     map[btKey]*btHashEntry
	maxEntries int
	root       btHashEntry
	sync.Mutex
}
// NewBaseTileHash creates an LRU cache holding at most maxEntries
// tile hashes, with an initially empty ring (the sentinel points to
// itself in both directions).
func NewBaseTileHash(maxEntries int) *BaseTileHash {
	bth := &BaseTileHash{
		hashes:     map[btKey]*btHashEntry{},
		maxEntries: maxEntries,
	}
	bth.root.prev = &bth.root
	bth.root.next = &bth.root
	return bth
}
// toFront moves an existing entry to the most-recently-used position
// directly behind the sentinel.
func (bth *BaseTileHash) toFront(entry *btHashEntry) {
	// Already at the front: nothing to do.
	if bth.root.next == entry {
		return
	}
	// Unlink the entry from its current position ...
	entry.prev.next = entry.next
	entry.next.prev = entry.prev
	// ... and splice it in right after the sentinel.
	entry.next = bth.root.next
	entry.prev = &bth.root
	bth.root.next.prev = entry
	bth.root.next = entry
}
// removeLast unlinks the least recently used entry (root.prev) from
// the ring, removes it from the index and returns it for re-use.
func (bth *BaseTileHash) removeLast() *btHashEntry {
	last := bth.root.prev
	bth.root.prev = last.prev
	last.prev.next = &bth.root
	delete(bth.hashes, last.btKey)
	return last
}
// insertFront links a fresh entry in at the most-recently-used
// position directly behind the sentinel.
func (bth *BaseTileHash) insertFront(entry *btHashEntry) {
	entry.next = bth.root.next
	entry.prev = &bth.root
	bth.root.next.prev = entry
	bth.root.next = entry
}
// Update stores the hash for tile (x, y) and reports whether it
// changed: a hit with an equal hash returns false, a hit with a
// different hash or a miss returns true. When full, the least
// recently used entry is evicted. Safe for concurrent use.
func (bth *BaseTileHash) Update(x, y int, hash []byte) bool {
	bth.Lock()
	defer bth.Unlock()
	key := btKey{x, y}
	if old, found := bth.hashes[key]; found {
		if !bytes.Equal(old.hash, hash) {
			old.hash = hash
			bth.toFront(old)
			return true
		}
		// NOTE(review): an equal-hash hit does not refresh LRU
		// recency — verify this is intentional.
		return false
	}
	var entry *btHashEntry
	// Recycle the evicted entry to avoid an allocation.
	if len(bth.hashes) >= bth.maxEntries {
		entry = bth.removeLast()
	} else {
		entry = new(btHashEntry)
	}
	entry.btKey = key
	entry.hash = hash
	bth.hashes[key] = entry
	bth.insertFront(entry)
	return true
}

147
common/basetilehash_test.go Normal file
View File

@ -0,0 +1,147 @@
// Copyright 2016 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"math/rand"
"testing"
)
// randomBaseTileHash builds a BaseTileHash and feeds it `updates`
// pseudo-random Update calls, alternating between two hash values.
func randomBaseTileHash(updates int) *BaseTileHash {
	bth := NewBaseTileHash(256)
	hashes := [2][]byte{{1}, {2}}
	for i := 0; i < updates; i++ {
		x := rand.Intn(100)
		y := rand.Intn(100)
		bth.Update(x, y, hashes[i%2])
	}
	return bth
}
// TestBaseTileHashLenList checks that after random usage the LRU ring
// has the same length walked forward and backward and that this
// length matches the index map.
func TestBaseTileHashLenList(t *testing.T) {
	for _, updates := range []int{53, 111, 1345, 11261} {
		bth := randomBaseTileHash(updates)
		countNext := 0
		for cur := bth.root.next; cur != &bth.root; cur = cur.next {
			countNext++
		}
		countPrev := 0
		for cur := bth.root.prev; cur != &bth.root; cur = cur.prev {
			countPrev++
		}
		if countPrev != countNext {
			t.Errorf("count prev != count next: %d %d", countPrev, countNext)
		}
		if countPrev != len(bth.hashes) {
			t.Errorf("count prev != len(hash): %d %d", countPrev, len(bth.hashes))
		}
	}
}
// TestBaseTileHashIntegrity checks that the LRU ring and the index map
// describe exactly the same set of entries: no duplicates in the ring,
// every indexed entry linked, and every entry keyed consistently.
func TestBaseTileHashIntegrity(t *testing.T) {
	for _, updates := range []int{10, 100, 1000, 10000} {
		bth := randomBaseTileHash(updates)
		entries := map[*btHashEntry]bool{}
		// Walk the ring and detect duplicate nodes.
		for cur := bth.root.next; cur != &bth.root; cur = cur.next {
			if entries[cur] {
				t.Errorf("hash element found more than once: %d", updates)
			}
			entries[cur] = true
		}
		if len(entries) != len(bth.hashes) {
			// BUGFIX: corrected typo "differnt" in the failure message.
			t.Errorf("List has different length than hashes: %d : %d",
				len(entries), len(bth.hashes))
		}
		// Report each class of mismatch at most once to keep output readable.
		var already1 bool
		var already2 bool
		for k, v := range bth.hashes {
			if !entries[v] {
				if !already1 {
					already1 = true
					t.Errorf("Hash contains pointer to element not being in list: %d",
						updates)
				}
			}
			if k != v.btKey {
				if !already2 {
					already2 = true
					t.Errorf("Key in entry does not match hash key: %d", updates)
				}
			}
			delete(entries, v)
		}
		if len(entries) > 0 {
			t.Error("There are more entries than indexed by hash")
		}
	}
}
// TestBaseTileHashOverwrite checks the change-reporting contract of
// Update for a single key: new hash -> true, same hash -> false.
func TestBaseTileHashOverwrite(t *testing.T) {
	bth := NewBaseTileHash(256)
	h1 := []byte{1}
	h2 := []byte{2}
	if updated := bth.Update(0, 0, h1); !updated {
		t.Error("First insert does not trigger update")
	}
	if updated := bth.Update(0, 0, h2); !updated {
		t.Error("Second insert does not trigger update")
	}
	// Same key, same hash: must report "unchanged".
	if updated := bth.Update(0, 0, h2); updated {
		t.Error("Third insert does trigger update")
	}
}
// TestBaseTileHashSeparate checks that updates for distinct tile
// coordinates are stored as separate entries.
func TestBaseTileHashSeparate(t *testing.T) {
	bth := NewBaseTileHash(256)
	h1 := []byte{1}
	if updated := bth.Update(0, 0, h1); !updated {
		t.Error("First insert does not trigger update")
	}
	if updated := bth.Update(0, 1, h1); !updated {
		t.Error("Second insert does not trigger update")
	}
	if updated := bth.Update(1, 0, h1); !updated {
		// BUGFIX: this branch fires when the insert did NOT trigger an
		// update; the old message claimed the opposite.
		t.Error("Third insert does not trigger update")
	}
	if len(bth.hashes) != 3 {
		t.Errorf("Expected size to be 3. Current size: %d", len(bth.hashes))
	}
}
// TestBaseTileHashLRU checks that the cache never grows beyond its
// configured capacity (here 2) and evicts to make room.
func TestBaseTileHashLRU(t *testing.T) {
	bth := NewBaseTileHash(2)
	h1 := []byte{1}
	if updated := bth.Update(0, 0, h1); !updated {
		t.Error("First insert does not trigger update")
	}
	if updated := bth.Update(0, 1, h1); !updated {
		t.Error("Second insert does not trigger update")
	}
	if updated := bth.Update(1, 0, h1); !updated {
		// BUGFIX: this branch fires when the insert did NOT trigger an
		// update; the old message claimed the opposite.
		t.Error("Third insert does not trigger update")
	}
	if len(bth.hashes) != 2 {
		t.Errorf("Expected size to be 2. Current size: %d", len(bth.hashes))
	}
}

331
common/block.go Normal file
View File

@ -0,0 +1,331 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"bytes"
"compress/zlib"
"encoding/binary"
"errors"
"io"
"io/ioutil"
"log"
"sync"
)
// Errors used by the block reading/decoding machinery.
var (
	// ErrNoMoreBlocks is returned if a Producer has run to its end.
	ErrNoMoreBlocks = errors.New("No more blocks.")
	// ErrMapContentSizeMismatch signals that the decompressed map
	// payload had an unexpected length.
	ErrMapContentSizeMismatch = errors.New("Content size does not match.")
	// ErrBlockTruncated signals that serialized block data ended
	// prematurely.
	ErrBlockTruncated = errors.New("Block is truncated.")
)
const (
	// Map blocks are cubes of 16x16x16 nodes.
	mapBlockSize = 16
	nodeCount    = mapBlockSize * mapBlockSize * mapBlockSize
)
type (
	// Block data from Minetest database.
	Block struct {
		Coord Coord  // position in block coordinates
		Data  []byte // raw serialized block payload
	}
	// BlockProducer is used to iterate over an existing Minetest
	// database and return its content block by block.
	BlockProducer interface {
		// error is ErrNoMoreBlocks if it run out of blocks.
		Next(*Block) error
		// Closes the open database connections.
		Close() error
	}
	// BlockConsumer is used to store blocks in a new Minetest database.
	BlockConsumer interface {
		Consume(*Block) error
		// Closes the open database connections.
		Close() error
	}
	// DecodedBlock is the parsed form of a serialized block.
	DecodedBlock struct {
		Version     byte   // serialization format version
		Transparent bool   // block contains transparent nodes
		MapContent  []byte // decompressed node data
		AirID       int32  // node id mapped to "air" (-1 if absent)
		IgnoreID    int32  // node id mapped to "ignore" (-1 if absent)
		// IndexMap maps block-local node ids to color table indices.
		IndexMap map[int32]int32
	}
)
// zlibEmpty is a minimal zlib stream with zero length.
// zlib.NewReader needs a valid zlib stream to start with
// even if Reset is called directly afterwards.
var zlibEmpty = []byte{
	0x78, 0x9c, 0x00, 0x00,
	0x00, 0xff, 0xff, 0x01,
	0x00, 0x00, 0xff, 0xff,
	0x00, 0x00, 0x00, 0x01}
// zlibReaderPool is a pool of zlib Readers to be reused
// for decoding the compressed parts of database blocks.
// Decoding blocks relies heavily on zlib decompression.
// Reusing the internal structures of already allocated
// zlib readers speeds up the decoding significantly.
var zlibReaderPool = sync.Pool{
	New: func() interface{} {
		// Prime each reader with the empty stream so it is valid
		// until the first Reset.
		reader, _ := zlib.NewReader(bytes.NewBuffer(zlibEmpty))
		return reader
	},
}
// The content of the map and the meta data are compressed with zlib.
// Unfortunately the byte length of these two structures is not stored
// explicitly in the block data. To access the information behind
// them (e.g. the node id mappings) we have to count the bytes consumed
// by the zlib reader and continue our extraction process behind this
// offset. posBuf implements such a counting reader source.
type posBuf struct {
	Data []byte
	Pos  int // number of bytes handed out so far
}
// NewDecodedBlock decodes a serialized Minetest map block: the
// zlib-compressed node data, the zlib-compressed meta data (which
// is discarded), version dependent extra sections, the static
// objects and finally the node id -> node name table, which is
// resolved against the given color table.
// It returns ErrBlockTruncated whenever an offset would run past
// the end of data.
func NewDecodedBlock(data []byte, colors *Colors) (db *DecodedBlock, err error) {
	dataLen := len(data)
	if dataLen < 4 {
		return nil, ErrBlockTruncated
	}
	version := data[0]
	// Bytes per node for content ids and params; clamped to 2.
	contentWidth := Min(int(data[2]), 2)
	paramsWidth := Min(int(data[3]), 2)
	uncompressedLen := nodeCount * (contentWidth + paramsWidth)
	// Length of the fixed header before the compressed node data
	// depends on the serialization format version.
	var offset int
	switch {
	case version >= 27:
		offset = 6
	case version >= 22:
		offset = 4
	default:
		offset = 2
	}
	// Borrow a zlib reader from the pool to avoid re-allocating
	// its internal state for every block.
	zr := zlibReaderPool.Get().(interface {
		io.ReadCloser
		zlib.Resetter
	})
	defer func() {
		zr.Close() // This should not be necessary.
		zlibReaderPool.Put(zr)
	}()
	buf := posBuf{Data: data[offset:]}
	if err = zr.Reset(&buf, nil); err != nil {
		return
	}
	// Inflate the node data.
	mapContent := make([]byte, uncompressedLen)
	var k int
	k, err = io.ReadFull(zr, mapContent)
	if err != nil {
		return
	}
	if k != uncompressedLen {
		err = ErrMapContentSizeMismatch
		return
	}
	// There is a bug before Go 1.7 that enforces
	// to add 4 as an offset after the compressed
	// geometry data. This is resolved via build tags
	// and definitions in pre17offset.go and
	// post17offset.go.
	offset += buf.Pos + afterCompressOfs
	buf.Pos = 0
	if offset >= dataLen {
		return nil, ErrBlockTruncated
	}
	buf.Data = data[offset:]
	if err = zr.(zlib.Resetter).Reset(&buf, nil); err != nil {
		return
	}
	// Discard the meta data.
	if _, err = io.Copy(ioutil.Discard, zr); err != nil {
		return
	}
	offset += buf.Pos
	// Skip version dependent sections before the static objects
	// (presumably node timers in v24 — TODO confirm against the
	// Minetest serialization format).
	switch {
	case version <= 21:
		offset += 2
	case version == 23:
		offset++
	case version == 24:
		if offset >= dataLen {
			return nil, ErrBlockTruncated
		}
		ver := data[offset]
		offset++
		if ver == 1 {
			if offset+1 >= dataLen {
				return nil, ErrBlockTruncated
			}
			num := int(binary.BigEndian.Uint16(data[offset:]))
			// 10 bytes per entry.
			offset += 2 + 10*num
		}
	}
	// Skip the static objects: a version byte, a big endian count
	// and per object 13 fixed bytes plus a length prefixed payload.
	offset++
	if offset+1 >= dataLen {
		return nil, ErrBlockTruncated
	}
	numStaticObjects := int(binary.BigEndian.Uint16(data[offset:]))
	offset += 2
	for i := 0; i < numStaticObjects; i++ {
		offset += 13
		if offset+1 >= dataLen {
			return nil, ErrBlockTruncated
		}
		dataSize := int(binary.BigEndian.Uint16(data[offset:]))
		offset += dataSize + 2
	}
	offset += 4 // presumably the block timestamp — TODO confirm.
	airID, ignoreID := int32(-1), int32(-1)
	indexMap := make(map[int32]int32)
	var transparent bool
	if version >= 22 {
		offset++ // name-id mapping version byte.
		if offset+1 >= dataLen {
			return nil, ErrBlockTruncated
		}
		numMappings := int(binary.BigEndian.Uint16(data[offset:]))
		offset += 2
		// Be a bit more tolerant with truncated node name table.
		// We should probably issue an error here, too!?
		const outOfBounds = "Offset in node id table out of bounds. Ignored."
		for i := 0; i < numMappings; i++ {
			if offset+1 >= dataLen {
				log.Println(outOfBounds)
				break
			}
			nodeID := int32(binary.BigEndian.Uint16(data[offset:]))
			offset += 2
			if offset+1 >= dataLen {
				log.Println(outOfBounds)
				break
			}
			nameLen := int(binary.BigEndian.Uint16(data[offset:]))
			offset += 2
			if offset+nameLen-1 >= dataLen {
				log.Println(outOfBounds)
				break
			}
			name := string(data[offset : offset+nameLen])
			offset += nameLen
			// "air" and "ignore" are tracked separately; all other
			// nodes are mapped to color table indices.
			switch name {
			case "air":
				airID = nodeID
			case "ignore":
				ignoreID = nodeID
			default:
				if index, found := colors.NameIndex[name]; found {
					indexMap[nodeID] = index
					if !transparent && colors.IsTransparent(index) {
						transparent = true
					}
				} else {
					logMissing(name)
				}
			}
		}
	}
	db = &DecodedBlock{
		Version:     version,
		Transparent: transparent,
		MapContent:  mapContent,
		AirID:       airID,
		IgnoreID:    ignoreID,
		IndexMap:    indexMap}
	return
}
// missingColors remembers node names already reported as missing
// from the color table so each is only logged once.
var missingColors = struct {
	sync.Mutex
	cols map[string]struct{}
}{cols: map[string]struct{}{}}
// logMissing reports a node name without a color table entry,
// but only the first time that name is encountered.
func logMissing(name string) {
	missingColors.Lock()
	defer missingColors.Unlock()
	if _, seen := missingColors.cols[name]; seen {
		return
	}
	missingColors.cols[name] = struct{}{}
	log.Printf("Missing color entry for %s.\n", name)
}
// AirOnly reports whether the decoded block contains nothing but
// air nodes (an air id was found and no other node was mapped).
func (db *DecodedBlock) AirOnly() bool {
	if db.AirID == -1 {
		return false
	}
	return len(db.IndexMap) == 0
}
// Content returns the color table index of the node at the block
// local position (x, y, z), each in [0, 15]. found is false for
// air/ignore nodes and for ids without a color mapping; in that
// case content holds the raw node id.
func (db *DecodedBlock) Content(x, y, z int) (content int32, found bool) {
	// Nodes are stored z-major: pos = z*256 + y*16 + x.
	pos := z<<8 + y<<4 + x
	switch {
	case db.Version >= 24:
		// Two bytes per node, big endian.
		pos <<= 1
		content = int32(db.MapContent[pos])<<8 | int32(db.MapContent[pos+1])
	case db.Version >= 20:
		// Legacy format: ids <= 0x80 fit in one byte; larger ids
		// take their low nibble from a second table at +0x2000.
		if c := db.MapContent[pos]; c <= 0x80 {
			content = int32(c)
		} else {
			content = int32(c)<<4 | int32(db.MapContent[pos+0x2000])>>4
		}
	default:
		// Versions < 20 are not supported; return (0, false).
		return
	}
	if content != db.AirID && content != db.IgnoreID {
		content, found = db.IndexMap[content]
	}
	return
}
// Read implements io.Reader over the remaining bytes of the buffer,
// advancing Pos by the number of bytes handed out. A short read is
// reported together with io.EOF.
func (pb *posBuf) Read(p []byte) (int, error) {
	avail := len(pb.Data) - pb.Pos
	if avail <= 0 {
		return 0, io.EOF
	}
	n := copy(p, pb.Data[pb.Pos:])
	pb.Pos += n
	if n < len(p) {
		return n, io.EOF
	}
	return n, nil
}
// ReadByte implements io.ByteReader, advancing Pos by one.
func (pb *posBuf) ReadByte() (byte, error) {
	if pb.Pos < len(pb.Data) {
		b := pb.Data[pb.Pos]
		pb.Pos++
		return b, nil
	}
	return 0, io.EOF
}

152
common/colors.go Normal file
View File

@ -0,0 +1,152 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"bufio"
"fmt"
"image/color"
"log"
"os"
"sort"
"strconv"
"strings"
)
// DefaultTransparentDim sets the default dimming
// factor of transparent nodes to 2%.
// It is applied per meter of span height when blending.
const DefaultTransparentDim = 2.0 / 100.0
// Colors is the color table mapping node names to render colors.
type Colors struct {
	Colors         []color.RGBA     // Transparent entries sorted to the front.
	NameIndex      map[string]int32 // Node name -> index into Colors.
	NumTransparent int32            // Count of entries with alpha < 0xff.
	TransparentDim float32          // Extra dimming per meter for transparent spans.
}
// namedColor pairs a node name with its color while parsing.
type namedColor struct {
	name  string
	color color.RGBA
}
// sortByAlpha orders named colors by ascending alpha value, i.e.
// more transparent colors come first.
type sortByAlpha []namedColor

func (s sortByAlpha) Len() int      { return len(s) }
func (s sortByAlpha) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s sortByAlpha) Less(i, j int) bool {
	return s[i].color.A < s[j].color.A
}
// ParseColors loads a color table from filename. Each relevant line
// has the form "name R G B [A]" with decimal components; lines
// starting with '#' are treated as comments. The result is sorted
// so that all transparent colors come first.
func ParseColors(filename string) (colors *Colors, err error) {
	var file *os.File
	if file, err = os.Open(filename); err != nil {
		return
	}
	defer file.Close()
	cols := make([]namedColor, 0, 2200)
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "#") {
			continue
		}
		// Alpha defaults to opaque when the line has only R G B.
		c := color.RGBA{A: 0xff}
		var name string
		if n, _ := fmt.Sscanf(
			line, "%s %d %d %d %d", &name, &c.R, &c.G, &c.B, &c.A); n > 0 {
			cols = append(cols, namedColor{name: name, color: c})
		}
	}
	err = scanner.Err()
	// Sort transparent colors to front. Makes it easier to figure out
	// if an index corresponds to a transparent color (i < Transparent).
	sort.Sort(sortByAlpha(cols))
	cs := make([]color.RGBA, len(cols))
	nameIndex := make(map[string]int32, len(cols))
	numTransparent := int32(0)
	for i, nc := range cols {
		if nc.color.A < 0xff {
			numTransparent++
		}
		cs[i] = nc.color
		nameIndex[nc.name] = int32(i)
	}
	colors = &Colors{
		Colors:         cs,
		NameIndex:      nameIndex,
		NumTransparent: numTransparent,
		TransparentDim: DefaultTransparentDim}
	return
}
// IsTransparent reports whether the color at index has alpha below
// 0xff. Transparent colors are sorted to the front of the table,
// so a simple range check suffices.
func (colors *Colors) IsTransparent(index int32) bool {
	if index < colors.NumTransparent {
		return true
	}
	return false
}
func BlendColor(c1, c2 color.RGBA, a float32) color.RGBA {
b := float32(1) - a
return color.RGBA{
R: uint8(float32(c1.R)*a + float32(c2.R)*b),
G: uint8(float32(c1.G)*a + float32(c2.G)*b),
B: uint8(float32(c1.B)*a + float32(c2.B)*b),
A: 0xff}
}
// BlendColors blends col (the opaque color found at height pos)
// with all transparent spans ending above pos. Each span contributes
// at least its own alpha plus TransparentDim extra attenuation per
// meter of span height. NOTE(review): assumes the span list is
// sorted ascending by height — confirm against SpanPool.Insert.
func (colors *Colors) BlendColors(span *Span, col color.RGBA, pos int32) color.RGBA {
	curr := span
	// Ignore colors below pos.
	for ; curr != nil && pos >= curr.To; curr = curr.Next {
	}
	if curr == nil {
		return col
	}
	dim := colors.TransparentDim
	for ; curr != nil; curr = curr.Next {
		c := colors.Colors[curr.Value]
		// At least alpha channel attenuation + dim% extra for each depth meter.
		base := float32(c.A) / 255.0
		factor := min32f(1.0, base+float32(curr.To-curr.From)*dim)
		col = BlendColor(c, col, factor)
	}
	return col
}
func ParseColor(col string) (color.RGBA, error) {
col = strings.TrimLeft(col, "#")
rgb, err := strconv.ParseUint(col, 16, 32)
if err != nil {
return color.RGBA{}, err
}
return color.RGBA{
R: uint8(rgb >> 16),
G: uint8(rgb >> 8),
B: uint8(rgb),
A: 0xff}, nil
}
// ParseColorDefault parses col and falls back to def (logging a
// warning) when col is not a valid hex color.
func ParseColorDefault(col string, def color.RGBA) color.RGBA {
	c, err := ParseColor(col)
	if err == nil {
		return c
	}
	log.Printf("WARN: cannot parse color '%s': %s\n", col, err)
	return def
}
func ColorToHex(col color.RGBA) string {
return fmt.Sprintf("#%02x%02x%02x", col.R, col.G, col.B)
}

311
common/coords.go Normal file
View File

@ -0,0 +1,311 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"encoding/binary"
"fmt"
"strconv"
)
const (
	// Each coordinate component is stored in 12 bits.
	numBitsPerComponent = 12
	modulo              = 1 << numBitsPerComponent
	maxPositive         = modulo / 2
	// Valid component range: [-2048, 2047].
	minValue = -1 << (numBitsPerComponent - 1)
	maxValue = 1<<(numBitsPerComponent-1) - 1
)
type (
	// Coord is a map block position in block units.
	Coord struct {
		X, Y, Z int16
	}

	// Cuboid is an axis-aligned box spanned by P1 and P2.
	Cuboid struct {
		P1, P2 Coord
	}

	// KeyTransformer converts between database key schemes.
	KeyTransformer func(int64) int64
	// KeyEncoder serializes a key to bytes.
	KeyEncoder func(int64) ([]byte, error)
	// KeyDecoder parses a key from bytes.
	KeyDecoder func([]byte) (int64, error)
	// KeyTranscoder re-encodes a serialized key directly.
	KeyTranscoder func([]byte) ([]byte, error)
	// KeySplitter unpacks a key into a coordinate.
	KeySplitter func(int64) Coord
	// KeyJoiner packs a coordinate into a key.
	KeyJoiner func(Coord) int64
)
// Contains reports whether c lies inside the cuboid, inclusive on
// all sides.
func (cub Cuboid) Contains(c Coord) bool {
	switch {
	case c.X < cub.P1.X || c.X > cub.P2.X:
		return false
	case c.Y < cub.P1.Y || c.Y > cub.P2.Y:
		return false
	case c.Z < cub.P1.Z || c.Z > cub.P2.Z:
		return false
	}
	return true
}
// String implements fmt.Stringer as "(x, y, z)".
func (c Coord) String() string {
	return "(" + strconv.Itoa(int(c.X)) +
		", " + strconv.Itoa(int(c.Y)) +
		", " + strconv.Itoa(int(c.Z)) + ")"
}
// clipComponent clamps a single component to [minValue, maxValue].
func clipComponent(x int16) int16 {
	switch {
	case x < minValue:
		return minValue
	case x > maxValue:
		return maxValue
	default:
		return x
	}
}
// ClipCoord clamps all components of c to the valid 12-bit range.
func ClipCoord(c Coord) Coord {
	c.X = clipComponent(c.X)
	c.Y = clipComponent(c.Y)
	c.Z = clipComponent(c.Z)
	return c
}
// MinCoord returns the componentwise minimum of a and b.
func MinCoord(a, b Coord) Coord {
	m := a
	if b.X < m.X {
		m.X = b.X
	}
	if b.Y < m.Y {
		m.Y = b.Y
	}
	if b.Z < m.Z {
		m.Z = b.Z
	}
	return m
}
// MaxCoord returns the componentwise maximum of a and b.
func MaxCoord(a, b Coord) Coord {
	m := a
	if b.X > m.X {
		m.X = b.X
	}
	if b.Y > m.Y {
		m.Y = b.Y
	}
	if b.Z > m.Z {
		m.Z = b.Z
	}
	return m
}
// DecodeStringFromBytes parses a decimal database key from a byte slice.
func DecodeStringFromBytes(key []byte) (pos int64, err error) {
	pos, err = strconv.ParseInt(string(key), 10, 64)
	return
}
// keyToBytes appends the decimal representation of key to buf.
func keyToBytes(key int64, buf []byte) []byte {
	buf = strconv.AppendInt(buf, key, 10)
	return buf
}
// StringToBytes renders key as freshly allocated decimal ASCII bytes.
func StringToBytes(key int64) []byte {
	var buf []byte
	buf = strconv.AppendInt(buf, key, 10)
	return buf
}
// EncodeStringToBytes encodes a block pos as decimal ASCII bytes.
// The error result is always nil; it exists to satisfy KeyEncoder.
func EncodeStringToBytes(key int64) ([]byte, error) {
	return strconv.AppendInt(nil, key, 10), nil
}
func ToBigEndian(key int64) []byte {
enc := make([]byte, 8)
binary.BigEndian.PutUint64(enc, uint64(key))
return enc
}
func EncodeToBigEndian(key int64) ([]byte, error) {
return ToBigEndian(key), nil
}
// FromBigEndian reads an int64 key from 8 big-endian bytes.
func FromBigEndian(key []byte) int64 {
	u := binary.BigEndian.Uint64(key)
	return int64(u)
}
// DecodeFromBigEndian reads an int64 key from 8 big-endian bytes;
// the error result is always nil to satisfy KeyDecoder.
func DecodeFromBigEndian(key []byte) (int64, error) {
	return int64(binary.BigEndian.Uint64(key)), nil
}
// CoordToInterleaved converts c to a Z-order (Morton) key by
// interleaving the bits of the offset-shifted components:
// x bit, then y bit, then z bit, starting at the LSB.
func CoordToInterleaved(c Coord) (result int64) {
	const end = 1 << (numBitsPerComponent + 1)
	// Shift the components into the non-negative range first.
	x := c.X - minValue
	y := c.Y - minValue
	z := c.Z - minValue
	setmask := int64(1)
	for mask := int16(1); mask != end; mask <<= 1 {
		if x&mask != 0 {
			result |= setmask
		}
		setmask <<= 1
		if y&mask != 0 {
			result |= setmask
		}
		setmask <<= 1
		if z&mask != 0 {
			result |= setmask
		}
		setmask <<= 1
	}
	return
}
// InterleavedToCoord is the inverse of CoordToInterleaved: it
// de-interleaves the bits of pos and shifts the components back
// into the signed range.
func InterleavedToCoord(pos int64) Coord {
	const end = 1 << (numBitsPerComponent + 1)
	var x, y, z int16
	for mask := int16(1); mask != end; mask <<= 1 {
		if pos&1 == 1 {
			x |= mask
		}
		pos >>= 1
		if pos&1 == 1 {
			y |= mask
		}
		pos >>= 1
		if pos&1 == 1 {
			z |= mask
		}
		pos >>= 1
	}
	return Coord{X: x + minValue, Y: y + minValue, Z: z + minValue}
}
// CoordToPlain packs c into a single integer with Z in the highest
// 12-bit group, then Y, then X — Minetest's plain key scheme.
func CoordToPlain(c Coord) int64 {
	k := int64(c.Z) << numBitsPerComponent
	k += int64(c.Y)
	k <<= numBitsPerComponent
	k += int64(c.X)
	return k
}
// unsignedToSigned reinterprets a value in [0, modulo) as a signed
// 12-bit component.
func unsignedToSigned(i int16) int16 {
	if i >= maxPositive {
		i -= maxPositive * 2
	}
	return i
}
// pythonModulo reduces i modulo 4096 with a result in [0, modulo),
// mirroring Python's semantics for negative operands (unlike Go's
// % operator).
// To match C++ code.
func pythonModulo(i int16) int16 {
	const mask = modulo - 1
	if i >= 0 {
		return i & mask
	}
	// Note the precedence: modulo - ((-i) & mask).
	return modulo - -i&mask
}
// PlainToCoord is the inverse of CoordToPlain: it unpacks the
// 12-bit components (X lowest) using python-style modulo so that
// negative components round-trip like in the C++ implementation.
func PlainToCoord(i int64) (c Coord) {
	c.X = unsignedToSigned(pythonModulo(int16(i)))
	// Subtract the component before shifting so the sign borrow
	// does not leak into the next 12-bit group.
	i = (i - int64(c.X)) >> numBitsPerComponent
	c.Y = unsignedToSigned(pythonModulo(int16(i)))
	i = (i - int64(c.Y)) >> numBitsPerComponent
	c.Z = unsignedToSigned(pythonModulo(int16(i)))
	return
}
func TransformPlainToInterleaved(pos int64) int64 {
return CoordToInterleaved(PlainToCoord(pos))
}
func TransformInterleavedToPlain(pos int64) int64 {
return CoordToPlain(InterleavedToCoord(pos))
}
// DecodeStringFromBytesToInterleaved parses a decimal plain key
// and returns it transformed to interleaved (Z-order) form.
func DecodeStringFromBytesToInterleaved(key []byte) (int64, error) {
	plain, err := DecodeStringFromBytes(key)
	if err != nil {
		return plain, err
	}
	return TransformPlainToInterleaved(plain), nil
}
// DecodeStringBytesToCoord parses a decimal plain key and unpacks
// it into a coordinate.
func DecodeStringBytesToCoord(key []byte) (coord Coord, err error) {
	k, err := DecodeStringFromBytes(key)
	if err != nil {
		return Coord{}, err
	}
	return PlainToCoord(k), nil
}
func EncodeStringToBytesFromInterleaved(key int64) ([]byte, error) {
return EncodeStringToBytes(TransformInterleavedToPlain(key))
}
// IdentityTranscoder passes the key through unmodified.
func IdentityTranscoder(key []byte) (out []byte, err error) {
	out = key
	return
}
// TranscodePlainToInterleaved converts a decimal plain key directly
// into a big-endian interleaved key.
func TranscodePlainToInterleaved(key []byte) ([]byte, error) {
	pos, err := DecodeStringFromBytesToInterleaved(key)
	if err != nil {
		return nil, err
	}
	return EncodeToBigEndian(pos)
}
// TranscodeInterleavedToPlain converts a big-endian interleaved key
// directly into a decimal plain key.
func TranscodeInterleavedToPlain(key []byte) ([]byte, error) {
	pos, err := DecodeFromBigEndian(key)
	if err != nil {
		return nil, err
	}
	return EncodeStringToBytes(TransformInterleavedToPlain(pos))
}
// NaiveBigMin is for correctness checks of BigMin only.
// It brute-forces the smallest Z-order code greater than zcode
// inside the box spanned by minz and maxz by enumerating every
// coordinate of the box.
func NaiveBigMin(minz, maxz, zcode int64) int64 {
	var (
		c1   = InterleavedToCoord(minz)
		c2   = InterleavedToCoord(maxz)
		cand = maxz
		c    Coord
	)
	for c.X = c1.X; c.X <= c2.X; c.X++ {
		for c.Y = c1.Y; c.Y <= c2.Y; c.Y++ {
			for c.Z = c1.Z; c.Z <= c2.Z; c.Z++ {
				if z := CoordToInterleaved(c); z > zcode && z < cand {
					cand = z
				}
			}
		}
	}
	return cand
}
const (
	// msb is the index of the most significant bit of a 36-bit
	// (3 x 12 bit) interleaved key.
	msb = uint8(3*numBitsPerComponent - 1)
	// mask selects every third bit, i.e. one component's bits.
	mask = int64(0x924924924)
	// full covers all 36 key bits.
	full = int64(0xfffffffff)
)
// setbits returns v with bit p cleared and all lower bits belonging
// to the same component set — the "01111..." half of the BIGMIN
// load operations.
func setbits(p uint8, v int64) int64 {
	m := (mask >> (msb - p)) & (^(full << p) & full)
	return (v | m) & ^(1 << p) & full
}
// unsetbits returns v with bit p set and all lower bits belonging
// to the same component cleared — the "10000..." half of the BIGMIN
// load operations.
func unsetbits(p uint8, v int64) int64 {
	m := ^(mask >> (msb - p)) & full
	return (v & m) | (int64(1) << p)
}
// BigMin computes BIGMIN (after Tropf/Herzog): the smallest Z-order
// code greater than zcode that lies inside the query box described
// by the codes minz and maxz. It walks the key bits from most to
// least significant and narrows the box with the classic decision
// table over the (zcode, minz, maxz) bit triple.
func BigMin(minz, maxz, zcode int64) int64 {
	const (
		b001 = 1
		b010 = 2
		b011 = 2 | 1
		b100 = 4
		b101 = 4 | 1
	)
	bigmin := maxz
	pos := msb
	for m := int64(1) << msb; m != 0; m >>= 1 {
		// v = zcode bit (4) | minz bit (2) | maxz bit (1) at pos.
		var v uint8
		if zcode&m == m {
			v = b100
		}
		if minz&m == m {
			v |= b010
		}
		if maxz&m == m {
			v |= b001
		}
		switch v {
		case b001:
			// zcode in the lower half: remember the upper half's
			// minimum as candidate and continue in the lower half.
			bigmin = unsetbits(pos, minz)
			maxz = setbits(pos, maxz)
		case b011:
			// Whole remaining box is above zcode: its minimum wins.
			return minz
		case b100:
			// zcode is above the whole box: keep the candidate.
			return bigmin
		case b101:
			// Continue in the upper half of the box.
			minz = unsetbits(pos, minz)
		}
		pos--
	}
	return bigmin
}

227
common/coords_test.go Normal file
View File

@ -0,0 +1,227 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"math/rand"
"testing"
)
// data is a sample of coordinate components spread over the valid
// 12-bit range, including sign changes and near-boundary values.
var data = []int16{
	-2045, -1850, -1811, -1629, -1104,
	-967, -725, -646, -329, -212,
	-150, -1, 0, 1, 88, 524, 527, 549,
	1783, 1817, 1826, 2028, 2032}
// allData calls f with every coordinate combination of the sample
// components.
func allData(f func(Coord)) {
	for _, z := range data {
		for _, y := range data {
			for _, x := range data {
				f(Coord{X: x, Y: y, Z: z})
			}
		}
	}
}
// checkEncodeDecode joins c into a key, round-trips it through the
// given encoder/decoder pair and reports mismatches to t.
func checkEncodeDecode(
	desc string,
	join KeyJoiner,
	encode KeyEncoder, decode KeyDecoder,
	c Coord, t *testing.T) {
	k1 := join(c)
	var err error
	var b []byte
	if b, err = encode(k1); err != nil {
		t.Errorf("%s: Failed to encode %s %s\n", desc, c, err)
		return
	}
	var k2 int64
	if k2, err = decode(b); err != nil {
		t.Errorf("%s: Failed to decode %s %s\n", desc, c, err)
		return
	}
	if k1 != k2 {
		t.Errorf("%s: Expected %d got %d for %s\n", desc, k1, k2, c)
	}
}
// TestEncodeDecode round-trips all sample coordinates through every
// combination of key scheme (plain/interleaved) and serialization
// (big endian/decimal string).
func TestEncodeDecode(t *testing.T) {
	allData(func(c Coord) {
		checkEncodeDecode(
			"Big endian - interleaved",
			CoordToInterleaved,
			EncodeToBigEndian, DecodeFromBigEndian,
			c, t)
	})
	allData(func(c Coord) {
		checkEncodeDecode(
			"String - interleaved",
			CoordToInterleaved,
			EncodeStringToBytes, DecodeStringFromBytes,
			c, t)
	})
	allData(func(c Coord) {
		checkEncodeDecode(
			"Big endian - plain",
			CoordToPlain,
			EncodeToBigEndian, DecodeFromBigEndian,
			c, t)
	})
	allData(func(c Coord) {
		checkEncodeDecode(
			"String - plain",
			CoordToPlain,
			EncodeStringToBytes, DecodeStringFromBytes,
			c, t)
	})
}
// checkJoinSplit verifies that split(join(c)) == c and reports
// mismatches to t.
func checkJoinSplit(
	desc string,
	join KeyJoiner, split KeySplitter,
	c Coord, t *testing.T) {
	k := join(c)
	s := split(k)
	if s != c {
		t.Errorf("%s: Expected %s got %s %b\n", desc, c, s, k)
	}
}
// TestJoinSplit checks that both key schemes round-trip through
// their join/split pairs for all sample coordinates.
func TestJoinSplit(t *testing.T) {
	allData(func(c Coord) {
		checkJoinSplit(
			"P2C(C2P(xyz))",
			CoordToPlain, PlainToCoord,
			c, t)
	})
	allData(func(c Coord) {
		checkJoinSplit(
			"I2C(C2I(xyz))",
			CoordToInterleaved, InterleavedToCoord,
			c, t)
	})
}
// checkTransformer verifies that transform is the identity on the
// key produced by joiner and reports mismatches to t.
func checkTransformer(
	desc string, joiner KeyJoiner,
	transform KeyTransformer,
	c Coord, t *testing.T) {
	k1 := joiner(c)
	k2 := transform(k1)
	if k2 != k1 {
		t.Errorf("%s: Expected %v got %v for %s\n", desc, k1, k2, c)
	}
}
// compose chains the given transformers left to right into one.
func compose(transforms ...KeyTransformer) KeyTransformer {
	return func(x int64) int64 {
		for _, transform := range transforms {
			x = transform(x)
		}
		return x
	}
}
// TestTransforms checks that the plain<->interleaved transformers
// cancel each other out in both orders.
func TestTransforms(t *testing.T) {
	// Mainly to check the test itself.
	allData(func(c Coord) {
		checkTransformer(
			"plain",
			CoordToPlain,
			compose(),
			c, t)
	})
	allData(func(c Coord) {
		checkTransformer(
			"I2P(P2I(plain))",
			CoordToPlain,
			compose(TransformPlainToInterleaved, TransformInterleavedToPlain),
			c, t)
	})
	allData(func(c Coord) {
		checkTransformer(
			"P2I(I2P(interleaved))",
			CoordToInterleaved,
			compose(TransformInterleavedToPlain, TransformPlainToInterleaved),
			c, t)
	})
}
// TestCoordInterleaving checks that interleaving round-trips all
// sample coordinates.
func TestCoordInterleaving(t *testing.T) {
	allData(func(c Coord) {
		d := InterleavedToCoord(CoordToInterleaved(c))
		if c != d {
			t.Errorf("Expected %v got %v\n", c, d)
		}
	})
}
// outsiders calls fn for successor codes zn that lie numerically
// between zmin and zmax but whose coordinates fall outside the
// cuboid — exactly the cases where BIGMIN has to jump.
func outsiders(zmin, zmax int64, fn func(int64)) {
	c1 := InterleavedToCoord(zmin)
	c2 := InterleavedToCoord(zmax)
	cub := Cuboid{P1: c1, P2: c2}
	var c Coord
	for c.X = c1.X; c.X <= c2.X; c.X++ {
		for c.Y = c1.Y; c.Y <= c2.Y; c.Y++ {
			for c.Z = c1.Z; c.Z <= c2.Z; c.Z++ {
				zn := CoordToInterleaved(c) + 1
				if zn > zmin && zn < zmax && !cub.Contains(InterleavedToCoord(zn)) {
					fn(zn)
				}
			}
		}
	}
}
// TestBigMin cross-checks BigMin against the brute-force
// NaiveBigMin on random small boxes.
func TestBigMin(t *testing.T) {
	const tries = 20
	for i := 0; i < tries; i++ {
		// Random box origin and extent within the valid range.
		x1 := rand.Intn(4000) - 2000
		y1 := rand.Intn(4000) - 2000
		z1 := rand.Intn(4000) - 2000
		w := rand.Intn(18) + 1
		h := rand.Intn(18) + 1
		d := rand.Intn(18) + 1
		x2 := x1 + w
		y2 := y1 + h
		z2 := z1 + d
		c1 := Coord{X: int16(x1), Y: int16(y1), Z: int16(z1)}
		c2 := Coord{X: int16(x2), Y: int16(y2), Z: int16(z2)}
		zmin := CoordToInterleaved(c1)
		zmax := CoordToInterleaved(c2)
		if zmin > zmax {
			t.Errorf("zmin > zmax: %d > %d\n", zmin, zmax)
		}
		errors, success := 0, 0
		outsiders(zmin, zmax, func(zcode int64) {
			nbm := NaiveBigMin(zmin, zmax, zcode)
			cbm := BigMin(zmin, zmax, zcode)
			//fmt.Printf("nbm: %b\n", nbm)
			//fmt.Printf("cbm: %b\n", cbm)
			if nbm != cbm {
				errors++
			} else {
				success++
			}
		})
		if errors > 0 {
			cub := Cuboid{P1: c1, P2: c2}
			t.Errorf("BigMin: %s (%d %d) %d errors out of %d (%f)\n",
				cub,
				zmin, zmax,
				errors, errors+success,
				float64(errors)/float64(errors+success))
		}
	}
}

86
common/coverage.go Normal file
View File

@ -0,0 +1,86 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import "sync"
// zRange stores, for one z slice, the covered y interval and a
// sorted span list of covered x ranges.
type zRange struct {
	y1     int16
	y2     int16
	xRange *Span
}
// Coverage3D tracks which block coordinates exist, organized as
// per-z slices of x spans. Safe for concurrent use.
type Coverage3D struct {
	pool    *SpanPool
	zRanges map[int16]*zRange
	mu      sync.RWMutex
}
// Range is one query result: an x interval at a fixed z, together
// with the y extent of that whole z slice.
type Range struct {
	Z  int16
	Y1 int16
	Y2 int16
	X1 int16
	X2 int16
}
// NewCoverage3D creates an empty coverage index with its own span pool.
func NewCoverage3D() *Coverage3D {
	c3d := new(Coverage3D)
	c3d.pool = NewSpanPool()
	c3d.zRanges = make(map[int16]*zRange)
	return c3d
}
// Insert records coordinate c in the coverage index.
func (c3d *Coverage3D) Insert(c Coord) {
	c3d.mu.Lock()
	defer c3d.mu.Unlock()
	zr := c3d.zRanges[c.Z]
	if zr == nil {
		// First coordinate in this z slice: single-point x span.
		xr := c3d.pool.Alloc()
		xr.From = int32(c.X)
		xr.To = int32(c.X)
		xr.Next = nil
		c3d.zRanges[c.Z] = &zRange{
			y1:     c.Y,
			y2:     c.Y,
			xRange: xr}
		return
	}
	zr.xRange = c3d.pool.Insert(zr.xRange, int32(c.X), 0)
	// Widen the slice's y interval if needed.
	if c.Y < zr.y1 {
		zr.y1 = c.Y
	}
	if c.Y > zr.y2 {
		zr.y2 = c.Y
	}
}
// Query returns all covered ranges intersecting the box spanned by
// c1 and c2 (in any order). The x intervals are clipped to the
// query box; the y extent is the slice's overall interval clipped
// to the box.
func (c3d *Coverage3D) Query(c1, c2 Coord) []Range {
	c1, c2 = MinCoord(c1, c2), MaxCoord(c1, c2)
	c3d.mu.RLock()
	defer c3d.mu.RUnlock()
	r := make([]Range, 0, 32)
	for z := c1.Z; z <= c2.Z; z++ {
		zr := c3d.zRanges[z]
		if zr == nil || c1.Y > zr.y2 || c2.Y < zr.y1 {
			continue
		}
		y1, y2 := max16(c1.Y, zr.y1), min16(c2.Y, zr.y2)
		// Spans are sorted by From; stop once past the box.
		for xr := zr.xRange; xr != nil && xr.From <= int32(c2.X); xr = xr.Next {
			if xr.To < int32(c1.X) {
				continue
			}
			r = append(r, Range{
				Z:  z,
				Y1: y1,
				Y2: y2,
				X1: max16(c1.X, int16(xr.From)),
				X2: min16(c2.X, int16(xr.To))})
		}
	}
	return r
}

136
common/image.go Normal file
View File

@ -0,0 +1,136 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"bufio"
"bytes"
"errors"
"image"
"image/color"
"image/draw"
"image/png"
"log"
"os"
"strconv"
"sync"
"time"
"golang.org/x/crypto/blake2b"
"github.com/bamiaux/rez"
)
// ResizeFilter is used to scale down the pyramid tiles.
var ResizeFilter = rez.NewLanczosFilter(3)

// rrand is the state of a small linear congruential PRNG used for
// temp file name suffixes; guarded by rrandmu.
var rrand uint32
var rrandmu sync.Mutex
// reseed derives a fresh PRNG seed from the wall clock and pid.
func reseed() uint32 {
	return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
// nextSuffix returns a pseudo random nine digit string used to
// build unique temporary file names. Safe for concurrent use.
func nextSuffix() string {
	rrandmu.Lock()
	r := rrand
	if r == 0 {
		r = reseed()
	}
	r = r*1664525 + 1013904223 // constants from Numerical Recipes
	rrand = r
	rrandmu.Unlock()
	// 1e9 + r%1e9 always has ten digits; drop the leading '1'
	// to get a zero-padded nine digit suffix.
	return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
func EncodeToMem(img image.Image) []byte {
var buf bytes.Buffer
enc := png.Encoder{CompressionLevel: png.BestCompression}
if err := enc.Encode(&buf, img); err != nil {
// This really should not happen.
panic(err)
}
return buf.Bytes()
}
func SaveAsPNG(path string, img image.Image) (err error) {
var file *os.File
if file, err = os.Create(path); err != nil {
return
}
writer := bufio.NewWriter(file)
err = png.Encode(writer, img)
writer.Flush()
file.Close()
return
}
// tmpName derives a not-yet-existing temporary file name from tmpl
// by appending ".tmp" plus a pseudo random suffix. After repeated
// collisions the PRNG is reseeded.
func tmpName(tmpl string) (string, error) {
	tmpPre := tmpl + ".tmp"
	nconflict := 0
	for i := 0; i < 10000; i++ {
		tmp := tmpPre + nextSuffix()
		if _, err := os.Stat(tmp); err != nil {
			if os.IsNotExist(err) {
				return tmp, nil
			}
			return "", err
		}
		if nconflict++; nconflict > 10 {
			nconflict = 0
			// Reseed under the lock: rrand is shared with
			// nextSuffix, so the previous unguarded write
			// was a data race.
			rrandmu.Lock()
			rrand = reseed()
			rrandmu.Unlock()
		}
	}
	return "", errors.New("Cannot create temp name")
}
// SaveAsPNGAtomic writes img into a unique temp file next to path
// and renames it into place, so readers never see a half-written
// file.
func SaveAsPNGAtomic(path string, img image.Image) (err error) {
	var tmpPath string
	if tmpPath, err = tmpName(path); err != nil {
		return
	}
	// Still a bit racy: the name could be taken between tmpName
	// and Create.
	if err = SaveAsPNG(tmpPath, img); err != nil {
		return
	}
	return os.Rename(tmpPath, path)
}
// LoadPNG loads the PNG at path. On any error (missing file or
// broken image) a uniform image of the background color is
// returned instead; errors are never propagated.
func LoadPNG(path string, bg color.RGBA) image.Image {
	var err error
	var file *os.File
	if file, err = os.Open(path); err != nil {
		return image.NewUniform(bg)
	}
	defer file.Close()
	reader := bufio.NewReader(file)
	var img image.Image
	if img, err = png.Decode(reader); err != nil {
		log.Printf("WARN: decoding '%s' failed: %s\n", path, err)
		return image.NewUniform(bg)
	}
	return img
}
// HashImage computes a BLAKE2b-256 digest over the raw pixel rows
// of img, honoring the stride so padding bytes between rows are
// excluded.
func HashImage(img *image.RGBA) []byte {
	hash, _ := blake2b.New256(nil)
	// Four bytes per pixel.
	w, h := img.Rect.Dx()*4, img.Rect.Dy()
	pos := img.PixOffset(img.Rect.Min.X, img.Rect.Min.Y)
	for ; h > 0; h, pos = h-1, pos+img.Stride {
		hash.Write(img.Pix[pos : pos+w])
	}
	return hash.Sum(nil)
}
func BackgroundImage(width, height int, bg color.RGBA) *image.RGBA {
m := image.NewRGBA(image.Rect(0, 0, width, height))
draw.Draw(m, m.Bounds(), &image.Uniform{bg}, image.ZP, draw.Src)
return m
}

78
common/math.go Normal file
View File

@ -0,0 +1,78 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
// Max returns the larger of two ints.
func Max(a, b int) int {
	if b > a {
		return b
	}
	return a
}
// Min returns the smaller of two ints.
func Min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// max32 returns the larger of two int32 values.
func max32(a, b int32) int32 {
	if b > a {
		return b
	}
	return a
}
// max16 returns the larger of two int16 values.
func max16(a, b int16) int16 {
	if b > a {
		return b
	}
	return a
}
// min16 returns the smaller of two int16 values.
func min16(a, b int16) int16 {
	if b < a {
		return b
	}
	return a
}
// min32f returns the smaller of two float32 values.
func min32f(a, b float32) float32 {
	if b < a {
		return b
	}
	return a
}
// Clamp32f limits x to the closed interval [a, b].
func Clamp32f(x, a, b float32) float32 {
	if x < a {
		return a
	}
	if x > b {
		return b
	}
	return x
}
// Order returns its arguments sorted ascending.
func Order(a, b int) (int, int) {
	if b < a {
		a, b = b, a
	}
	return a, b
}
// Order16 returns its int16 arguments sorted ascending.
func Order16(a, b int16) (int16, int16) {
	if b < a {
		a, b = b, a
	}
	return a, b
}
// Order64 returns its int64 arguments sorted ascending.
func Order64(a, b int64) (int64, int64) {
	if b < a {
		a, b = b, a
	}
	return a, b
}

10
common/post17offset.go Normal file
View File

@ -0,0 +1,10 @@
// Copyright 2016 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
// +build go1.7
package common
// afterCompressOfs is not necessary after Go 1.7 — presumably the
// zlib reader no longer over-reads; see the offset note in
// NewDecodedBlock.
const afterCompressOfs = 0

10
common/pre17offset.go Normal file
View File

@ -0,0 +1,10 @@
// Copyright 2016 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
// +build !go1.7
package common
// afterCompressOfs is necessary before Go 1.7: the extraction
// offset has to be corrected by 4 bytes after the compressed
// node data; see the offset note in NewDecodedBlock.
const afterCompressOfs = 4

161
common/redisclient.go Normal file
View File

@ -0,0 +1,161 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"net"
	"strconv"
	"unicode"
)
// RedisClient is a minimal client speaking the small subset of the
// Redis protocol (RESP) used by mtredisalize.
type RedisClient struct {
	conn    net.Conn
	reader  *bufio.Reader
	arena   []byte    // Bump-allocated backing store for reply payloads.
	scratch [130]byte // Reused buffer for keys and small replies.
}
// NewRedisClient dials the server at the given network/address
// (e.g. "tcp" or "unix") and wraps the connection in a buffered
// reader.
func NewRedisClient(network, address string) (client *RedisClient, err error) {
	var conn net.Conn
	if conn, err = net.Dial(network, address); err != nil {
		return
	}
	client = &RedisClient{conn: conn, reader: bufio.NewReaderSize(conn, 8*1024)}
	return
}
// Close shuts down the underlying network connection.
func (client *RedisClient) Close() error {
	err := client.conn.Close()
	return err
}
// Pre-rendered protocol fragments for the HSPATIAL request.
var (
	writeArray4 = []byte("*4\r\n")
	hspatial    = []byte("HSPATIAL")
	nl          = []byte("\r\n")
	ignore      = []byte("IGNORE")
)
// writeBulkString appends data to buf encoded as a RESP bulk
// string: "$<len>\r\n<data>\r\n".
func writeBulkString(buf []byte, data []byte) []byte {
	buf = append(buf, '$')
	buf = strconv.AppendInt(buf, int64(len(data)), 10)
	buf = append(buf, '\r', '\n')
	buf = append(buf, data...)
	buf = append(buf, '\r', '\n')
	return buf
}
// writeHSpatial sends "HSPATIAL IGNORE p1 p2" as a RESP array of
// four bulk strings. The scratch buffer is split: the first 40
// bytes render the decimal keys, the rest holds the request.
func (client *RedisClient) writeHSpatial(p1, p2 int64) error {
	tmp := client.scratch[:0:40]
	buf := client.scratch[40:40]
	buf = append(buf, writeArray4...)
	buf = writeBulkString(buf, hspatial)
	buf = writeBulkString(buf, ignore)
	buf = writeBulkString(buf, keyToBytes(p1, tmp))
	buf = writeBulkString(buf, keyToBytes(p2, tmp))
	_, err := client.conn.Write(buf)
	return err
}
// isError converts a RESP error line ("-message") into a Go error;
// any other line yields nil.
func isError(line []byte) error {
	if len(line) == 0 || line[0] != '-' {
		return nil
	}
	return fmt.Errorf("error: %s", line[1:])
}
// parseSize is a cheaper replacement for fmt.Sscanf(string(line), "$%d\r\n", &size).
// It parses the length of a RESP bulk string header.
func parseSize(line []byte) (int, error) {
	if len(line) == 0 || line[0] != '$' {
		return 0, errors.New("Missing '$' at begin of line")
	}
	digits := bytes.TrimFunc(line[1:], unicode.IsSpace)
	v, err := strconv.ParseInt(string(digits), 10, 0)
	return int(v), err
}
// alloc hands out size bytes from the client's arena, growing it
// in chunks of at least 16KiB. The returned slice is capped to
// its own capacity so appends never clobber a neighbor.
func (client *RedisClient) alloc(size int) []byte {
	a := client.arena
	if len(a) < size {
		a = make([]byte, Max(size, 16*1024))
	}
	x := a[:size:size]
	client.arena = a[size:]
	return x
}
// readBulkString reads one RESP bulk string reply into *data,
// reusing its capacity or allocating from the arena when it is too
// small. It returns the announced payload size; sizes <= 0 are
// passed through without reading a payload.
func (client *RedisClient) readBulkString(data *[]byte) (size int, err error) {
	var line []byte
	if line, err = client.reader.ReadBytes('\n'); err != nil {
		return
	}
	if err = isError(line); err != nil {
		return
	}
	if size, err = parseSize(line); err != nil || size <= 0 {
		return
	}
	if cap(*data) < size {
		*data = client.alloc(size)
	} else {
		*data = (*data)[:size]
	}
	// io.ReadFull replaces the former manual read loop: it cannot
	// spin on zero-byte reads and reports io.ErrUnexpectedEOF on
	// truncated input instead of silently returning short data.
	if _, err = io.ReadFull(client.reader, *data); err != nil {
		return
	}
	// Consume the trailing "\r\n" behind the payload.
	_, err = client.reader.ReadBytes('\n')
	return
}
// QueryCuboid requests all blocks inside cuboid via HSPATIAL and
// hands each one to fn. fn may return a block whose Data slice is
// then reused for the next reply, or nil to force a fresh
// allocation. count is the number of blocks received.
func (client *RedisClient) QueryCuboid(cuboid Cuboid, fn func(*Block) *Block) (count int, err error) {
	p1 := CoordToPlain(cuboid.P1)
	p2 := CoordToPlain(cuboid.P2)
	if err = client.writeHSpatial(p1, p2); err != nil {
		return
	}
	var (
		block *Block
		size  int
		key   int64
		data  []byte
	)
	// Replies arrive as (key, data) bulk string pairs; a key of
	// size <= 0 terminates the stream.
	for s := client.scratch[:]; ; count++ {
		p := &s
		if size, err = client.readBulkString(p); err != nil {
			return
		}
		if size <= 0 {
			break
		}
		if key, err = DecodeStringFromBytes(*p); err != nil {
			return
		}
		if size, err = client.readBulkString(&data); err != nil || size < 0 {
			return
		}
		// Reuse a single block struct across iterations.
		if block == nil {
			block = &Block{Coord: PlainToCoord(key), Data: data}
		} else {
			*block = Block{Coord: PlainToCoord(key), Data: data}
		}
		if block = fn(block); block != nil {
			data = block.Data[:0]
		} else {
			data = nil
		}
	}
	return
}

397
common/renderer.go Normal file
View File

@ -0,0 +1,397 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"image"
"image/color"
"math"
)
// Renderer accumulates map blocks into a top-down height/color
// field which can then be turned into images.
type Renderer struct {
	width             int     // Width in blocks.
	height            int     // Height in blocks.
	xOfs              int16   // Block offset in x direction.
	zOfs              int16   // Block offset in z direction.
	yBuffer           []int32 // Per pixel: height of the topmost solid node.
	yMin              []int32 // Per block column: minimal filled height.
	cBuffer           []int32 // Per pixel: color table index, -1 = unset.
	RejectedBlocks    int     // Statistic: blocks skipped without decoding.
	SolidBlocks       int     // Statistic: solid blocks rendered.
	TransparentBlocks int     // Statistic: transparent blocks rendered.
	spans             *SpanPool
	tBuffer           []*Span // Per pixel: transparent spans, if enabled.
}
// NewRenderer creates a renderer for width x height map blocks
// (16x16 pixels each). The transparency buffers are only allocated
// when transparent rendering is requested.
func NewRenderer(width, height int, transparent bool) (renderer *Renderer) {
	dim := width * height
	pixSize := dim * 16 * 16
	yBuffer := make([]int32, pixSize)
	cBuffer := make([]int32, pixSize)
	yMin := make([]int32, dim)
	var tBuffer []*Span
	var spans *SpanPool
	if transparent {
		tBuffer = make([]*Span, pixSize)
		spans = NewSpanPool()
	}
	renderer = &Renderer{
		width:   width,
		height:  height,
		yBuffer: yBuffer,
		cBuffer: cBuffer,
		yMin:    yMin,
		tBuffer: tBuffer,
		spans:   spans}
	renderer.initBuffers()
	return
}
// SetPos sets the renderer's block offset in x and z direction.
func (r *Renderer) SetPos(xOfs, zOfs int16) {
	r.xOfs, r.zOfs = xOfs, zOfs
}
// GetPos returns the renderer's block offset in x and z direction.
func (r *Renderer) GetPos() (int16, int16) {
	x, z := r.xOfs, r.zOfs
	return x, z
}
// initBuffers resets the height, color and fill buffers to their
// "nothing rendered yet" sentinel values. The former
// `yb = yb[:len(yb)]` re-slices were no-ops and have been removed.
func (r *Renderer) initBuffers() {
	for i := range r.yBuffer {
		r.yBuffer[i] = math.MinInt32
	}
	for i := range r.cBuffer {
		r.cBuffer[i] = -1
	}
	for i := range r.yMin {
		r.yMin[i] = math.MinInt32
	}
}
// Reset clears all statistics and buffers so the renderer can be
// reused for the next tile. Buffer re-initialization is skipped
// when nothing was rendered since the last Reset.
func (r *Renderer) Reset() {
	r.RejectedBlocks = 0
	if r.SolidBlocks > 0 || r.TransparentBlocks > 0 {
		r.SolidBlocks = 0
		r.initBuffers()
	}
	if r.TransparentBlocks > 0 {
		r.TransparentBlocks = 0
		// Return all span lists to the pool.
		tb := r.tBuffer
		for i, t := range tb {
			if t != nil {
				r.spans.FreeAll(t)
				tb[i] = nil
			}
		}
	}
}
// IsFilled reports whether every block column has received at
// least one rendered node.
func (r *Renderer) IsFilled() bool {
	for i := 0; i < len(r.yMin); i++ {
		if r.yMin[i] == math.MinInt32 {
			return false
		}
	}
	return true
}
// IsEmpty reports whether nothing has been rendered yet.
func (r *Renderer) IsEmpty() bool {
	if r.SolidBlocks != 0 {
		return false
	}
	return r.TransparentBlocks == 0
}
// down goes down the y direction in a block from top to bottom.
// In its loop it copies the logic of Block.Content pulling some
// things like the version check and common indexing out to
// save some cycles.
// It returns the color index and height of the first renderable
// (non air/ignore, color-mapped) node in column (x, z) at or below
// y, or (-1, -1) if there is none.
func down(db *DecodedBlock, x, y, z int) (int32, int) {
	mc := db.MapContent
	switch {
	case db.Version >= 24:
		// Two bytes per node; sliver is the column's base offset.
		for sliver := (z<<8 + x) << 1; y >= 0; y-- {
			pos := sliver + y<<5
			content := int32(mc[pos])<<8 | int32(mc[pos+1])
			if content != db.AirID && content != db.IgnoreID {
				if c, found := db.IndexMap[content]; found {
					return c, y
				}
			}
		}
	case db.Version >= 20:
		// Legacy one byte ids with an extra nibble table at +0x2000.
		for sliver := z<<8 + x; y >= 0; y-- {
			pos := sliver + y<<4
			var content int32
			if c := mc[pos]; c <= 0x80 {
				content = int32(c)
			} else {
				content = int32(c)<<4 | int32(mc[pos+0x2000])>>4
			}
			if content != db.AirID && content != db.IgnoreID {
				if c, found := db.IndexMap[content]; found {
					return c, y
				}
			}
		}
	}
	return -1, -1
}
// RenderBlock projects one map block top-down into the renderer's
// buffers. Blocks strictly below already-filled columns are
// rejected cheaply before decoding. Transparent blocks additionally
// collect per-pixel span lists for later blending.
func (r *Renderer) RenderBlock(block *Block, colors *Colors) (err error) {
	bx := block.Coord.X - r.xOfs
	bz := block.Coord.Z - r.zOfs
	// We do not need to render the block if the whole 16x16 area
	// is already filled and the block is strictly below.
	blockY := int32(block.Coord.Y) << 4
	pos := int(bz)*r.width + int(bx)
	if blockY < r.yMin[pos] {
		r.RejectedBlocks++
		return
	}
	// Decoding is pretty expensive so do it that late.
	var db *DecodedBlock
	if db, err = NewDecodedBlock(block.Data, colors); err != nil {
		return
	}
	if db.AirOnly() {
		r.RejectedBlocks++
		return
	}
	w := r.width << 4
	// ofs is the pixel index of the block's lower left corner.
	ofs := int(bz)*w<<4 + int(bx)<<4
	yB := r.yBuffer
	yMin := int32(math.MaxInt32)
	if db.Transparent && r.tBuffer != nil {
		r.TransparentBlocks++
		for z := 0; z < 16; z++ {
			for x := 0; x < 16; x++ {
				currentY := yB[ofs]
				if currentY < blockY {
					var c int32
					for y := 15; ; y-- {
						if c, y = down(db, x, y, z); y < 0 {
							break
						}
						cY := blockY + int32(y)
						if colors.IsTransparent(c) {
							r.tBuffer[ofs] = r.spans.Insert(r.tBuffer[ofs], cY, c)
							// We need to continue to go down because we
							// can see through this node.
						} else {
							r.cBuffer[ofs] = c
							currentY = cY
							yB[ofs] = currentY
							break
						}
					}
				}
				if currentY < yMin {
					yMin = currentY
				}
				ofs++
			}
			// Jump to the next pixel row of this block.
			ofs += w - 16
		}
	} else {
		r.SolidBlocks++
		for z := 0; z < 16; z++ {
			for x := 0; x < 16; x++ {
				currentY := yB[ofs]
				if currentY < blockY {
					if c, y := down(db, x, 15, z); y >= 0 {
						r.cBuffer[ofs] = c
						currentY = blockY + int32(y)
						yB[ofs] = currentY
					}
				}
				if currentY < yMin {
					yMin = currentY
				}
				ofs++
			}
			ofs += w - 16
		}
	}
	r.yMin[pos] = yMin
	return
}
// CreateImage converts the color buffer into an RGBA image; rows
// are written bottom-up (z inverted). Missing or out-of-range
// indices become the background color.
func (r *Renderer) CreateImage(colors []color.RGBA, background color.RGBA) *image.RGBA {
	pw, ph := r.width<<4, r.height<<4
	image := image.NewRGBA(image.Rect(0, 0, pw, ph))
	ofs, numCols := 0, int32(len(colors))
	for z := ph - 1; z >= 0; z-- {
		for x := 0; x < pw; x++ {
			colIdx := r.cBuffer[ofs]
			if colIdx >= 0 && colIdx < numCols {
				image.Set(x, z, colors[colIdx])
			} else {
				image.Set(x, z, background)
			}
			ofs++
		}
	}
	return image
}
// safeColor clamps x into the uint8 range [0, 255].
func safeColor(x int32) uint8 {
	if x < 0 {
		return 0
	}
	if x > 255 {
		return 255
	}
	return uint8(x)
}
// CreateShadedImage renders the view port given by the pixel offset
// (xOfs, zOfs) and extent width x height into a new RGBA image and
// applies a simple relief shading: every pixel is lightened by how
// much its column rises above its left (x-1) and lower (z+1 in buffer
// order) neighbors. Out-of-range color indices are painted with the
// background color. If transparent blocks were rendered, the spans in
// tBuffer are blended on top of the shaded ground color.
func (r *Renderer) CreateShadedImage(
	xOfs, zOfs, width, height int,
	cols *Colors, background color.RGBA) *image.RGBA {
	image := image.NewRGBA(image.Rect(0, 0, width, height))
	pw := r.width << 4
	cs := cols.Colors
	ofs, numCols := zOfs*pw+xOfs, int32(len(cs))
	// stride skips the part of a buffer row outside the view port;
	// istride moves the image cursor one row up (z is flipped).
	stride := pw - width
	istride := image.Stride + 4*width
	iofs := image.PixOffset(0, height-1)
	pix := image.Pix
	if r.TransparentBlocks > 0 { // Transparent path: blend spans per pixel.
		for z := height - 1; z >= 0; z-- {
			for x := 0; x < width; x++ {
				colIdx := r.cBuffer[ofs]
				if colIdx < 0 || colIdx >= numCols {
					pix[iofs] = background.R
					pix[iofs+1] = background.G
					pix[iofs+2] = background.B
					pix[iofs+3] = 0xff
				} else {
					y := r.yBuffer[ofs]
					t := r.tBuffer[ofs]
					// Opaque if no transparent spans sit above the ground.
					opaque := t == nil || t.Top() < y
					var y1, y2 int32
					if x == 0 {
						y1 = y
					} else {
						y1 = r.yBuffer[ofs-1]
						if opaque {
							if s := r.tBuffer[ofs-1]; s != nil {
								y1 = max32(y1, s.Top())
							}
						}
					}
					if z == 0 {
						y2 = y
					} else {
						y2 = r.yBuffer[ofs+pw]
						if opaque {
							if s := r.tBuffer[ofs+pw]; s != nil {
								// Bug fix: this previously updated y1 a
								// second time; the span top of the lower
								// neighbor belongs to y2.
								y2 = max32(y2, s.Top())
							}
						}
					}
					// Shading term, positive when this column is higher
					// than its neighbors, clamped at +36.
					d := ((y - y1) + (y - y2)) * 12
					if d > 36 {
						d = 36
					}
					col := cs[colIdx]
					col = color.RGBA{
						R: safeColor(int32(col.R) + d),
						G: safeColor(int32(col.G) + d),
						B: safeColor(int32(col.B) + d),
						A: 0xff}
					if !opaque {
						// Blend the transparent spans above the ground
						// onto the shaded base color.
						col = cols.BlendColors(t, col, y)
					}
					pix[iofs] = col.R
					pix[iofs+1] = col.G
					pix[iofs+2] = col.B
					pix[iofs+3] = col.A
				}
				iofs += 4
				ofs++
			}
			ofs += stride
			iofs -= istride
		}
	} else { // Solid images: no span blending needed.
		for z := height - 1; z >= 0; z-- {
			for x := 0; x < width; x++ {
				colIdx := r.cBuffer[ofs]
				if colIdx < 0 || colIdx >= numCols {
					pix[iofs] = background.R
					pix[iofs+1] = background.G
					pix[iofs+2] = background.B
					pix[iofs+3] = 0xff
				} else {
					var y, y1, y2 int32
					y = r.yBuffer[ofs]
					if x == 0 {
						y1 = y
					} else {
						y1 = r.yBuffer[ofs-1]
					}
					if z == 0 {
						y2 = y
					} else {
						y2 = r.yBuffer[ofs+pw]
					}
					d := ((y - y1) + (y - y2)) * 12
					if d > 36 {
						d = 36
					}
					col := cs[colIdx]
					pix[iofs] = safeColor(int32(col.R) + d)
					pix[iofs+1] = safeColor(int32(col.G) + d)
					pix[iofs+2] = safeColor(int32(col.B) + d)
					pix[iofs+3] = 0xff
				}
				iofs += 4
				ofs++
			}
			ofs += stride
			iofs -= istride
		}
	}
	return image
}

157
common/spans.go Normal file
View File

@ -0,0 +1,157 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"bytes"
"fmt"
)
const chunkSize = 1024
// Span is one node of a singly linked list of disjoint, ascending
// [From, To] intervals that all carry a value (here: a color index
// used when blending transparent nodes).
type Span struct {
	// Value stored for every position of the interval.
	Value int32
	// From is the first covered position (inclusive).
	From int32
	// To is the last covered position (inclusive).
	To int32
	// Next is the following span; its From is strictly greater than To.
	Next *Span
}
// SpanPool is a simple free-list allocator for Span nodes, handing
// out spans in chunks to reduce per-node allocations.
type SpanPool struct {
	// freeList heads the linked list of currently unused spans.
	freeList *Span
}
// NewSpanPool returns an empty span pool; spans are allocated
// lazily on the first Alloc call.
func NewSpanPool() *SpanPool {
	return new(SpanPool)
}
// Alloc returns a span from the free list, refilling the list with a
// fresh chunk of chunkSize spans when it is exhausted.
func (sp *SpanPool) Alloc() *Span {
	if head := sp.freeList; head != nil {
		sp.freeList = head.Next
		return head
	}
	// Out of spans: carve a new chunk and thread all but the first
	// element onto the free list; the first one is handed out.
	chunk := make([]Span, chunkSize)
	for i := chunkSize - 1; i >= 1; i-- {
		chunk[i].Next = sp.freeList
		sp.freeList = &chunk[i]
	}
	return &chunk[0]
}
// Free puts a single span back onto the free list. A nil span is
// ignored.
func (sp *SpanPool) Free(s *Span) {
	if s == nil {
		return
	}
	s.Next = sp.freeList
	sp.freeList = s
}
// FreeAll returns a whole span list to the pool by splicing it in
// front of the free list. A nil list is ignored.
func (sp *SpanPool) FreeAll(s *Span) {
	if s == nil {
		return
	}
	// Walk to the last node so the whole list can be linked in one go.
	tail := s
	for tail.Next != nil {
		tail = tail.Next
	}
	tail.Next = sp.freeList
	sp.freeList = s
}
// Insert adds position pos with the given value to the ordered span
// list starting at s and returns the (possibly new) head of the list.
// Directly neighboring positions with the same value are merged into
// one span, and gaps that become contiguous are closed. A position
// already covered by an existing span is left untouched, regardless
// of its value.
func (sp *SpanPool) Insert(s *Span, pos, value int32) *Span {
	// No head -> create.
	if s == nil {
		s = sp.Alloc()
		s.From = pos
		s.To = pos
		s.Value = value
		s.Next = nil
		return s
	}
	if pos < s.From {
		// Same value and directly neighbored -> extend head.
		if value == s.Value && pos == s.From-1 {
			s.From = pos
			return s
		}
		// Disjunct -> create new head.
		prev := sp.Alloc()
		prev.From = pos
		prev.To = pos
		prev.Value = value
		prev.Next = s
		return prev
	}
	head := s
	// Walk right while pos lies beyond the current span.
	for ; s != nil && pos > s.To; s = s.Next {
		next := s.Next
		if pos == s.To+1 && value == s.Value { // directly neighbored
			s.To = pos
			// Check if a gap has to be closed
			if next != nil && next.From == s.To+1 && value == next.Value {
				s.To = next.To
				s.Next = next.Next
				sp.Free(next)
			}
			return head
		}
		// Extend next?
		if next != nil && pos == next.From-1 && value == next.Value {
			next.From = pos
			return head
		}
		// Before next -> New between current and next
		if next == nil || pos < next.From {
			sn := sp.Alloc()
			sn.From = pos
			sn.To = pos
			sn.Value = value
			sn.Next = next
			s.Next = sn
			return head
		}
	}
	// pos falls inside an existing span: nothing to insert.
	return head
}
// Visit calls v for every span of the list, front to back.
func (s *Span) Visit(v func(*Span)) {
	for cur := s; cur != nil; cur = cur.Next {
		v(cur)
	}
}
// Len returns the number of spans in the list; 0 for a nil list.
func (s *Span) Len() int {
	count := 0
	for cur := s; cur != nil; cur = cur.Next {
		count++
	}
	return count
}
// Top returns the To value of the last span in the list, i.e. the
// highest covered position. The receiver must not be nil.
func (s *Span) Top() int32 {
	cur := s
	for cur.Next != nil {
		cur = cur.Next
	}
	return cur.To
}
// String renders the list as a comma separated sequence of
// "(From, To)" pairs, mainly for debugging and test output.
func (s *Span) String() string {
	var sb bytes.Buffer
	sep := ""
	s.Visit(func(s1 *Span) {
		sb.WriteString(sep)
		sep = ", "
		fmt.Fprintf(&sb, "(%d, %d)", s1.From, s1.To)
	})
	return sb.String()
}

78
common/spans_test.go Normal file
View File

@ -0,0 +1,78 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"math/rand"
"testing"
)
const spanItems = 3000
// TestSpans verifies that inserting the positions 0..spanItems-1 into
// a span list coalesces into the expected number of spans for
// ascending, descending, two-valued and randomly shuffled insertion
// orders.
func TestSpans(t *testing.T) {
	sp := NewSpanPool()
	var s *Span
	// Ascending order: everything must merge into a single span.
	for i := 0; i < spanItems; i++ {
		s = sp.Insert(s, int32(i), 42)
	}
	if n := s.Len(); n != 1 {
		t.Errorf("inc: Span length %d expected 1\n", n)
		t.Errorf("spans: %s\n", s)
	}
	sp.FreeAll(s)
	s = nil
	// Descending order: must also merge into a single span.
	for i := spanItems - 1; i >= 0; i-- {
		s = sp.Insert(s, int32(i), 42)
	}
	if n := s.Len(); n != 1 {
		t.Errorf("dec: Span length %d expected 1\n", n)
		t.Errorf("spans: %s\n", s)
	}
	sp.FreeAll(s)
	s = nil
	// Growing from both ends with two different values: spans with
	// different values must not merge, leaving exactly two spans.
	for i := 0; i < spanItems/2; i++ {
		j := spanItems - 1 - i
		s = sp.Insert(s, int32(i), 42)
		s = sp.Insert(s, int32(j), 21)
	}
	if n := s.Len(); n != 2 {
		t.Errorf("two: Span length %d expected 2\n", n)
		t.Errorf("spans: %s\n", s)
	}
	sp.FreeAll(s)
	// Shuffle all positions with random swaps ...
	inp := make([]int32, spanItems)
	for i := 0; i < spanItems; i++ {
		inp[i] = int32(i)
	}
	for i := 0; i < spanItems; i++ {
		i1 := rand.Int31n(int32(spanItems))
		i2 := rand.Int31n(int32(spanItems))
		inp[i1], inp[i2] = inp[i2], inp[i1]
	}
	s = nil
	// ... and insert them in that random order: the result must still
	// coalesce into a single span.
	for i := 0; i < spanItems; i++ {
		s = sp.Insert(s, inp[i], 42)
	}
	if n := s.Len(); n != 1 {
		t.Errorf("rand: Span length %d expected 1\n", n)
		t.Errorf("spans: %s\n", s)
	}
	sp.FreeAll(s)
}

17
common/version.go Normal file
View File

@ -0,0 +1,17 @@
// Copyright 2014 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import (
"fmt"
"os"
)
const MTSatelliteVersion = "0.9.1"
// PrintVersionAndExit writes the MTSatellite version to stdout and
// terminates the process with exit code 0. It never returns.
func PrintVersionAndExit() {
	fmt.Println("Version: " + MTSatelliteVersion)
	os.Exit(0)
}

98
common/yorder.go Normal file
View File

@ -0,0 +1,98 @@
// Copyright 2014, 2015, 2017 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package common
import "container/heap"
// YOrder is a "streaming" Y sorter. The blocks coming from the
// database are not sorted in Y order. To unpack only the
// relevant blocks (the ones at the surface) it would be nice
// to have them sorted in inverse Y order so that blocks with
// lower Y value are shadowed by ones with higher value.
//
// Sorting all blocks correctly would lead to loading all blocks
// before rendering. But a perfect order is not strictly necessary
// because the problem is (expensively) solved at per node level.
//
// The YOrder defines a "windowed" data structure in which all blocks
// are sorted correctly. So for small amounts of blocks the
// sorting is perfect. For larger amounts it is possible to
// have partially incorrect sortings but as stated above it doesn't
// matter. The window allows not to preload all blocks.
type YOrder struct {
	// RenderFn is invoked for each block leaving the window.
	RenderFn func(*Block) error
	// blocks is the heap-ordered window; highest Y at index 0.
	blocks []*Block
	// capacity is the maximum number of buffered blocks.
	capacity int
}
// NewYOrder creates a Y sorter with the given render callback and a
// window of at most capacity blocks.
func NewYOrder(renderFn func(*Block) error, capacity int) *YOrder {
	yo := &YOrder{
		RenderFn: renderFn,
		blocks:   make([]*Block, 0, capacity),
		capacity: capacity,
	}
	return yo
}
// Reset empties the window. The block pointers are nilled out first
// so the garbage collector can reclaim them despite the kept backing
// array.
func (yo *YOrder) Reset() {
	for i := range yo.blocks {
		yo.blocks[i] = nil
	}
	yo.blocks = yo.blocks[:0]
}
// RenderBlock feeds a block through the bounded sorting window.
// While the window is not full the block is only buffered and
// (nil, nil) is returned. Once full, the block that got rendered
// is returned so the caller may recycle it; any error from the
// render callback is passed through.
func (yo *YOrder) RenderBlock(block *Block) (*Block, error) {
	if len(yo.blocks) == yo.capacity {
		// blocks[0] is the buffered block with the highest Y.
		oblock := yo.blocks[0]
		if oblock.Coord.Y < block.Coord.Y {
			// New one is above highest old. Directly render new.
			err := yo.RenderFn(block)
			return block, err
		}
		// Remove the highest buffered block, buffer the new one in
		// its place, and render the removed one.
		heap.Pop(yo)
		heap.Push(yo, block)
		err := yo.RenderFn(oblock)
		return oblock, err
	}
	heap.Push(yo, block)
	return nil, nil
}
// Drain renders all remaining buffered blocks in heap order (highest
// Y first) and stops at the first error from the render callback.
func (yo *YOrder) Drain() error {
	for yo.Len() > 0 {
		block := heap.Pop(yo).(*Block)
		if err := yo.RenderFn(block); err != nil {
			return err
		}
	}
	return nil
}
// Len implements heap.Interface: the number of buffered blocks.
func (yo *YOrder) Len() int {
	return len(yo.blocks)
}
// Swap implements heap.Interface by exchanging two buffered blocks.
func (yo *YOrder) Swap(i, j int) {
	yo.blocks[i], yo.blocks[j] = yo.blocks[j], yo.blocks[i]
}
// Less implements heap.Interface.
func (yo *YOrder) Less(i, j int) bool {
	// Reverse order intended: the block with the highest Y
	// coordinate ends up at the top of the heap.
	return yo.blocks[i].Coord.Y > yo.blocks[j].Coord.Y
}
// Push implements heap.Interface; x must be a *Block.
func (yo *YOrder) Push(x interface{}) {
	yo.blocks = append(yo.blocks, x.(*Block))
}
// Pop implements heap.Interface: it removes and returns the last
// buffered block, nilling the freed slot so the backing array does
// not keep the block alive.
func (yo *YOrder) Pop() interface{} {
	last := len(yo.blocks) - 1
	x := yo.blocks[last]
	yo.blocks[last] = nil
	yo.blocks = yo.blocks[:last]
	return x
}

View File

@ -0,0 +1,28 @@
-- Seconds between two reports of the player positions.
local time_interval = 1.0
-- Path of the FIFO the JSON encoded player list is written to.
local fifo_path = "/tmp/mt_players_fifo"
-- Build a newline terminated JSON array describing all connected
-- players: each entry is the player's position plus a "name" field.
-- Returns "[]\n" when nobody is online.
function players_data()
	local players = minetest.get_connected_players()
	if #players == 0 then
		return "[]\n"
	end
	local entries = {}
	for i, player in ipairs(players) do
		-- Start from the position table and attach the player name.
		local entry = player:getpos()
		entry.name = player:get_player_name()
		entries[i] = entry
	end
	return minetest.write_json(entries) .. "\n"
end
-- Write the current player data into the FIFO (best effort: a
-- missing/unopenable FIFO is silently skipped) and reschedule
-- itself to run again after time_interval seconds.
function time_interval_func()
	local fifo = io.open(fifo_path, "w")
	if fifo ~= nil then
		fifo:write(players_data())
		fifo:close()
	end
	minetest.after(time_interval, time_interval_func)
end
-- Kick off the periodic reporting loop.
minetest.after(time_interval, time_interval_func)