Remplissage du dépôt

This commit is contained in:
sys4-fr
2018-12-13 21:09:02 +01:00
commit 6af26530ca
71 changed files with 12648 additions and 0 deletions

View File

@ -0,0 +1,145 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"os"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
leveldb "github.com/jmhodges/levigo"
)
type (
	// LevelDBBlockProducer is a helper to fetch blocks from a LevelDB.
	// It owns the iterator plus the options it was created with and
	// releases all of them in Close.
	LevelDBBlockProducer struct {
		db       *leveldb.DB          // open database handle
		opts     *leveldb.Options     // kept so Close can free them
		ro       *leveldb.ReadOptions // read options used by the iterator
		iterator *leveldb.Iterator    // nil once iteration is finished
		splitter common.KeySplitter   // numeric key -> coordinate
		decoder  common.KeyDecoder    // raw key bytes -> numeric key
	}
	// LevelDBBlockConsumer is a helper to store blocks in a LevelDB.
	LevelDBBlockConsumer struct {
		db      *leveldb.DB           // open database handle
		opts    *leveldb.Options      // kept so Close can free them
		wo      *leveldb.WriteOptions // write options used by Consume
		joiner  common.KeyJoiner      // coordinate -> numeric key
		encoder common.KeyEncoder     // numeric key -> raw key bytes
	}
)
// NewLevelDBBlockProducer returns a new helper to fetch blocks from a LevelDB.
// The database must already exist; it is opened read-only in spirit
// (create-if-missing is disabled) and an iterator positioned at the
// first key is prepared for Next.
func NewLevelDBBlockProducer(path string,
	splitter common.KeySplitter,
	decoder common.KeyDecoder) (ldbp *LevelDBBlockProducer, err error) {
	// An existing database is required: stat the path first.
	if _, err = os.Stat(path); err != nil {
		return
	}
	opts := leveldb.NewOptions()
	opts.SetCreateIfMissing(false)
	db, err := leveldb.Open(path, opts)
	if err != nil {
		opts.Close()
		return
	}
	ro := leveldb.NewReadOptions()
	// A full scan would only pollute the block cache.
	ro.SetFillCache(false)
	it := db.NewIterator(ro)
	it.SeekToFirst()
	ldbp = &LevelDBBlockProducer{
		db:       db,
		opts:     opts,
		ro:       ro,
		iterator: it,
		splitter: splitter,
		decoder:  decoder,
	}
	return
}
// Close releases the iterator (if still open), the read options, the
// database handle and the options of this producer. It never fails.
func (ldbp *LevelDBBlockProducer) Close() error {
	if it := ldbp.iterator; it != nil {
		it.Close()
	}
	ldbp.ro.Close()
	ldbp.db.Close()
	ldbp.opts.Close()
	return nil
}
// Next fetches the next block from a LevelDB.
// It fills block.Coord and block.Data and advances the iterator.
// When the iterator is exhausted it is closed, set to nil, and
// common.ErrNoMoreBlocks is returned; any later call short-circuits to
// the same error. A real iteration error takes precedence over
// ErrNoMoreBlocks.
func (ldbp *LevelDBBlockProducer) Next(block *common.Block) (err error) {
	if ldbp.iterator == nil {
		err = common.ErrNoMoreBlocks
		return
	}
	if !ldbp.iterator.Valid() {
		// Distinguish a genuine error from the normal end of data.
		if err = ldbp.iterator.GetError(); err == nil {
			err = common.ErrNoMoreBlocks
		}
		ldbp.iterator.Close()
		ldbp.iterator = nil
		return
	}
	var key int64
	// Decode the raw key bytes to a numeric key, then split it into
	// block coordinates.
	if key, err = ldbp.decoder(ldbp.iterator.Key()); err != nil {
		return
	}
	block.Coord = ldbp.splitter(key)
	block.Data = ldbp.iterator.Value()
	ldbp.iterator.Next()
	return
}
// NewLevelDBBlockConsumer returns a new helper to store blocks in a LevelDB.
// The database at path is created if it does not exist yet.
func NewLevelDBBlockConsumer(
	path string,
	joiner common.KeyJoiner,
	encoder common.KeyEncoder) (ldbc *LevelDBBlockConsumer, err error) {
	opts := leveldb.NewOptions()
	opts.SetCreateIfMissing(true)
	var db *leveldb.DB
	if db, err = leveldb.Open(path, opts); err != nil {
		// Release the options on failure; they were leaked before.
		// (NewLevelDBBlockProducer already does this.)
		opts.Close()
		return
	}
	ldbc = &LevelDBBlockConsumer{
		db:      db,
		opts:    opts,
		wo:      leveldb.NewWriteOptions(),
		joiner:  joiner,
		encoder: encoder}
	return
}
// Close closes a helper to store blocks in a LevelDB.
// Write options, database handle and options are released in that
// order. It never fails.
func (ldbc *LevelDBBlockConsumer) Close() error {
	ldbc.wo.Close()
	ldbc.db.Close()
	ldbc.opts.Close()
	return nil
}
// Consume stores a single block in the LevelDB: the coordinate is
// joined to a numeric key, encoded to raw bytes, and written together
// with the block data.
func (ldbc *LevelDBBlockConsumer) Consume(block *common.Block) error {
	key, err := ldbc.encoder(ldbc.joiner(block.Coord))
	if err != nil {
		return err
	}
	return ldbc.db.Put(ldbc.wo, key, block.Data)
}

175
cmd/mtdbconverter/main.go Normal file
View File

@ -0,0 +1,175 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"log"
"os"
"sync"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// usage prints a synopsis of the command line interface to stderr.
func usage() {
	out := os.Stderr
	fmt.Fprintf(out,
		"Usage: %s [<options>] <source database> <dest database>\n", os.Args[0])
	fmt.Fprintln(out, "Options:")
	flag.PrintDefaults()
}
// selectKeySplitter picks the numeric-key -> coordinate conversion
// matching the database's key layout (interleaved z-order vs. plain).
func selectKeySplitter(interleaved bool) common.KeySplitter {
	if interleaved {
		return common.InterleavedToCoord
	}
	return common.PlainToCoord
}

// selectKeyJoiner picks the coordinate -> numeric-key conversion
// matching the database's key layout.
func selectKeyJoiner(interleaved bool) common.KeyJoiner {
	if interleaved {
		return common.CoordToInterleaved
	}
	return common.CoordToPlain
}

// selectKeyDecoder picks the raw-bytes -> numeric-key decoder:
// big-endian binary for interleaved keys, decimal strings for plain.
func selectKeyDecoder(interleaved bool) common.KeyDecoder {
	if interleaved {
		return common.DecodeFromBigEndian
	}
	return common.DecodeStringFromBytes
}

// selectKeyEncoder picks the numeric-key -> raw-bytes encoder,
// mirroring selectKeyDecoder.
func selectKeyEncoder(interleaved bool) common.KeyEncoder {
	if interleaved {
		return common.EncodeToBigEndian
	}
	return common.EncodeStringToBytes
}
// copyProducerToConsumer streams all blocks from producer to consumer.
// Blocks are read in a separate goroutine and handed over through an
// unbuffered channel; a sync.Pool recycles Block values to reduce
// allocations. Progress is logged every 1000 blocks.
//
// Both write AND read errors are now returned to the caller; read
// failures used to be logged only, making the transfer look successful.
func copyProducerToConsumer(producer common.BlockProducer, consumer common.BlockConsumer) error {
	blocks := make(chan *common.Block)
	done := make(chan struct{})
	// readErr transports a read failure out of the producer goroutine.
	// Buffered so the goroutine can send and exit without a receiver.
	readErr := make(chan error, 1)
	defer close(done)
	pool := sync.Pool{New: func() interface{} { return new(common.Block) }}
	go func() {
		defer close(blocks)
		for {
			block := pool.Get().(*common.Block)
			if err := producer.Next(block); err != nil {
				if err != common.ErrNoMoreBlocks {
					readErr <- err
				}
				return
			}
			select {
			case blocks <- block:
			case <-done:
				return
			}
		}
	}()
	i := 0
	for block := range blocks {
		if err := consumer.Consume(block); err != nil {
			return err
		}
		// Drop the payload so the pool does not pin large buffers.
		block.Data = nil
		pool.Put(block)
		i++
		if i%1000 == 0 {
			log.Printf("%d blocks transferred.\n", i)
		}
	}
	// blocks is closed, so any read error has already been buffered.
	select {
	case err := <-readErr:
		log.Printf("Reading failed: %s\n", err)
		return err
	default:
	}
	log.Printf("%d blocks transferred in total.\n", i)
	return nil
}
// main parses the command line, opens the source and destination
// databases with the requested backends and key layouts, and copies all
// blocks from one to the other.
func main() {
	var (
		srcBackend     string
		dstBackend     string
		srcInterleaved bool
		dstInterleaved bool
		version        bool
	)
	flag.Usage = usage
	flag.StringVar(&srcBackend, "source-backend", "sqlite",
		"type of source database (leveldb, sqlite)")
	flag.StringVar(&srcBackend, "sb", "sqlite",
		"type of source database (leveldb, sqlite). Shorthand")
	flag.StringVar(&dstBackend, "dest-backend", "leveldb",
		"type of destination database (leveldb, sqlite)")
	flag.StringVar(&dstBackend, "db", "leveldb",
		"type of destination database (leveldb, sqlite). Shorthand")
	flag.BoolVar(&srcInterleaved, "source-interleaved", false,
		"Is source database interleaved?")
	flag.BoolVar(&srcInterleaved, "si", false,
		"Is source database interleaved? Shorthand")
	flag.BoolVar(&dstInterleaved, "dest-interleaved", true,
		"Should dest database be interleaved?")
	// Fixed copy-paste bug: the -di help text used to say "source".
	flag.BoolVar(&dstInterleaved, "di", true,
		"Should dest database be interleaved? Shorthand")
	flag.BoolVar(&version, "version", false, "Print version and exit.")
	flag.Parse()
	if version {
		common.PrintVersionAndExit()
	}
	if flag.NArg() < 2 {
		log.Fatal("Missing source and/or destination database.")
	}
	var (
		producer common.BlockProducer
		consumer common.BlockConsumer
		err      error
	)
	// Open the source: first positional argument.
	if srcBackend == "sqlite" {
		if producer, err = NewSQLiteBlockProducer(
			flag.Arg(0),
			selectKeySplitter(srcInterleaved)); err != nil {
			log.Fatalf("Cannot open '%s': %s", flag.Arg(0), err)
		}
	} else { // LevelDB
		if producer, err = NewLevelDBBlockProducer(
			flag.Arg(0),
			selectKeySplitter(srcInterleaved),
			selectKeyDecoder(srcInterleaved)); err != nil {
			log.Fatalf("Cannot open '%s': %s", flag.Arg(0), err)
		}
	}
	defer producer.Close()
	// Open the destination: second positional argument.
	if dstBackend == "sqlite" {
		if consumer, err = NewSQLiteBlockConsumer(
			flag.Arg(1),
			selectKeyJoiner(dstInterleaved)); err != nil {
			log.Fatalf("Cannot open '%s': %s", flag.Arg(1), err)
		}
	} else { // LevelDB
		if consumer, err = NewLevelDBBlockConsumer(
			flag.Arg(1),
			selectKeyJoiner(dstInterleaved),
			selectKeyEncoder(dstInterleaved)); err != nil {
			log.Fatalf("Cannot open '%s': %s", flag.Arg(1), err)
		}
	}
	defer consumer.Close()
	if err = copyProducerToConsumer(producer, consumer); err != nil {
		log.Fatalf("Database transfer failed: %s\n", err)
	}
}

185
cmd/mtdbconverter/sqlite.go Normal file
View File

@ -0,0 +1,185 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"database/sql"
"errors"
"os"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
_ "github.com/mattn/go-sqlite3"
)
// SQL statements used to (re-)create, clear, fill and scan the blocks table.
const (
	createSQL = "CREATE TABLE blocks (pos INT NOT NULL PRIMARY KEY, data BLOB)"
	insertSQL = "INSERT INTO blocks (pos, data) VALUES (?, ?)"
	deleteSQL = "DELETE FROM blocks"
	selectSQL = "SELECT pos, data FROM blocks"
)

// ErrDatabaseNotExists indicates that the database does not exist.
// (Message fixed to follow Go error-string conventions: lowercase,
// no punctuation, correct grammar. Callers compare by identity.)
var ErrDatabaseNotExists = errors.New("database does not exist")

const blocksPerTx = 128 // Number of blocks copied in a transaction.
type (
	// SQLiteBlockProducer helps getting blocks from a SQLite database.
	// It iterates over a single SELECT result set opened at creation.
	SQLiteBlockProducer struct {
		db       *sql.DB            // open database handle
		rows     *sql.Rows          // nil once iteration is finished
		splitter common.KeySplitter // numeric key -> coordinate
	}
	// SQLiteBlockConsumer helps storing blocks into a SQLite database.
	// Inserts are batched into transactions of blocksPerTx blocks.
	SQLiteBlockConsumer struct {
		db         *sql.DB          // open database handle
		insertStmt *sql.Stmt        // prepared INSERT, reused per tx
		tx         *sql.Tx          // currently open transaction
		txCounter  int              // blocks inserted in current tx
		joiner     common.KeyJoiner // coordinate -> numeric key
	}
)
// fileExists reports whether path is present on disk. Any stat outcome
// other than "does not exist" (including permission errors) counts as
// existing.
func fileExists(path string) bool {
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return false
	}
	return true
}
// NewSQLiteBlockConsumer returns a storage helper for SQLite databases.
// A fresh database gets the blocks table created; an existing one is
// emptied first. A prepared INSERT statement and an initial transaction
// are set up; on any failure everything opened so far is closed again.
func NewSQLiteBlockConsumer(
	path string,
	joiner common.KeyJoiner) (sbc *SQLiteBlockConsumer, err error) {
	// Decide before sql.Open, which would create the file itself.
	createNew := !fileExists(path)
	var db *sql.DB
	if db, err = sql.Open("sqlite3", path); err != nil {
		return
	}
	if createNew {
		if _, err = db.Exec(createSQL); err != nil {
			db.Close()
			return
		}
	} else {
		// Reuse an existing database but start from an empty table.
		if _, err = db.Exec(deleteSQL); err != nil {
			db.Close()
			return
		}
	}
	var insertStmt *sql.Stmt
	if insertStmt, err = db.Prepare(insertSQL); err != nil {
		db.Close()
		return
	}
	var tx *sql.Tx
	if tx, err = db.Begin(); err != nil {
		insertStmt.Close()
		db.Close()
		return
	}
	sbc = &SQLiteBlockConsumer{
		db:         db,
		insertStmt: insertStmt,
		tx:         tx,
		joiner:     joiner}
	return
}
// Close commits the pending transaction and releases all SQLite
// resources. The first error encountered is returned; the Commit error
// used to be silently dropped, which could hide lost writes at the end
// of a transfer.
func (sbc *SQLiteBlockConsumer) Close() error {
	err := sbc.tx.Commit()
	if cerr := sbc.insertStmt.Close(); err == nil {
		err = cerr
	}
	if cerr := sbc.db.Close(); err == nil {
		err = cerr
	}
	return err
}
// getTx returns the transaction to use for the next insert. Every
// blocksPerTx inserts the current transaction is committed and a new
// one opened, bounding transaction size during large transfers.
func (sbc *SQLiteBlockConsumer) getTx() (tx *sql.Tx, err error) {
	if sbc.txCounter >= blocksPerTx {
		sbc.txCounter = 0
		// NOTE(review): if Commit fails here, sbc.tx still points at the
		// finished transaction and later calls will error — confirm
		// this is acceptable for the callers.
		if err = sbc.tx.Commit(); err != nil {
			return
		}
		if sbc.tx, err = sbc.db.Begin(); err != nil {
			return
		}
	}
	sbc.txCounter++
	tx = sbc.tx
	return
}
// Consume stores one block in the SQLite database, joining its
// coordinate into the numeric primary key. The insert runs inside the
// currently batched transaction.
func (sbc *SQLiteBlockConsumer) Consume(block *common.Block) error {
	tx, err := sbc.getTx()
	if err != nil {
		return err
	}
	_, err = tx.Stmt(sbc.insertStmt).Exec(sbc.joiner(block.Coord), block.Data)
	return err
}
// NewSQLiteBlockProducer returns a new producer to fetch blocks from a
// SQLite database. It fails with ErrDatabaseNotExists if the file is
// missing, and opens a cursor over all rows of the blocks table.
func NewSQLiteBlockProducer(
	path string,
	splitter common.KeySplitter) (*SQLiteBlockProducer, error) {
	if !fileExists(path) {
		return nil, ErrDatabaseNotExists
	}
	db, err := sql.Open("sqlite3", path)
	if err != nil {
		return nil, err
	}
	rows, err := db.Query(selectSQL)
	if err != nil {
		db.Close()
		return nil, err
	}
	sbp := &SQLiteBlockProducer{
		db:       db,
		rows:     rows,
		splitter: splitter,
	}
	return sbp, nil
}
// Next fetches the next block from a SQLite database.
// It reports common.ErrNoMoreBlocks once the result set is exhausted.
// A real iteration error (previously masked as ErrNoMoreBlocks because
// rows.Err was never consulted) is now returned to the caller.
func (sbp *SQLiteBlockProducer) Next(block *common.Block) (err error) {
	if sbp.rows == nil {
		err = common.ErrNoMoreBlocks
		return
	}
	if sbp.rows.Next() {
		var key int64
		if err = sbp.rows.Scan(&key, &block.Data); err == nil {
			block.Coord = sbp.splitter(key)
		}
		return
	}
	// Next returned false: either normal end of data or a row error.
	err = sbp.rows.Err()
	sbp.rows.Close()
	sbp.rows = nil
	if err == nil {
		err = common.ErrNoMoreBlocks
	}
	return
}
// Close closes a block producer from a SQLite database.
// The result set may already have been closed by Next on exhaustion,
// hence the nil check.
func (sbp *SQLiteBlockProducer) Close() error {
	if sbp.rows != nil {
		sbp.rows.Close()
	}
	return sbp.db.Close()
}

View File

@ -0,0 +1,45 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
type (
	// Block is the essential transfer unit to and from the database.
	// Key is the serialized spatial position.
	// Data is the serialized form of the corresponding block data.
	Block struct {
		Key  []byte
		Data []byte
	}
	// Session is a database session.
	Session interface {
		// Del deletes a block by a given key. It reports whether the
		// block existed.
		Del(hash, key []byte) (bool, error)
		// Fetch fetches the block data for a given position.
		Fetch(hash, key []byte) ([]byte, error)
		// InTransaction returns true if a transaction is running.
		InTransaction() bool
		// Store stores a block with a given position and data. It
		// reports whether a block already existed at that position.
		Store(hash, key, value []byte) (bool, error)
		// AllKeys returns all keys in the database plus their count.
		// Closing done aborts the key stream early.
		AllKeys(hash []byte, done <-chan struct{}) (<-chan []byte, int, error)
		// SpatialQuery performs a box query between the positions first and second.
		SpatialQuery(hash, first, second []byte, done <-chan struct{}) (<-chan Block, error)
		// BeginTransaction starts a transaction.
		BeginTransaction() error
		// CommitTransaction finishes a transaction.
		CommitTransaction() error
		// Close closes the database session.
		Close() error
	}
	// Backend is the interface representing a database.
	Backend interface {
		// NewSession opens a new session.
		NewSession() (Session, error)
		// Shutdown shuts down the database server.
		Shutdown() error
	}
)

View File

@ -0,0 +1,77 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"encoding/json"
"log"
"net/http"
"sync"
"bytes"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// quantizationFactor coarsens reported change positions.
// Pull up if it _really_ produces too much data.
const quantizationFactor = 1

// quantizedXZ is a change-map key: a block column position divided by
// quantizationFactor (the Y axis is intentionally ignored).
type quantizedXZ struct {
	X, Z int16
}

// changeTracker records which block columns changed since the last
// flush. Safe for concurrent use via its mutex.
type changeTracker struct {
	changes map[quantizedXZ]struct{}
	mutex   sync.Mutex
}

// newChangeTracker returns an empty, ready-to-use change tracker.
func newChangeTracker() *changeTracker {
	return &changeTracker{changes: make(map[quantizedXZ]struct{})}
}
// BlockChanged records a change at coord, quantized to the tracker's
// resolution. Safe to call from multiple goroutines.
func (ct *changeTracker) BlockChanged(coord common.Coord) {
	pos := quantizedXZ{
		X: coord.X / quantizationFactor,
		Z: coord.Z / quantizationFactor,
	}
	ct.mutex.Lock()
	defer ct.mutex.Unlock()
	ct.changes[pos] = struct{}{}
}
// FlushChanges swaps out the accumulated change set and, if it was
// non-empty, POSTs it as a JSON array to url in a background goroutine.
// Errors are logged, not returned; the POST is fire-and-forget.
func (ct *changeTracker) FlushChanges(url string) {
	var oldChanges map[quantizedXZ]struct{}
	// Swap the map under the lock so trackers can keep recording.
	ct.mutex.Lock()
	if len(ct.changes) > 0 {
		oldChanges = ct.changes
		ct.changes = make(map[quantizedXZ]struct{})
	}
	ct.mutex.Unlock()
	if oldChanges == nil {
		return
	}
	// The network round trip happens off the caller's goroutine;
	// oldChanges is exclusively owned by it from here on.
	go func() {
		changes := make([]quantizedXZ, len(oldChanges))
		i := 0
		for change := range oldChanges {
			changes[i] = change
			i++
		}
		var err error
		var buf bytes.Buffer
		encoder := json.NewEncoder(&buf)
		if err = encoder.Encode(changes); err != nil {
			log.Printf("WARN: encode changes to JSON failed: %s\n", err)
			return
		}
		var resp *http.Response
		resp, err = http.Post(
			url, "application/json", bytes.NewBuffer(buf.Bytes()))
		// Close the body even on non-nil error responses.
		if resp != nil {
			defer resp.Body.Close()
		}
		if err != nil {
			log.Printf("WARN: posting changes to %s failed: %s\n", url, err)
		}
	}()
}

View File

@ -0,0 +1,241 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"bufio"
"log"
"net"
"strconv"
)
// Canned RESP (Redis protocol) reply fragments.
var (
	redisOk          = []byte("+OK\r\n")
	redisPong        = []byte("+PONG\r\n")
	redisError       = []byte("-ERR\r\n")
	redisNoSuchBlock = []byte("$-1\r\n") // null bulk string
	redisCrnl        = []byte("\r\n")
	redisEmptyArray  = []byte("*0\r\n")
	redisQueued      = []byte("+QUEUED\r\n")
	redisTrue        = []byte(":1\r\n")
	redisFalse       = []byte(":0\r\n")
)

// Connection serves the Redis protocol to one client, translating
// commands into calls on its database Session.
type Connection struct {
	conn              net.Conn
	session           Session
	maxBulkStringSize int64  // parser limit, guards against huge inputs
	boolArray         []bool // queued HSET results within a MULTI/EXEC
}
// NewConnection wires a network connection to a database session with
// the given bulk-string size limit for the protocol parser.
func NewConnection(conn net.Conn, session Session, maxBulkStringSize int64) *Connection {
	c := Connection{
		conn:              conn,
		session:           session,
		maxBulkStringSize: maxBulkStringSize,
		boolArray:         []bool{},
	}
	return &c
}
// Run parses and serves Redis commands on the connection until the
// client disconnects or the parser stops, then closes session and
// socket. Intended to run in its own goroutine.
func (c *Connection) Run() {
	defer func() {
		c.session.Close()
		c.conn.Close()
	}()
	// Buffer reads; the parser works line- and chunk-wise.
	r := bufio.NewReaderSize(c.conn, 8*1024)
	parser := NewRedisParser(r, c, c.maxBulkStringSize)
	parser.Parse()
	log.Println("client disconnected")
}
// logError logs err (if any) and reports whether everything was fine:
// true for a nil error, false otherwise.
func logError(err error) bool {
	if err == nil {
		return true
	}
	log.Printf("ERROR: %s\n", err)
	return false
}
// Hdel handles the Redis HDEL command: delete the block stored under
// key and reply 1/0 depending on whether it existed.
func (c *Connection) Hdel(hash, key []byte) bool {
	existed, err := c.session.Del(hash, key)
	if err != nil {
		return c.writeError(err)
	}
	return c.writeBool(existed)
}
// Hget handles the Redis HGET command: fetch the block stored under key
// and reply with its data (or a null bulk string if absent).
func (c *Connection) Hget(hash, key []byte) bool {
	data, err := c.session.Fetch(hash, key)
	if err != nil {
		return c.writeError(err)
	}
	return c.writeBlock(data)
}
// Hset handles the Redis HSET command: store data under key.
// Inside a MULTI transaction the reply is +QUEUED and the existence
// flag is remembered so EXEC can answer with the full result array;
// otherwise 1/0 is sent immediately.
func (c *Connection) Hset(hash, key, data []byte) bool {
	var err error
	var exists bool
	if exists, err = c.session.Store(hash, key, data); err != nil {
		return c.writeError(err)
	}
	if c.session.InTransaction() {
		c.boolArray = append(c.boolArray, exists)
		return c.writeQueued()
	}
	return c.writeBool(exists)
}
// Multi handles the Redis MULTI command: begin a transaction. A nested
// MULTI is only warned about; the reply is +OK either way.
func (c *Connection) Multi() bool {
	if !c.session.InTransaction() {
		if err := c.session.BeginTransaction(); err != nil {
			return c.writeError(err)
		}
	} else {
		log.Println("WARN: Already running transaction.")
	}
	return c.writeOk()
}
// Exec handles the Redis EXEC command: commit the running transaction
// and reply with the array of queued HSET results. Without an open
// transaction it replies with an empty array, as Redis does.
func (c *Connection) Exec() bool {
	if !c.session.InTransaction() {
		return c.writeEmptyArray()
	}
	// Detach the queued results before committing so a fresh MULTI
	// starts clean even if the commit fails.
	arr := c.boolArray
	c.boolArray = []bool{}
	if err := c.session.CommitTransaction(); err != nil {
		return c.writeError(err)
	}
	return c.writeBoolArray(arr)
}
// Hkeys handles the Redis HKEYS command: stream all keys to the client
// as a length-prefixed array. The count is known up front from AllKeys;
// closing done aborts the producer if a write fails mid-stream.
func (c *Connection) Hkeys(hash []byte) bool {
	var (
		err  error
		n    int
		keys <-chan []byte
		done = make(chan struct{})
	)
	defer close(done)
	if keys, n, err = c.session.AllKeys(hash, done); err != nil {
		return c.writeError(err)
	}
	if n == 0 {
		return c.writeEmptyArray()
	}
	// RESP array header: *<count>\r\n
	if _, err := c.conn.Write(redisLength('*', n)); err != nil {
		return logError(err)
	}
	for key := range keys {
		if err = c.writeBulkString(key); err != nil {
			return logError(err)
		}
	}
	return true
}
// Ping handles the Redis PING command by answering +PONG.
func (c *Connection) Ping() bool {
	return c.writeMessage(redisPong)
}
// HSpatial handles the custom HSPATIAL command: stream all blocks in
// the box spanned by first and second as alternating key/data bulk
// strings, terminated by a null bulk string. Closing done aborts the
// query if a write fails mid-stream.
func (c *Connection) HSpatial(hash, first, second []byte) bool {
	var (
		err    error
		blocks <-chan Block
		done   = make(chan struct{})
	)
	defer close(done)
	if blocks, err = c.session.SpatialQuery(hash, first, second, done); err != nil {
		return c.writeError(err)
	}
	for block := range blocks {
		if err = c.writeBulkString(block.Key); err != nil {
			return logError(err)
		}
		if err = c.writeBulkString(block.Data); err != nil {
			return logError(err)
		}
	}
	// nil encodes as $-1 and marks the end of the stream.
	return logError(c.writeBulkString(nil))
}
// writeError logs err and sends a generic -ERR reply.
func (c *Connection) writeError(err error) bool {
	logError(err)
	return c.writeMessage(redisError)
}

// writeEmptyArray sends *0 (an empty RESP array).
func (c *Connection) writeEmptyArray() bool {
	return c.writeMessage(redisEmptyArray)
}

// writeBool sends :1 for true and :0 for false.
func (c *Connection) writeBool(b bool) bool {
	if b {
		return c.writeMessage(redisTrue)
	}
	return c.writeMessage(redisFalse)
}
// redisLength builds a RESP length header: the type prefix (e.g. '*'
// for arrays, '$' for bulk strings), the decimal count s, and CRLF.
func redisLength(prefix byte, s int) []byte {
	out := make([]byte, 0, 16)
	out = append(out, prefix)
	out = strconv.AppendInt(out, int64(s), 10)
	return append(out, '\r', '\n')
}
// writeBoolArray sends arr as a RESP array of :1/:0 integer replies.
func (c *Connection) writeBoolArray(arr []bool) bool {
	header := redisLength('*', len(arr))
	if _, err := c.conn.Write(header); err != nil {
		return logError(err)
	}
	for _, value := range arr {
		if !c.writeBool(value) {
			return false
		}
	}
	return true
}
// writeMessage writes a pre-built reply, logging any write error.
func (c *Connection) writeMessage(msg []byte) bool {
	_, err := c.conn.Write(msg)
	return logError(err)
}

// writeOk sends +OK.
func (c *Connection) writeOk() bool {
	return c.writeMessage(redisOk)
}

// writeQueued sends +QUEUED (reply inside MULTI).
func (c *Connection) writeQueued() bool {
	return c.writeMessage(redisQueued)
}

// writeBlock sends block data as a bulk string ($-1 for nil).
func (c *Connection) writeBlock(data []byte) bool {
	return logError(c.writeBulkString(data))
}
// writeBulkString writes data as a RESP bulk string:
// "$<len>\r\n<data>\r\n", or "$-1\r\n" when data is nil.
func (c *Connection) writeBulkString(data []byte) (err error) {
	w := c.conn
	if data == nil {
		_, err = w.Write(redisNoSuchBlock)
		return
	}
	if _, err = w.Write(redisLength('$', len(data))); err != nil {
		return
	}
	if _, err = w.Write(data); err != nil {
		return
	}
	_, err = w.Write(redisCrnl)
	return
}

429
cmd/mtredisalize/leveldb.go Normal file
View File

@ -0,0 +1,429 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"log"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
leveldb "github.com/jmhodges/levigo"
)
// LevelDBBackend is a Backend implementation on top of a LevelDB.
// For non-interleaved databases a 3D coverage index is built at startup
// to make spatial queries feasible.
type LevelDBBackend struct {
	cache         *leveldb.Cache      // optional LRU block cache, may be nil
	db            *leveldb.DB         // open database handle
	interleaved   bool                // keys stored in interleaved (z-order) form?
	coverage      *common.Coverage3D  // only set when !interleaved
	encoder       common.KeyTranscoder // internal -> wire key form
	decoder       common.KeyTranscoder // wire -> internal key form
	changeTracker *changeTracker      // optional, may be nil
}

// LevelDBSession is a Session on a LevelDBBackend. A running
// transaction is represented by a LevelDB write batch.
type LevelDBSession struct {
	backend *LevelDBBackend
	tx      *leveldb.WriteBatch // nil when no transaction is open
}
// NewLeveDBBackend opens (or creates) a LevelDB at path and sets up the
// key transcoders matching the storage layout. For non-interleaved
// databases the coverage index is built immediately; on failure the
// backend is shut down again.
// NOTE(review): the name is missing an 'l' (LeveDB); it is exported and
// called from main, so it is left unchanged here.
func NewLeveDBBackend(
	path string,
	changeTracker *changeTracker,
	interleaved bool,
	cacheSize int) (ldb *LevelDBBackend, err error) {
	opts := leveldb.NewOptions()
	var cache *leveldb.Cache
	// cacheSize is given in MB; zero or negative disables the cache.
	if cacheSize > 0 {
		cache = leveldb.NewLRUCache(cacheSize * 1024 * 1024)
		opts.SetCache(cache)
	}
	opts.SetCreateIfMissing(true)
	var db *leveldb.DB
	if db, err = leveldb.Open(path, opts); err != nil {
		if cache != nil {
			cache.Close()
		}
		return
	}
	var (
		encoder common.KeyTranscoder
		decoder common.KeyTranscoder
	)
	// Clients speak plain keys; translate if storage is interleaved.
	if interleaved {
		encoder = common.TranscodeInterleavedToPlain
		decoder = common.TranscodePlainToInterleaved
	} else {
		encoder = common.IdentityTranscoder
		decoder = common.IdentityTranscoder
	}
	ldb = &LevelDBBackend{
		cache:         cache,
		db:            db,
		interleaved:   interleaved,
		encoder:       encoder,
		decoder:       decoder,
		changeTracker: changeTracker,
	}
	if !interleaved {
		if err = ldb.buildCoverage(); err != nil {
			ldb.Shutdown()
			ldb = nil
			return
		}
	}
	return
}
// buildCoverage scans every key in the database once and fills the 3D
// coverage index used by plain (non-interleaved) spatial queries.
func (ldb *LevelDBBackend) buildCoverage() error {
	log.Println("INFO: Start building coverage index (this may take some time)...")
	coverage := common.NewCoverage3D()
	ro := leveldb.NewReadOptions()
	defer ro.Close()
	ro.SetFillCache(false)
	it := ldb.db.NewIterator(ro)
	// The iterator used to be leaked; ensure it is always closed.
	defer it.Close()
	it.SeekToFirst()
	for ; it.Valid(); it.Next() {
		c, err := common.DecodeStringBytesToCoord(it.Key())
		if err != nil {
			return err
		}
		coverage.Insert(c)
	}
	if err := it.GetError(); err != nil {
		return err
	}
	ldb.coverage = coverage
	log.Println("INFO: Finished building coverage index.")
	return nil
}
// NewSession opens a new session on this backend; sessions share the
// backend's database handle.
func (ldb *LevelDBBackend) NewSession() (Session, error) {
	return &LevelDBSession{ldb, nil}, nil
}

// Close releases a session, discarding any uncommitted write batch.
func (ldbs *LevelDBSession) Close() error {
	if ldbs.tx != nil {
		ldbs.tx.Close()
	}
	return nil
}

// Shutdown closes the database and its cache (if one was configured).
func (ldb *LevelDBBackend) Shutdown() error {
	ldb.db.Close()
	if ldb.cache != nil {
		ldb.cache.Close()
	}
	return nil
}
// Del removes the block stored under key (after translating it to the
// storage key form) and reports whether it existed.
// NOTE(review): existence check and delete are two separate operations,
// so a concurrent writer can race between them — confirm acceptable.
func (ldbs *LevelDBSession) Del(hash, key []byte) (success bool, err error) {
	if key, err = ldbs.backend.decoder(key); err != nil {
		return
	}
	ro := leveldb.NewReadOptions()
	defer ro.Close()
	var data []byte
	data, err = ldbs.backend.db.Get(ro, key)
	if err != nil {
		return
	}
	// A nil value means the key was not present.
	if data == nil {
		success = false
		return
	}
	success = true
	wo := leveldb.NewWriteOptions()
	defer wo.Close()
	err = ldbs.backend.db.Delete(wo, key)
	return
}
// Fetch returns the data stored under key, translating the key to its
// storage form first. A missing key yields a nil value and nil error.
func (ldbs *LevelDBSession) Fetch(hash, key []byte) (value []byte, err error) {
	if key, err = ldbs.backend.decoder(key); err != nil {
		return
	}
	ro := leveldb.NewReadOptions()
	defer ro.Close()
	return ldbs.backend.db.Get(ro, key)
}
// InTransaction reports whether a write batch is currently open.
func (ldbs *LevelDBSession) InTransaction() bool {
	return ldbs.tx != nil
}
// keyExists reports whether key is present in db (a nil Get result
// means absent).
func keyExists(db *leveldb.DB, key []byte) (bool, error) {
	ro := leveldb.NewReadOptions()
	defer ro.Close()
	data, err := db.Get(ro, key)
	if err != nil {
		return false, err
	}
	return data != nil, nil
}
// Store writes value under key (translated to storage form) and reports
// whether a block already existed there. Inside a transaction the write
// only goes into the batch; otherwise it hits the database directly.
// Coverage index and change tracker are updated with the ORIGINAL
// (plain) key form.
func (ldbs *LevelDBSession) Store(hash, key, value []byte) (exists bool, err error) {
	// Keep the untranslated key for coordinate decoding below.
	origKey := key
	if key, err = ldbs.backend.decoder(key); err != nil {
		return
	}
	if exists, err = keyExists(ldbs.backend.db, key); err != nil {
		return
	}
	if ldbs.tx != nil {
		ldbs.tx.Put(key, value)
	} else {
		wo := leveldb.NewWriteOptions()
		err = ldbs.backend.db.Put(wo, key, value)
		wo.Close()
		if err != nil {
			return
		}
	}
	// This is technically too early because this is done in transactions
	// which are committed (and possibly fail) later.
	if ldbs.backend.changeTracker != nil || ldbs.backend.coverage != nil {
		// Note: := shadows err on purpose; the explicit return below
		// propagates it.
		c, err := common.DecodeStringBytesToCoord(origKey)
		if err != nil {
			return exists, err
		}
		if ldbs.backend.coverage != nil && !exists {
			ldbs.backend.coverage.Insert(c)
		}
		if ldbs.backend.changeTracker != nil {
			ldbs.backend.changeTracker.BlockChanged(c)
		}
	}
	return
}
// BeginTransaction opens a fresh write batch.
// NOTE(review): an already-open batch would be overwritten (and leaked)
// here; Connection.Multi guards against nested calls — confirm no other
// caller can nest.
func (ldbs *LevelDBSession) BeginTransaction() error {
	ldbs.tx = leveldb.NewWriteBatch()
	return nil
}

// CommitTransaction writes the open batch synchronously to the
// database. Without an open batch it only warns and returns nil.
func (ldbs *LevelDBSession) CommitTransaction() (err error) {
	tx := ldbs.tx
	if tx == nil {
		log.Println("WARN: No transaction running.")
		return
	}
	ldbs.tx = nil
	wo := leveldb.NewWriteOptions()
	// Sync so a commit survives a crash.
	wo.SetSync(true)
	err = ldbs.backend.db.Write(wo, tx)
	wo.Close()
	tx.Close()
	return
}
// AllKeys returns a channel streaming every key in the database plus
// the total key count. The database is scanned twice: once up front to
// count (the RESP array header needs the length first), then again in a
// goroutine that feeds the channel. Closing done aborts the stream.
func (ldbs *LevelDBSession) AllKeys(
	hash []byte,
	done <-chan struct{}) (<-chan []byte, int, error) {
	ro := leveldb.NewReadOptions()
	ro.SetFillCache(false)
	it := ldbs.backend.db.NewIterator(ro)
	it.SeekToFirst()
	// First pass: count keys.
	var n int
	for ; it.Valid(); it.Next() {
		n++
	}
	if err := it.GetError(); err != nil {
		it.Close()
		ro.Close()
		return nil, n, err
	}
	keys := make(chan []byte)
	// Second pass: stream keys; iterator and options stay alive until
	// the goroutine finishes.
	go func() {
		defer ro.Close()
		defer close(keys)
		defer it.Close()
		it.SeekToFirst()
		encoder := ldbs.backend.encoder
		for ; it.Valid(); it.Next() {
			if key, err := encoder(it.Key()); err == nil {
				select {
				case keys <- key:
				case <-done:
					return
				}
			} else {
				log.Printf("WARN: %s\n", err)
				return
			}
		}
		if err := it.GetError(); err != nil {
			log.Printf("WARN: %s\n", err)
		}
	}()
	return keys, n, nil
}
// SpatialQuery performs a box query between first and second,
// dispatching to the strategy matching the backend's key layout.
func (ldbs *LevelDBSession) SpatialQuery(
	hash, first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	if !ldbs.backend.interleaved {
		return ldbs.plainSpatialQuery(first, second, done)
	}
	return ldbs.interleavedSpatialQuery(first, second, done)
}
// plainSpatialQuery answers a box query on a plain-keyed database by
// walking the coverage index and fetching each candidate block with an
// individual Get. Results are streamed on the returned channel; closing
// done aborts the query.
func (ldbs *LevelDBSession) plainSpatialQuery(
	first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	var (
		firstKey  int64
		secondKey int64
		err       error
	)
	if firstKey, err = common.DecodeStringFromBytes(first); err != nil {
		return nil, err
	}
	if secondKey, err = common.DecodeStringFromBytes(second); err != nil {
		return nil, err
	}
	c1 := common.PlainToCoord(firstKey)
	c2 := common.PlainToCoord(secondKey)
	// Normalize the box corners.
	c1, c2 = common.MinCoord(c1, c2), common.MaxCoord(c1, c2)
	blocks := make(chan Block)
	go func() {
		defer close(blocks)
		ro := leveldb.NewReadOptions()
		defer ro.Close()
		var a, b common.Coord
		for _, r := range ldbs.backend.coverage.Query(c1, c2) {
			a.Z, b.Z = int16(r.Z), int16(r.Z)
			a.X, b.X = int16(r.X1), int16(r.X2)
			for a.Y = r.Y2; a.Y >= r.Y1; a.Y-- {
				b.Y = a.Y
				// The keys in the database are stored and ordered as strings
				// "1", "10", ..., "19", "2", "20", "21" so you cannot use
				// an iterator and assume it is numerically ordered.
				// Each block is fetched with a Get instead.
				for f, t := common.CoordToPlain(a), common.CoordToPlain(b); f <= t; f++ {
					key := common.StringToBytes(f)
					value, err := ldbs.backend.db.Get(ro, key)
					if err != nil {
						log.Printf("get failed: %s\n", err)
						return
					}
					// nil value: no block at this position.
					if value != nil {
						select {
						case blocks <- Block{Key: key, Data: value}:
						case <-done:
							return
						}
					}
				}
			}
		}
	}()
	return blocks, nil
}
// interleavedSpatialQuery answers a box query on a z-order (interleaved)
// keyed database with a single range scan: it iterates from the box's
// minimum z-code to its maximum, emitting blocks inside the cuboid and
// jumping over out-of-box gaps via BigMin. Results are streamed on the
// returned channel; closing done aborts the query.
func (ldbs *LevelDBSession) interleavedSpatialQuery(
	first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	var (
		firstKey  int64
		secondKey int64
		err       error
	)
	if firstKey, err = common.DecodeStringFromBytes(first); err != nil {
		return nil, err
	}
	if secondKey, err = common.DecodeStringFromBytes(second); err != nil {
		return nil, err
	}
	c1 := common.ClipCoord(common.PlainToCoord(firstKey))
	c2 := common.ClipCoord(common.PlainToCoord(secondKey))
	// Normalize the box corners.
	c1, c2 = common.MinCoord(c1, c2), common.MaxCoord(c1, c2)
	blocks := make(chan Block)
	go func() {
		defer close(blocks)
		ro := leveldb.NewReadOptions()
		defer ro.Close()
		ro.SetFillCache(false)
		it := ldbs.backend.db.NewIterator(ro)
		defer it.Close()
		zmin, zmax := common.CoordToInterleaved(c1), common.CoordToInterleaved(c2)
		// Should not be necessary.
		zmin, zmax = common.Order64(zmin, zmax)
		var (
			cub        = common.Cuboid{P1: c1, P2: c2}
			err        error
			encodedKey []byte
		)
		it.Seek(common.ToBigEndian(zmin))
		for it.Valid() {
			zcode := common.FromBigEndian(it.Key())
			if zcode > zmax {
				break
			}
			if c := common.InterleavedToCoord(zcode); cub.Contains(c) {
				// Clients expect plain string keys on the wire.
				if encodedKey, err = common.EncodeStringToBytes(common.CoordToPlain(c)); err != nil {
					log.Printf("error encoding key: %s\n", err)
					return
				}
				select {
				case blocks <- Block{Key: encodedKey, Data: it.Value()}:
				case <-done:
					return
				}
				it.Next()
			} else {
				// Outside the box: BigMin yields the next z-code that
				// re-enters it, so skip the gap in one Seek.
				next := common.BigMin(zmin, zmax, zcode)
				it.Seek(common.ToBigEndian(next))
			}
		}
		if err = it.GetError(); err != nil {
			log.Printf("error while iterating: %s\n", err)
			return
		}
	}()
	return blocks, nil
}

177
cmd/mtredisalize/main.go Normal file
View File

@ -0,0 +1,177 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"log"
"net"
"os"
"os/signal"
"runtime"
"strings"
"time"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// Server defaults.
const (
	defaultMaxBulkStringSize = 32 * 1024 * 1024 // parser input cap, bytes
	defaultGCDuration        = "24h"            // forced runtime.GC interval
	defaultChangeDuration    = "30s"            // change aggregation window
)

// usage prints a synopsis of the command line interface to stderr.
func usage() {
	fmt.Fprintf(os.Stderr,
		"Usage: %s [<options>] <database>\n", os.Args[0])
	fmt.Fprintln(os.Stderr, "Options:")
	flag.PrintDefaults()
}
// main parses the command line, opens the configured backend, listens
// on TCP (or a unix socket if host contains a '/'), and runs the accept
// / shutdown / GC / change-flush event loop.
func main() {
	var (
		port              int
		host              string
		driver            string
		cacheSize         int
		version           bool
		interleaved       bool
		changeURL         string
		gcDuration        string
		changeDuration    string
		maxBulkStringSize int64
	)
	flag.Usage = usage
	flag.IntVar(&port, "port", 6379, "port to bind")
	flag.StringVar(&driver, "driver", "leveldb", "type of database (leveldb, sqlite)")
	flag.StringVar(&host, "host", "", "host to bind")
	flag.IntVar(&cacheSize, "cache", 32, "cache size in MB")
	flag.BoolVar(&version, "version", false, "Print version and exit.")
	flag.BoolVar(&interleaved,
		"interleaved", false, "Backend stores key in interleaved form.")
	flag.StringVar(&gcDuration,
		"gc-duration", defaultGCDuration, "Duration between forced GCs.")
	flag.StringVar(&changeDuration,
		"change-duration", defaultChangeDuration, "Duration to aggregate changes.")
	flag.StringVar(&changeURL, "change-url", "", "URL to send changes to.")
	flag.Int64Var(&maxBulkStringSize, "max-bulk-string-size", defaultMaxBulkStringSize,
		"max size of a bulk string to be accepted as input (in bytes).")
	flag.Parse()
	if version {
		common.PrintVersionAndExit()
	}
	if flag.NArg() < 1 {
		log.Fatal("Missing path to world")
	}
	var (
		err           error
		backend       Backend
		gcDur         time.Duration
		chDur         time.Duration
		changeTracker *changeTracker
	)
	if gcDur, err = time.ParseDuration(gcDuration); err != nil {
		log.Fatal(err)
	}
	// Setup the change listening stuff.
	var changeChan <-chan time.Time
	useChangeNotification := changeURL != ""
	if useChangeNotification {
		if chDur, err = time.ParseDuration(changeDuration); err != nil {
			log.Fatal(err)
		}
		changeChan = time.Tick(chDur)
		changeTracker = newChangeTracker()
	} else {
		// We will never receive ticks on this.
		changeChan = make(<-chan time.Time)
	}
	path := flag.Arg(0)
	if driver == "sqlite" {
		if backend, err = NewSQLiteBackend(path, changeTracker, interleaved); err != nil {
			log.Fatal(err)
		}
	} else {
		if backend, err = NewLeveDBBackend(
			path, changeTracker, interleaved, cacheSize); err != nil {
			log.Fatal(err)
		}
	}
	defer backend.Shutdown()
	var listener net.Listener
	var proto, address string
	// A '/' in host selects a unix domain socket instead of TCP.
	if strings.ContainsRune(host, '/') {
		proto, address = "unix", host
	} else {
		proto, address = "tcp", fmt.Sprintf("%s:%d", host, port)
	}
	listener, err = net.Listen(proto, address)
	if err != nil {
		log.Fatal(err)
	}
	defer listener.Close()
	log.Printf("Server started at %s\n", listener.Addr())
	connChan := make(chan net.Conn)
	defer close(connChan)
	sigChan := make(chan os.Signal, 1)
	// NOTE(review): os.Kill (SIGKILL) cannot be trapped on POSIX
	// systems; only os.Interrupt is effective here — confirm whether
	// SIGTERM handling was intended instead.
	signal.Notify(sigChan, os.Interrupt, os.Kill)
	// Accept loop feeds connections into the event loop below.
	go func() {
		for {
			conn, err := listener.Accept()
			if err != nil {
				log.Fatal(err)
			}
			log.Printf("Client accepted from: %s\n", conn.RemoteAddr())
			connChan <- conn
		}
	}()
	log.Printf("Doing garbage collection every: %s\n", gcDur)
	gcChan := time.Tick(gcDur)
	for {
		select {
		case conn := <-connChan:
			var session Session
			if session, err = backend.NewSession(); err != nil {
				log.Printf("Cannot create session: %s\n", err)
				conn.Close()
			} else {
				// One goroutine per client connection.
				go NewConnection(conn, session, maxBulkStringSize).Run()
			}
		case <-sigChan:
			log.Println("Shutting down")
			return
		case <-gcChan:
			log.Println("Starting garbage collection.")
			runtime.GC()
			log.Println("Garbage collection done.")
		case <-changeChan:
			if changeTracker != nil {
				changeTracker.FlushChanges(changeURL)
			}
		}
	}
}

View File

@ -0,0 +1,273 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"bufio"
"bytes"
"fmt"
"io"
"log"
"strconv"
"strings"
)
// RedisCommands is the callback interface the parser dispatches to.
// Each method handles one command and reports whether parsing should
// continue.
type RedisCommands interface {
	Hdel(hash, key []byte) bool
	Hget(hash, key []byte) bool
	Hset(hash, key, block []byte) bool
	Multi() bool
	Exec() bool
	Hkeys(hash []byte) bool
	HSpatial(hash, first, second []byte) bool
	Ping() bool
}

// RedisParser reads RESP (Redis protocol) input and feeds complete
// commands to a RedisCommands implementation.
type RedisParser struct {
	reader            *bufio.Reader
	commands          RedisCommands
	missing           int64         // array elements still expected
	args              []interface{} // collected command arguments
	maxBulkStringSize int64         // upper bound on accepted payloads
}
// NewRedisParser creates a parser that reads from reader and hands
// completed commands to commands. Bulk strings larger than
// maxBulkStringSize are rejected.
func NewRedisParser(reader *bufio.Reader,
	commands RedisCommands,
	maxBulkStringSize int64) *RedisParser {
	rp := RedisParser{
		reader:            reader,
		commands:          commands,
		maxBulkStringSize: maxBulkStringSize,
	}
	return &rp
}
// Parse consumes lines until the input is exhausted or a dispatched
// command asks for termination.
func (rp *RedisParser) Parse() {
	for {
		line := rp.nextLine()
		if line == nil || !rp.dispatch(line) {
			return
		}
	}
}
// nextLine reads one line from the input and returns it with the
// trailing "\r\n" stripped. It returns nil at EOF or on a read error
// (non-EOF errors are logged via consumeError).
// NOTE(review): a trailing partial line without '\n' is discarded
// together with the EOF — confirm that is acceptable.
func (rp *RedisParser) nextLine() []byte {
	line, err := rp.reader.ReadBytes('\n')
	if err != nil {
		if err != io.EOF {
			rp.consumeError(err)
		}
		return nil
	}
	return bytes.TrimRight(line, "\r\n")
}
// dispatch routes a single protocol line by its RESP type marker.
// An empty line terminates parsing; unknown markers are skipped.
func (rp *RedisParser) dispatch(line []byte) bool {
	if len(line) == 0 {
		return false
	}
	switch line[0] {
	case '-':
		// Error replies are ignored.
		return true
	case '+':
		return rp.simpleString(line)
	case ':':
		return rp.integer(line)
	case '$':
		return rp.bulkString(line)
	case '*':
		return rp.array(line)
	default:
		return true
	}
}
// simpleString handles a RESP simple string ("+...") line.
func (rp *RedisParser) simpleString(line []byte) bool {
	return rp.consumeSimpleString(string(line[1:]))
}
// integer handles a RESP integer (":...") line.
func (rp *RedisParser) integer(line []byte) bool {
	v, err := strconv.ParseInt(string(line[1:]), 10, 64)
	if err != nil {
		return rp.consumeError(err)
	}
	return rp.consumeInteger(v)
}
// bulkString handles a RESP bulk string ("$<len>" header followed by
// <len> payload bytes and a CRLF). Negative lengths yield a nil
// string, zero an empty one. Strings above maxBulkStringSize are
// rejected to prevent denial of service.
func (rp *RedisParser) bulkString(line []byte) bool {
	i, err := strconv.ParseInt(string(line[1:]), 10, 64)
	if err != nil {
		return rp.consumeError(err)
	}
	switch {
	case i < 0:
		return rp.consumeBulkString(nil)
	case i == 0:
		return rp.consumeBulkString([]byte{})
	default:
		if i > rp.maxBulkStringSize { // prevent denial of service.
			return rp.consumeError(
				fmt.Errorf("Bulk string too large (%d bytes).\n", i))
		}
		data := make([]byte, i)
		// io.ReadFull loops over short reads for us and fails
		// on a premature EOF (the manual Read loop it replaces
		// could not distinguish that case).
		if _, err = io.ReadFull(rp.reader, data); err != nil {
			return rp.consumeError(err)
		}
		// Consume the trailing CRLF after the payload.
		if _, err = rp.reader.ReadBytes('\n'); err != nil {
			return rp.consumeError(err)
		}
		return rp.consumeBulkString(data)
	}
}
// array handles a RESP array header ("*<count>").
func (rp *RedisParser) array(line []byte) bool {
	n, err := strconv.ParseInt(string(line[1:]), 10, 64)
	if err != nil {
		return rp.consumeError(err)
	}
	return rp.consumeArray(n)
}
// push appends one parsed value to the pending argument list. When
// all elements announced by a preceding array header have arrived
// (or the value arrived outside an array), the accumulated command
// is executed and the argument list is reset.
func (rp *RedisParser) push(i interface{}) bool {
	rp.args = append(rp.args, i)
	rp.missing--
	if rp.missing <= 0 {
		rp.missing = 0
		res := rp.execute()
		rp.args = []interface{}{}
		return res
	}
	return true
}
// asString coerces a parsed protocol value into a string.
func asString(v interface{}) string {
	switch x := v.(type) {
	case []byte:
		return string(x)
	case string:
		return x
	default:
		return fmt.Sprintf("%s", x)
	}
}
// execute interprets the accumulated arguments as one Redis command
// and dispatches it to the matching RedisCommands method. Malformed
// or unknown commands return false, terminating the connection.
//
// Fixes: the HDEL branch previously logged "Missing argments for
// HGET." (wrong command name), and several messages misspelled
// "arguments".
func (rp *RedisParser) execute() bool {
	l := len(rp.args)
	if l < 1 {
		log.Println("WARN: Too few arguments for command.")
		return false
	}
	cmd := strings.ToUpper(asString(rp.args[0]))
	switch cmd {
	case "HDEL":
		if l < 3 {
			log.Println("WARN: Missing arguments for HDEL.")
			return false
		}
		hash, ok1 := rp.args[1].([]byte)
		key, ok2 := rp.args[2].([]byte)
		if !ok1 || !ok2 {
			log.Println("WARN: HDEL data are not byte slices.")
			return false
		}
		return rp.commands.Hdel(hash, key)
	case "HGET":
		if l < 3 {
			log.Println("WARN: Missing arguments for HGET.")
			return false
		}
		hash, ok1 := rp.args[1].([]byte)
		key, ok2 := rp.args[2].([]byte)
		if !ok1 || !ok2 {
			log.Println("WARN: HGET data are not byte slices.")
			return false
		}
		return rp.commands.Hget(hash, key)
	case "HSET":
		if l < 4 {
			log.Println("WARN: Missing arguments for HSET.")
			return false
		}
		hash, ok1 := rp.args[1].([]byte)
		key, ok2 := rp.args[2].([]byte)
		value, ok3 := rp.args[3].([]byte)
		if !ok1 || !ok2 || !ok3 {
			log.Println("WARN: HSET data are not byte slices.")
			return false
		}
		return rp.commands.Hset(hash, key, value)
	case "MULTI":
		return rp.commands.Multi()
	case "EXEC":
		return rp.commands.Exec()
	case "HKEYS":
		if l < 2 {
			log.Println("WARN: Missing arguments for HKEYS.")
			return false
		}
		hash, ok := rp.args[1].([]byte)
		if !ok {
			log.Println("WARN: HKEYS data are not byte slices.")
			return false
		}
		return rp.commands.Hkeys(hash)
	case "HSPATIAL":
		if l < 4 {
			log.Println("WARN: Missing arguments for HSPATIAL.")
			return false
		}
		hash, ok1 := rp.args[1].([]byte)
		first, ok2 := rp.args[2].([]byte)
		second, ok3 := rp.args[3].([]byte)
		if !ok1 || !ok2 || !ok3 {
			log.Println("WARN: HSPATIAL data are not byte slices.")
			return false
		}
		return rp.commands.HSpatial(hash, first, second)
	case "PING":
		return rp.commands.Ping()
	}
	log.Printf("WARN: unknown command: '%s'\n", cmd)
	return false
}
// consumeSimpleString records a parsed simple string as a command
// argument.
func (rp *RedisParser) consumeSimpleString(s string) bool {
	return rp.push(s)
}

// consumeBulkString records a parsed bulk string as a command
// argument (nil for a null bulk string).
func (rp *RedisParser) consumeBulkString(data []byte) bool {
	return rp.push(data)
}

// consumeInteger records a parsed integer as a command argument.
func (rp *RedisParser) consumeInteger(i int64) bool {
	return rp.push(i)
}

// consumeError logs a protocol error; parsing continues.
func (rp *RedisParser) consumeError(err error) bool {
	log.Printf("error: %s\n", err)
	return true
}
// consumeArray starts collecting i arguments for the next command.
// Nested and null arrays are not supported and abort parsing.
func (rp *RedisParser) consumeArray(i int64) bool {
	switch {
	case rp.missing > 0:
		log.Println("WARN: Nested arrays are not supported!")
		return false
	case i < 0:
		log.Println("Null arrays are not supported")
		return false
	}
	rp.missing = i
	return true
}

543
cmd/mtredisalize/sqlite.go Normal file
View File

@ -0,0 +1,543 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"database/sql"
"log"
"sync"
_ "github.com/mattn/go-sqlite3"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// globalLock serializes database access across all sessions: writers
// take the write lock, readers the read lock.
var globalLock sync.RWMutex

// SQL statements used by the backend. "pos" is the encoded database
// key of a block, "data" its serialized content.
const (
	deleteSQL = "DELETE FROM blocks WHERE pos = ?"
	fetchSQL  = "SELECT data FROM blocks WHERE pos = ?"
	existsSQL = "SELECT 1 FROM blocks WHERE pos = ?"
	updateSQL = "UPDATE blocks SET data = ? WHERE pos = ?"
	insertSQL = "INSERT INTO blocks (pos, data) VALUES (?, ?)"
	countSQL  = "SELECT count(*) FROM blocks"
	keysSQL   = "SELECT pos FROM blocks"
	rangeSQL  = "SELECT pos, data FROM blocks WHERE pos BETWEEN ? AND ? ORDER BY pos"
)
// SQLiteBackend serves map blocks out of an SQLite database. All
// prepared statements are created once at construction time.
type SQLiteBackend struct {
	db            *sql.DB
	encoder       common.KeyEncoder  // key int64 -> wire bytes
	decoder       common.KeyDecoder  // wire bytes -> key int64
	changeTracker *changeTracker     // optional; notified on writes
	interleaved   bool               // keys use interleaved (Z-order) encoding
	coverage      *common.Coverage3D // built only for plain keys
	existsStmt    *sql.Stmt
	deleteStmt    *sql.Stmt
	fetchStmt     *sql.Stmt
	insertStmt    *sql.Stmt
	updateStmt    *sql.Stmt
	countStmt     *sql.Stmt
	keysStmt      *sql.Stmt
	rangeStmt     *sql.Stmt
}

// SQLiteSession is one client session on a SQLiteBackend, optionally
// carrying an open transaction.
type SQLiteSession struct {
	backend *SQLiteBackend
	tx      *sql.Tx // nil when no transaction is running
}
// NewSession creates a new session on this backend.
func (sqlb *SQLiteBackend) NewSession() (Session, error) {
	return &SQLiteSession{sqlb, nil}, nil
}
// Close ends the session, rolling back any transaction still open.
func (ss *SQLiteSession) Close() error {
	if ss.tx == nil {
		return nil
	}
	t := ss.tx
	ss.tx = nil
	return t.Rollback()
}
// NewSQLiteBackend opens the SQLite database at path and prepares all
// statements the backend needs. changeTracker may be nil; interleaved
// selects the key encoding. For plain (non-interleaved) keys a
// coverage index over all stored blocks is built up front.
func NewSQLiteBackend(
	path string,
	changeTracker *changeTracker, interleaved bool) (sqlb *SQLiteBackend, err error) {

	res := SQLiteBackend{interleaved: interleaved, changeTracker: changeTracker}

	if res.db, err = sql.Open("sqlite3", path); err != nil {
		return
	}

	// Prepare every statement up front; on the first failure tear
	// everything down so no statements or the DB handle leak.
	for _, p := range []struct {
		stmt **sql.Stmt
		sql  string
	}{
		{&res.existsStmt, existsSQL},
		{&res.fetchStmt, fetchSQL},
		{&res.deleteStmt, deleteSQL},
		{&res.insertStmt, insertSQL},
		{&res.updateStmt, updateSQL},
		{&res.countStmt, countSQL},
		{&res.keysStmt, keysSQL},
		{&res.rangeStmt, rangeSQL},
	} {
		if *p.stmt, err = res.db.Prepare(p.sql); err != nil {
			res.closeAll()
			return
		}
	}

	if interleaved {
		res.encoder = common.EncodeStringToBytesFromInterleaved
		res.decoder = common.DecodeStringFromBytesToInterleaved
	} else {
		res.encoder = common.EncodeStringToBytes
		res.decoder = common.DecodeStringFromBytes
		// The coverage index is only maintained for plain keys.
		if err = res.buildCoverage(); err != nil {
			return
		}
	}

	sqlb = &res
	return
}
// buildCoverage scans all block keys in the database and inserts
// their coordinates into a fresh Coverage3D index. This runs once at
// startup and may take a while on large maps.
func (sqlb *SQLiteBackend) buildCoverage() (err error) {
	log.Println("INFO: Start building coverage index (this may take some time)...")
	sqlb.coverage = common.NewCoverage3D()
	var rows *sql.Rows
	if rows, err = sqlb.keysStmt.Query(); err != nil {
		return
	}
	defer rows.Close()
	for rows.Next() {
		var key int64
		if err = rows.Scan(&key); err != nil {
			return
		}
		sqlb.coverage.Insert(common.PlainToCoord(key))
	}
	err = rows.Err()
	log.Println("INFO: Finished building coverage index.")
	return
}
func closeStmt(stmt **sql.Stmt) error {
s := *stmt
if s != nil {
*stmt = nil
return s.Close()
}
return nil
}
func closeDB(db **sql.DB) error {
d := *db
if d != nil {
*db = nil
return d.Close()
}
return nil
}
// closeAll closes all prepared statements and finally the database
// handle, returning the database close error (statement close errors
// are ignored).
func (sqlb *SQLiteBackend) closeAll() error {
	closeStmt(&sqlb.deleteStmt)
	closeStmt(&sqlb.fetchStmt)
	closeStmt(&sqlb.insertStmt)
	closeStmt(&sqlb.updateStmt)
	closeStmt(&sqlb.existsStmt)
	closeStmt(&sqlb.countStmt)
	closeStmt(&sqlb.keysStmt)
	closeStmt(&sqlb.rangeStmt)
	return closeDB(&sqlb.db)
}
// Shutdown closes the backend. It takes the global write lock so no
// session is mid-operation while resources are torn down.
func (sqlb *SQLiteBackend) Shutdown() error {
	globalLock.Lock()
	defer globalLock.Unlock()
	return sqlb.closeAll()
}
// txStmt returns stmt bound to the session's transaction if one is
// running, otherwise stmt itself.
func (ss *SQLiteSession) txStmt(stmt *sql.Stmt) *sql.Stmt {
	if ss.tx != nil {
		return ss.tx.Stmt(stmt)
	}
	return stmt
}
// Del removes the block stored under key and reports whether it
// existed. The hash argument is unused by this backend.
func (ss *SQLiteSession) Del(hash, key []byte) (success bool, err error) {
	var pos int64
	if pos, err = ss.backend.decoder(key); err != nil {
		return
	}
	globalLock.Lock()
	defer globalLock.Unlock()
	// Probe first so the caller learns whether anything was
	// actually deleted.
	existsStmt := ss.txStmt(ss.backend.existsStmt)
	var x int
	err2 := existsStmt.QueryRow(pos).Scan(&x)
	if err2 == sql.ErrNoRows {
		success = false
		return
	}
	if err2 != nil {
		err = err2
		return
	}
	success = true
	deleteStmt := ss.txStmt(ss.backend.deleteStmt)
	_, err = deleteStmt.Exec(pos)
	return
}
// Fetch returns the data of the block stored under key, or nil data
// with a nil error if no such block exists. The hash argument is
// unused by this backend.
func (ss *SQLiteSession) Fetch(hash, key []byte) (data []byte, err error) {
	var pos int64
	if pos, err = ss.backend.decoder(key); err != nil {
		return
	}
	globalLock.RLock()
	defer globalLock.RUnlock()
	fetchStmt := ss.txStmt(ss.backend.fetchStmt)
	err2 := fetchStmt.QueryRow(pos).Scan(&data)
	if err2 == sql.ErrNoRows {
		// Not found is not an error; data stays nil.
		return
	}
	err = err2
	return
}
// InTransaction reports whether the session has an open transaction.
func (ss *SQLiteSession) InTransaction() bool {
	return ss.tx != nil
}
// Store inserts or updates the block stored under key and reports
// whether it already existed. The hash argument is unused by this
// backend. New blocks are added to the coverage index and all writes
// are reported to the change tracker, if configured.
func (ss *SQLiteSession) Store(hash, key, value []byte) (exists bool, err error) {
	var pos int64
	if pos, err = ss.backend.decoder(key); err != nil {
		return
	}
	globalLock.Lock()
	defer globalLock.Unlock()
	existsStmt := ss.txStmt(ss.backend.existsStmt)
	var x int
	err2 := existsStmt.QueryRow(pos).Scan(&x)
	if err2 == sql.ErrNoRows {
		exists = false
	} else if err2 != nil {
		err = err2
		return
	} else {
		exists = true
	}
	if exists {
		updateStmt := ss.txStmt(ss.backend.updateStmt)
		_, err = updateStmt.Exec(value, pos)
	} else {
		insertStmt := ss.txStmt(ss.backend.insertStmt)
		_, err = insertStmt.Exec(pos, value)
	}
	if err != nil {
		return
	}
	// Technically this is too early: when running inside a
	// transaction the write only becomes durable on a later commit,
	// which may still fail.
	if ss.backend.changeTracker != nil || ss.backend.coverage != nil {
		c := common.PlainToCoord(pos)
		if ss.backend.coverage != nil && !exists {
			ss.backend.coverage.Insert(c)
		}
		if ss.backend.changeTracker != nil {
			ss.backend.changeTracker.BlockChanged(c)
		}
	}
	return
}
// BeginTransaction starts a database transaction for this session.
// Calling it while a transaction is already running is a no-op that
// only logs a warning.
func (ss *SQLiteSession) BeginTransaction() (err error) {
	if ss.tx != nil {
		log.Println("WARN: Already running transaction.")
		return nil
	}
	globalLock.Lock()
	defer globalLock.Unlock()
	ss.tx, err = ss.backend.db.Begin()
	return
}
// CommitTransaction commits the session's open transaction. Calling
// it without a running transaction is a no-op that only logs a
// warning.
func (ss *SQLiteSession) CommitTransaction() error {
	tx := ss.tx
	if tx == nil {
		log.Println("WARN: No transaction running.")
		return nil
	}
	globalLock.Lock()
	defer globalLock.Unlock()
	ss.tx = nil
	return tx.Commit()
}
// AllKeys streams the encoded keys of all stored blocks and returns
// the channel together with the total block count. The hash argument
// is unused by this backend. Closing done aborts the stream early.
//
// The read lock is acquired here and released by the streaming
// goroutine (or on every early-error path below) — be careful when
// changing the control flow.
func (ss *SQLiteSession) AllKeys(
	hash []byte,
	done <-chan struct{}) (<-chan []byte, int, error) {
	globalLock.RLock()
	countStmt := ss.txStmt(ss.backend.countStmt)
	var n int
	var err error
	if err = countStmt.QueryRow().Scan(&n); err != nil {
		if err == sql.ErrNoRows {
			err = nil
		}
		globalLock.RUnlock()
		return nil, n, err
	}
	keysStmt := ss.txStmt(ss.backend.keysStmt)
	var rows *sql.Rows
	if rows, err = keysStmt.Query(); err != nil {
		globalLock.RUnlock()
		return nil, n, err
	}
	keys := make(chan []byte)
	go func() {
		defer globalLock.RUnlock()
		defer rows.Close()
		defer close(keys)
		var err error
		for rows.Next() {
			var key int64
			if err = rows.Scan(&key); err != nil {
				log.Printf("WARN: %s\n", err)
				break
			}
			var encoded []byte
			if encoded, err = ss.backend.encoder(key); err != nil {
				log.Printf("Cannot encode key: %d %s\n", key, err)
				break
			}
			select {
			case keys <- encoded:
			case <-done:
				return
			}
		}
	}()
	return keys, n, nil
}
// SpatialQuery streams all blocks within the cuboid spanned by the
// two corner keys, choosing the strategy that matches the backend's
// key encoding. The hash argument is unused by this backend.
func (ss *SQLiteSession) SpatialQuery(
	hash, first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	if !ss.backend.interleaved {
		return ss.plainSpatialQuery(first, second, done)
	}
	return ss.interleavedSpatialQuery(first, second, done)
}
// interleavedSpatialQuery streams all blocks inside the cuboid
// spanned by the two corner keys using their interleaved (Z-order)
// encoding: contiguous runs of the curve are fetched with range
// queries, and after leaving the cuboid the next relevant curve
// position is computed with BigMin and the range query restarted
// from there.
func (ss *SQLiteSession) interleavedSpatialQuery(
	first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	var (
		firstKey  int64
		secondKey int64
		err       error
	)
	if firstKey, err = common.DecodeStringFromBytes(first); err != nil {
		return nil, err
	}
	if secondKey, err = common.DecodeStringFromBytes(second); err != nil {
		return nil, err
	}
	c1 := common.ClipCoord(common.PlainToCoord(firstKey))
	c2 := common.ClipCoord(common.PlainToCoord(secondKey))
	c1, c2 = common.MinCoord(c1, c2), common.MaxCoord(c1, c2)
	blocks := make(chan Block)
	globalLock.RLock()
	go func() {
		defer close(blocks)
		defer globalLock.RUnlock()
		zmin, zmax := common.CoordToInterleaved(c1), common.CoordToInterleaved(c2)
		// Should not be necessary.
		zmin, zmax = common.Order64(zmin, zmax)
		cub := common.Cuboid{P1: c1, P2: c2}
		rangeStmt := ss.txStmt(ss.backend.rangeStmt)
		zcode := zmin
	loop:
		rows, err := rangeStmt.Query(zcode, zmax)
		if err != nil {
			log.Printf("error: fetching range failed: %s\n", err)
			return
		}
		for rows.Next() {
			var data []byte
			if err = rows.Scan(&zcode, &data); err != nil {
				rows.Close()
				log.Printf("error: scanning row failed: %s\n", err)
				return
			}
			c := common.InterleavedToCoord(zcode)
			if cub.Contains(c) {
				key := common.StringToBytes(common.CoordToPlain(c))
				//fmt.Printf("sending: %q\n", c)
				select {
				case blocks <- Block{Key: key, Data: data}:
				case <-done:
					return
				}
			} else {
				// Left the cuboid: close the current range and jump
				// to the next curve position inside it.
				if err = rows.Close(); err != nil {
					log.Printf("error: closing range failed: %s\n", err)
					return
				}
				zcode = common.BigMin(zmin, zmax, zcode)
				goto loop
			}
		}
		if err = rows.Err(); err != nil {
			log.Printf("error: iterating range failed: %s\n", err)
		}
		if err = rows.Close(); err != nil {
			log.Printf("error: closing range failed: %s\n", err)
		}
	}()
	return blocks, nil
}
// plainSpatialQuery streams all blocks inside the cuboid spanned by
// the two plain-encoded corner keys. It walks the coverage index so
// range queries are only issued for rows that actually contain
// blocks.
func (ss *SQLiteSession) plainSpatialQuery(
	first, second []byte,
	done <-chan struct{}) (<-chan Block, error) {
	var (
		firstKey  int64
		secondKey int64
		err       error
	)
	if firstKey, err = common.DecodeStringFromBytes(first); err != nil {
		return nil, err
	}
	if secondKey, err = common.DecodeStringFromBytes(second); err != nil {
		return nil, err
	}
	c1 := common.PlainToCoord(firstKey)
	c2 := common.PlainToCoord(secondKey)
	c1, c2 = common.MinCoord(c1, c2), common.MaxCoord(c1, c2)
	blocks := make(chan Block)
	globalLock.RLock()
	go func() {
		defer globalLock.RUnlock()
		defer close(blocks)
		rangeStmt := ss.txStmt(ss.backend.rangeStmt)
		// send streams one range-query result to the consumer and
		// reports whether the overall iteration should continue.
		send := func(rows *sql.Rows, err error) bool {
			if err != nil {
				log.Printf("Error in range query: %s\n", err)
				return false
			}
			defer rows.Close()
			for rows.Next() {
				var key int64
				var data []byte
				if err = rows.Scan(&key, &data); err != nil {
					log.Printf("Error in scanning row: %s\n", err)
					return false
				}
				var encodedKey []byte
				if encodedKey, err = common.EncodeStringToBytes(key); err != nil {
					log.Printf("Key encoding failed: %s\n", err)
					return false
				}
				select {
				case blocks <- Block{Key: encodedKey, Data: data}:
				case <-done:
					return false
				}
			}
			if err = rows.Err(); err != nil {
				log.Printf("Error in range query: %s\n", err)
				return false
			}
			return true
		}
		var a, b common.Coord
		for _, r := range ss.backend.coverage.Query(c1, c2) {
			a.Z, b.Z = int16(r.Z), int16(r.Z)
			a.X, b.X = int16(r.X1), int16(r.X2)
			// log.Printf("y1 y2 x1 x2 z: %d %d, %d %d, %d\n", r.Y1, r.Y2, r.X1, r.X2, r.Z)
			for a.Y = r.Y2; a.Y >= r.Y1; a.Y-- {
				b.Y = a.Y
				from, to := common.CoordToPlain(a), common.CoordToPlain(b)
				if !send(rangeStmt.Query(from, to)) {
					return
				}
			}
		}
	}()
	return blocks, nil
}

122
cmd/mtseeder/baselevel.go Normal file
View File

@ -0,0 +1,122 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"image/color"
"log"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
const (
	// baseLevelDir is the directory name of the deepest zoom level.
	baseLevelDir = "8"
)

// blockPos is one render job: the map area anchored at block
// coordinates (x, z), written as base-level tile (i, j).
type blockPos struct {
	x, z int16
	j, i int
}
// createTiles renders one base-level tile per position received on
// jobs. The produced write closures are executed on a separate
// goroutine so disk I/O overlaps with rendering. done is signalled
// (and the creator closed) once jobs is closed and drained.
func createTiles(
	btc *common.BaseTileCreator,
	jobs chan blockPos,
	done *sync.WaitGroup) {
	wFns := make(chan func() (bool, error))
	// Writing already rendered tiles to disk can be done in background.
	go func() {
		for wfn := range wFns {
			if _, err := wfn(); err != nil {
				log.Printf("WARN: writing file failed: %v.\n", err)
			}
		}
	}()
	defer func() {
		close(wFns)
		btc.Close()
		done.Done()
	}()
	for job := range jobs {
		if err := btc.RenderArea(job.x-1, job.z-1); err != nil {
			log.Printf("WARN: rendering failed: %v.\n", err)
			continue
		}
		wFns <- btc.WriteFunc(job.i, job.j, nil)
	}
}
// createBaseLevel renders all base-level tiles for the given block
// range by fanning blockPos jobs out to numWorkers workers, each with
// its own Redis connection. address is either a unix socket path
// (contains '/') or a host:port.
// NOTE(review): zMin/zMax are normalized with common.Order but
// xMin/xMax are not — confirm callers always pass xMin <= xMax.
func createBaseLevel(
	address string,
	xMin, yMin, zMin, xMax, yMax, zMax int,
	transparent bool, transparentDim float32,
	colorsFile string, bg color.RGBA, outDir string,
	numWorkers int) (err error) {
	var colors *common.Colors
	if colors, err = common.ParseColors(colorsFile); err != nil {
		return
	}
	colors.TransparentDim = transparentDim
	baseDir := filepath.Join(outDir, baseLevelDir)
	if err = os.MkdirAll(baseDir, os.ModePerm); err != nil {
		return
	}
	jobs := make(chan blockPos)
	var done sync.WaitGroup
	var proto string
	if strings.ContainsRune(address, '/') {
		proto = "unix"
	} else {
		proto = "tcp"
	}
	for i := 0; i < numWorkers; i++ {
		var client *common.RedisClient
		if client, err = common.NewRedisClient(proto, address); err != nil {
			return
		}
		done.Add(1)
		btc := common.NewBaseTileCreator(
			client, colors, bg,
			int16(yMin), int16(yMax),
			transparent, baseDir)
		go createTiles(btc, jobs, &done)
	}
	zMin, zMax = common.Order(zMin, zMax)
	// One tile covers a 16x16 block area; i/j are the tile indices.
	for x, i := int16(xMin), 0; x <= int16(xMax); x += 16 {
		xDir := filepath.Join(baseDir, strconv.Itoa(i))
		log.Printf("creating dir: %s\n", xDir)
		if err = os.MkdirAll(xDir, os.ModePerm); err != nil {
			log.Fatalf("Cannot create directory '%s': %s\n", xDir, err)
		}
		for z, j := int16(zMin), 0; z <= int16(zMax); z += 16 {
			jobs <- blockPos{x: x, z: z, i: i, j: j}
			j++
		}
		i++
	}
	close(jobs)
	done.Wait()
	return
}

97
cmd/mtseeder/main.go Normal file
View File

@ -0,0 +1,97 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"log"
"strings"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// main parses the command line and drives base-level tile creation
// followed by pyramid generation.
//
// Fixes: flag help texts — "Extra fimming" -> "Extra dimming" and
// "port to of" -> "port of".
func main() {
	var (
		port             int
		host             string
		xMin, yMin, zMin int
		xMax, yMax, zMax int
		colorsFile       string
		bgColor          string
		outDir           string
		numWorkers       int
		skipBaseLevel    bool
		skipPyramid      bool
		transparent      bool
		transparentDim   float64
		version          bool
	)
	defaultBgColor := common.ColorToHex(common.BackgroundColor)
	flag.IntVar(&port, "port", 6379, "port of mtredisalize server")
	flag.IntVar(&port, "p", 6379, "port of mtredisalize server (shorthand)")
	flag.StringVar(&host, "host", "localhost", "host to mtredisalize server")
	flag.IntVar(&xMin, "xmin", -1933, "x min of the area to tile")
	flag.IntVar(&xMax, "xmax", 1932, "x max of the area to tile")
	flag.IntVar(&yMin, "ymin", common.MinHeight, "Minimum y in blocks.")
	flag.IntVar(&yMax, "ymax", common.MaxHeight, "Maximum y in blocks.")
	flag.IntVar(&zMin, "zmin", -1933, "z min of the area to tile")
	flag.IntVar(&zMax, "zmax", 1932, "z max of the area to tile")
	flag.StringVar(&colorsFile, "colors", "colors.txt", "definition of colors")
	flag.StringVar(&bgColor, "background", defaultBgColor, "background color")
	flag.StringVar(&bgColor, "bg", defaultBgColor, "background color (shorthand)")
	flag.StringVar(&outDir, "output-dir", "map", "directory with the resulting image tree")
	flag.StringVar(&outDir, "o", "map", "directory with the resulting image tree")
	flag.IntVar(&numWorkers, "workers", 1, "number of workers")
	flag.IntVar(&numWorkers, "w", 1, "number of workers (shorthand)")
	flag.BoolVar(&skipBaseLevel, "skip-base-level", false, "Do not generate base level tiles")
	flag.BoolVar(&skipBaseLevel, "sb", false, "Do not generate base level tiles (shorthand)")
	flag.BoolVar(&skipPyramid, "skip-pyramid", false, "Do not generate pyramid tiles")
	flag.BoolVar(&skipPyramid, "sp", false, "Do not generate pyramid tiles (shorthand)")
	flag.BoolVar(&transparent, "transparent", false, "Render transparent blocks.")
	flag.BoolVar(&transparent, "t", false, "Render transparent blocks (shorthand).")
	flag.Float64Var(&transparentDim,
		"transparent-dim", common.DefaultTransparentDim*100.0,
		"Extra dimming of transparent nodes each depth meter in percent.")
	flag.Float64Var(&transparentDim,
		"td", common.DefaultTransparentDim*100.0,
		"Extra dimming of transparent nodes each depth meter in percent. (shorthand)")
	flag.BoolVar(&version, "version", false, "Print version and exit.")
	flag.Parse()

	if version {
		common.PrintVersionAndExit()
	}

	bg := common.ParseColorDefault(bgColor, common.BackgroundColor)

	if !skipBaseLevel {
		// transparentDim is given in percent; clamp the fraction to [0, 1].
		td := common.Clamp32f(float32(transparentDim/100.0), 0.0, 1.0)
		var address string
		if strings.ContainsRune(host, '/') {
			address = host
		} else {
			address = fmt.Sprintf("%s:%d", host, port)
		}
		if err := createBaseLevel(
			address,
			xMin, yMin, zMin, xMax, yMax, zMax,
			transparent, td,
			colorsFile, bg,
			outDir,
			numWorkers); err != nil {
			log.Fatalf("Creating base level tiles failed: %s", err)
		}
	}
	if !skipPyramid {
		pc := pyramidCreator{numWorkers: numWorkers, outDir: outDir, bg: bg}
		if err := pc.create(); err != nil {
			log.Fatalf("Creating pyramid tiles failed: %s", err)
		}
	}
}

247
cmd/mtseeder/pyramid.go Normal file
View File

@ -0,0 +1,247 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"image"
"image/color"
"image/draw"
"io/ioutil"
"log"
"math"
"os"
"path/filepath"
"strconv"
"strings"
"sync"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
"github.com/bamiaux/rez"
)
// pyramidCreator builds the zoom-level pyramid on top of the base
// tiles, fusing 2x2 tiles of one level into one tile of the next.
type pyramidCreator struct {
	numWorkers int        // parallel tile fusers
	outDir     string     // root of the tile tree
	bg         color.RGBA // background for missing tiles
}
// findMaxDir returns the smallest and largest integer-named
// directory in files. Without any match, min is math.MaxInt32 and
// max is math.MinInt32.
func findMaxDir(files []os.FileInfo) (min, max int) {
	min, max = math.MaxInt32, math.MinInt32
	for _, f := range files {
		if !f.Mode().IsDir() {
			continue
		}
		x, err := strconv.Atoi(f.Name())
		if err != nil {
			continue
		}
		if x < min {
			min = x
		}
		if x > max {
			max = x
		}
	}
	return
}
// findMaxFile returns the smallest and largest integer-named regular
// file (ignoring the extension) in files. Without any match, min is
// math.MaxInt32 and max is math.MinInt32.
func findMaxFile(files []os.FileInfo) (min, max int) {
	min, max = math.MaxInt32, math.MinInt32
	for _, f := range files {
		if !f.Mode().IsRegular() {
			continue
		}
		name := f.Name()
		base := strings.TrimSuffix(name, filepath.Ext(name))
		x, err := strconv.Atoi(base)
		if err != nil {
			continue
		}
		if x < min {
			min = x
		}
		if x > max {
			max = x
		}
	}
	return
}
// pyramidJob fuses the four src tiles (2x2) into the single dst tile
// of the next zoom level.
type pyramidJob struct {
	src [4]string
	dst string
}
// createParentLevel emits fuse jobs that build zoom level N-1 from
// level N (oldDir). It returns the directory of the new level, or ""
// once level 0 was reached (or oldDir is empty/not numeric).
// NOTE(review): the z range per column pair is taken from the first
// column directory only — confirm both columns always cover the
// same z range.
func (pc *pyramidCreator) createParentLevel(
	oldDir string,
	jobs chan pyramidJob) (newDir string, err error) {

	oldName := filepath.Base(oldDir)

	var oldLevel int

	if oldLevel, err = strconv.Atoi(oldName); err != nil {
		return
	}

	if oldLevel <= 0 {
		return
	}

	var files []os.FileInfo
	if files, err = ioutil.ReadDir(oldDir); err != nil {
		return
	}

	xMin, xMax := findMaxDir(files)

	if xMax == math.MinInt32 {
		return
	}

	newLevel := oldLevel - 1

	log.Printf("Generating tiles of level %d\n", newLevel)

	parentDir := filepath.Dir(oldDir)
	newDir = filepath.Join(parentDir, strconv.Itoa(newLevel))

	if err = os.MkdirAll(newDir, os.ModePerm); err != nil {
		return
	}

	// Fuse two x columns of the old level into one of the new.
	for ox, nx := xMin, xMin; ox <= xMax; ox += 2 {
		ox1Dir := filepath.Join(oldDir, strconv.Itoa(ox))
		ox2Dir := filepath.Join(oldDir, strconv.Itoa(ox+1))

		if files, err = ioutil.ReadDir(ox1Dir); err != nil {
			return
		}

		zMin, zMax := findMaxFile(files)

		if zMax == math.MinInt32 {
			nx++
			continue
		}

		nxDir := filepath.Join(newDir, strconv.Itoa(nx))
		if err = os.MkdirAll(nxDir, os.ModePerm); err != nil {
			return
		}

		// Fuse two z rows of the old level into one of the new.
		for oz, nz := zMin, zMin; oz <= zMax; oz += 2 {
			oz1 := strconv.Itoa(oz) + ".png"
			oz2 := strconv.Itoa(oz+1) + ".png"
			s1 := filepath.Join(ox1Dir, oz1)
			s2 := filepath.Join(ox1Dir, oz2)
			s3 := filepath.Join(ox2Dir, oz1)
			s4 := filepath.Join(ox2Dir, oz2)
			d := filepath.Join(nxDir, strconv.Itoa(nz)+".png")
			jobs <- pyramidJob{src: [4]string{s1, s2, s3, s4}, dst: d}
			nz++
		}
		nx++
	}
	return
}
// clip8 clamps x to the tile coordinate range [0, 256].
func clip8(x int) int {
	if x < 0 {
		return 0
	}
	if x > 256 {
		return 256
	}
	return x
}
// clipRect clamps both corners of r to the [0, 256] tile square.
func clipRect(r image.Rectangle) image.Rectangle {
	return image.Rect(
		clip8(r.Min.X), clip8(r.Min.Y),
		clip8(r.Max.X), clip8(r.Max.Y))
}
// dps are the destination offsets of the four source tiles inside
// the 512x512 scratch image, indexed like pyramidJob.src.
var dps = [4]image.Point{
	image.Pt(0, 256),
	image.Pt(0, 0),
	image.Pt(256, 256),
	image.Pt(256, 0)}
// fuseTile draws the job's four source tiles into scratch (512x512),
// downscales the result into resized (256x256) and writes it to the
// job's destination path.
func (pc *pyramidCreator) fuseTile(
	scratch, resized *image.RGBA,
	conv rez.Converter,
	job *pyramidJob) error {

	for i, path := range job.src {
		img := common.LoadPNG(path, pc.bg)

		sr := clipRect(img.Bounds())
		r := sr.Sub(sr.Min).Add(dps[i])
		draw.Draw(scratch, r, img, sr.Min, draw.Src)
	}

	if err := conv.Convert(resized, scratch); err != nil {
		return err
	}

	log.Printf("Writing pyramid tile '%s'.\n", job.dst)
	return common.SaveAsPNG(job.dst, resized)
}
// fuseTiles is one worker: it allocates its scratch/resize buffers
// and converter once and then processes jobs until the channel is
// closed, signalling done at the end.
func (pc *pyramidCreator) fuseTiles(jobs chan pyramidJob, done *sync.WaitGroup) {
	defer done.Done()

	scratch := image.NewRGBA(image.Rect(0, 0, 512, 512))
	resized := image.NewRGBA(image.Rect(0, 0, 256, 256))

	cfg, err := rez.PrepareConversion(resized, scratch)
	if err != nil {
		log.Printf("WARN: cannot prepare rescaling: %s\n", err)
		return
	}
	conv, err := rez.NewConverter(cfg, common.ResizeFilter)
	if err != nil {
		log.Printf("WARN: Cannot create image converter: %s\n", err)
		return
	}

	for job := range jobs {
		if err := pc.fuseTile(scratch, resized, conv, &job); err != nil {
			log.Printf("WARN: Writing image failed: %s\n", err)
		}
	}
}
// create builds all pyramid levels above the base level, walking
// upwards until level 0 has been generated.
func (pc *pyramidCreator) create() (err error) {
	dir := filepath.Join(pc.outDir, baseLevelDir)
	for dir != "" {
		if dir, err = pc.createLevel(dir); err != nil {
			return err
		}
	}
	return nil
}
// createLevel builds the next-higher zoom level from oldDir using a
// pool of fuse workers. It returns the new level's directory ("" at
// the top of the pyramid).
func (pc *pyramidCreator) createLevel(oldDir string) (string, error) {
	jobs := make(chan pyramidJob)

	var done sync.WaitGroup

	for i := 0; i < pc.numWorkers; i++ {
		done.Add(1)
		go pc.fuseTiles(jobs, &done)
	}

	newDir, err := pc.createParentLevel(oldDir, jobs)
	// Closing jobs lets the workers drain and exit even on error.
	close(jobs)

	if err != nil {
		return newDir, err
	}

	done.Wait()

	return newDir, err
}

168
cmd/mttilemapper/main.go Normal file
View File

@ -0,0 +1,168 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"image"
"log"
"os"
"runtime/pprof"
"strings"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// main renders a single map image of a cuboid fetched from an
// mtredisalize server.
//
// Fixes: TransparentDim was clamped to [0, 100] although it is a
// fraction (mtseeder clamps the same value to [0, 1]); the y slab
// loop used `c2.Y > q1y`, which skipped the bottom block row when
// (q2y - q1y) is a multiple of 8; the local variable `image`
// shadowed the image package; "port to of" help-text typos.
func main() {
	var (
		port                 int
		host                 string
		x, y, z              int
		width, height, depth int
		colorsfile           string
		bgColor              string
		outfile              string
		shaded               bool
		transparent          bool
		cpuProfile           string
		transparentDim       float64
		version              bool
	)

	defaultBgColor := common.ColorToHex(common.BackgroundColor)

	flag.IntVar(&port, "port", 6379, "port of mtredisalize server")
	flag.IntVar(&port, "p", 6379, "port of mtredisalize server (shorthand)")
	flag.StringVar(&host, "host", "localhost", "host to mtredisalize server")
	flag.IntVar(&x, "x", 0, "x of query cuboid")
	flag.IntVar(&y, "y", -75, "y of query cuboid")
	flag.IntVar(&z, "z", 0, "z of query cuboid")
	flag.IntVar(&width, "width", 16, "width of query cuboid")
	flag.IntVar(&height, "height", 16, "height of query cuboid")
	flag.IntVar(&depth, "depth", 150, "depth of query cuboid")
	flag.IntVar(&width, "w", 16, "width of query cuboid (shorthand)")
	flag.IntVar(&height, "h", 16, "height of query cuboid (shorthand)")
	flag.IntVar(&depth, "d", 150, "depth of query cuboid (shorthand)")
	flag.StringVar(&colorsfile, "colors", "colors.txt", "definition of colors")
	flag.StringVar(&bgColor, "background", defaultBgColor, "background color")
	flag.StringVar(&bgColor, "bg", defaultBgColor, "background color (shorthand)")
	flag.StringVar(&outfile, "output", "out.png", "image file of result")
	flag.StringVar(&outfile, "o", "out.png", "image file of result (shorthand)")
	flag.BoolVar(&shaded, "shaded", true, "draw relief")
	flag.BoolVar(&transparent, "transparent", false, "render transparent blocks")
	flag.Float64Var(
		&transparentDim, "transparent-dim", common.DefaultTransparentDim*100,
		"Extra dimming of transparent nodes every depth meter in percent (0-100).")
	flag.StringVar(&cpuProfile, "cpuprofile", "", "write cpu profile to file")
	flag.BoolVar(&version, "version", false, "Print version and exit.")
	flag.Parse()

	if version {
		common.PrintVersionAndExit()
	}

	bg := common.ParseColorDefault(bgColor, common.BackgroundColor)

	if cpuProfile != "" {
		f, err := os.Create(cpuProfile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}

	var colors *common.Colors
	var err error
	if colors, err = common.ParseColors(colorsfile); err != nil {
		log.Fatalf("Cannot open color file: %s", err)
	}
	// transparentDim is given in percent; the field is a fraction,
	// so clamp to [0, 1] (was erroneously clamped to [0, 100]).
	colors.TransparentDim = common.Clamp32f(
		float32(transparentDim/100.0), 0.0, 1.0)

	var proto, address string
	if strings.ContainsRune(host, '/') {
		proto, address = "unix", host
	} else {
		proto, address = "tcp", fmt.Sprintf("%s:%d", host, port)
	}

	var client *common.RedisClient
	if client, err = common.NewRedisClient(proto, address); err != nil {
		log.Fatalf("Cannot connect to '%s': %s", address, err)
	}
	defer client.Close()

	if shaded {
		// Grow the queried area by one block on each side so the
		// relief shading has neighbors to sample.
		width += 2
		height += 2
		x--
		z--
	}

	q1x, q1y, q1z := int16(x), int16(y), int16(z)
	q2x, q2y, q2z := q1x+int16(width)-1, q1y+int16(depth)-1, q1z+int16(height)-1

	renderer := common.NewRenderer(width, height, transparent)
	renderer.SetPos(q1x, q1z)

	renderFn := func(block *common.Block) error {
		return renderer.RenderBlock(block, colors)
	}

	yOrder := common.NewYOrder(renderFn, 512)

	numBlocks := 0
	drawBlock := func(block *common.Block) *common.Block {
		block, err := yOrder.RenderBlock(block)
		if err != nil {
			log.Printf("WARN: rendering block failed: %s\n", err)
		}
		numBlocks++
		return block
	}

	// Query in 8-block-high slabs from top to bottom and stop early
	// once the image is fully covered. `>=` so the bottom row is
	// still rendered when the slabs divide the range evenly.
	c1 := common.Coord{X: q1x, Z: q1z}
	c2 := common.Coord{X: q2x, Z: q2z}
	for c2.Y = q2y; c2.Y >= q1y; c2.Y -= 8 {
		c1.Y = c2.Y - 7
		if c1.Y < q1y {
			c1.Y = q1y
		}
		cuboid := common.Cuboid{P1: common.MinCoord(c1, c2), P2: common.MaxCoord(c1, c2)}
		if _, err = client.QueryCuboid(cuboid, drawBlock); err != nil {
			log.Fatalf("query failed: %s", err)
		}
		if err = yOrder.Drain(); err != nil {
			log.Printf("WARN: rendering block failed: %s\n", err)
		}
		if renderer.IsFilled() {
			break
		}
	}

	var img image.Image
	if shaded {
		img = renderer.CreateShadedImage(
			16, 16, (width-2)*16, (height-2)*16,
			colors, bg)
	} else {
		img = renderer.CreateImage(colors.Colors, bg)
	}

	if err = common.SaveAsPNG(outfile, img); err != nil {
		log.Fatalf("writing image failed: %s", err)
	}

	log.Printf("num blocks: %d\n", numBlocks)
	log.Printf("rejected blocks: %d\n", renderer.RejectedBlocks)
	log.Printf("transparent blocks: %d\n", renderer.TransparentBlocks)
	log.Printf("solid blocks: %d\n", renderer.SolidBlocks)
}

View File

@ -0,0 +1,125 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"bytes"
"encoding/json"
"log"
"net/http"
"github.com/gorilla/websocket"
)
// websocketForwarder fans broadcast messages out to all connected
// websocket clients.
type websocketForwarder struct {
	upgrader    *websocket.Upgrader
	register    chan *connection // newly accepted clients
	unregister  chan *connection // departing clients
	broadcast   chan msg         // messages for all clients
	connections map[*connection]bool
}

// connection is one websocket client with its outgoing send queue.
type connection struct {
	ws   *websocket.Conn
	send chan []byte
}

// msg is one broadcast payload: changed tiles and/or player updates.
type msg struct {
	tiles []xz
	pls   []*player
}
// newWebsocketForwarder creates a forwarder with unbuffered control
// channels and an empty connection set; run must be started for it
// to make progress.
func newWebsocketForwarder() *websocketForwarder {
	wsf := websocketForwarder{
		upgrader:    &websocket.Upgrader{ReadBufferSize: 512, WriteBufferSize: 2048},
		register:    make(chan *connection),
		unregister:  make(chan *connection),
		broadcast:   make(chan msg),
		connections: make(map[*connection]bool),
	}
	return &wsf
}
// run is the forwarder's event loop: it owns the connection set and
// serializes registration, unregistration and broadcasting. Clients
// whose send queue is full are dropped.
func (wsf *websocketForwarder) run() {
	for {
		select {
		case c := <-wsf.register:
			wsf.connections[c] = true
		case c := <-wsf.unregister:
			if _, ok := wsf.connections[c]; ok {
				delete(wsf.connections, c)
				close(c.send)
			}
		case message := <-wsf.broadcast:
			if len(wsf.connections) == 0 {
				continue
			}
			// Encode the message once for all clients.
			encMsg := map[string]interface{}{}
			if message.tiles != nil {
				encMsg["tiles"] = message.tiles
			}
			if message.pls != nil {
				encMsg["players"] = message.pls
			}
			var buf bytes.Buffer
			encoder := json.NewEncoder(&buf)
			if err := encoder.Encode(encMsg); err != nil {
				log.Printf("encoding changes failed: %s\n", err)
				continue
			}
			m := buf.Bytes()
			for c := range wsf.connections {
				select {
				case c.send <- m:
				default:
					// Client cannot keep up: drop it.
					// (Deleting during range is safe in Go.)
					delete(wsf.connections, c)
					close(c.send)
				}
			}
		}
	}
}
// ServeHTTP upgrades the request to a websocket, registers the client,
// and blocks draining incoming messages until the connection dies.
// The deferred unregister removes the client from the broadcast set.
func (wsf *websocketForwarder) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
    ws, err := wsf.upgrader.Upgrade(rw, r, nil)
    if err != nil {
        log.Printf("Cannot upgrade to websocket: %s\n", err)
        return
    }
    // Small buffered queue; slow clients get dropped by run() when full.
    c := &connection{ws: ws, send: make(chan []byte, 8)}
    wsf.register <- c
    defer func() { wsf.unregister <- c }()
    go c.writer()
    c.reader()
}
// BaseTilesUpdated broadcasts changed base-tile coordinates to all clients.
func (wsf *websocketForwarder) BaseTilesUpdated(changes []xz) {
    wsf.broadcast <- msg{tiles: changes}
}

// BroadcastPlayers broadcasts the current player list to all clients.
func (wsf *websocketForwarder) BroadcastPlayers(pls []*player) {
    wsf.broadcast <- msg{pls: pls}
}
// writer pumps queued messages to the websocket until the send channel
// is closed (by run() on unregister) or a write fails.
func (c *connection) writer() {
    defer c.ws.Close()
    for msg := range c.send {
        if c.ws.WriteMessage(websocket.TextMessage, msg) != nil {
            break
        }
    }
}

// reader consumes and discards client messages; it exists so that a
// closed/broken connection is noticed and ServeHTTP can unregister.
func (c *connection) reader() {
    defer c.ws.Close()
    for {
        // Just read the message and ignore it.
        if _, _, err := c.ws.NextReader(); err != nil {
            break
        }
    }
}

149
cmd/mtwebmapper/main.go Normal file
View File

@ -0,0 +1,149 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"flag"
"fmt"
"log"
"net"
"net/http"
"strings"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
"github.com/gorilla/mux"
)
// main wires up and starts the mtwebmapper web server: static file
// serving, tile delivery, optional websocket push, optional player
// tracking via a FIFO, and (when a Redis backend is configured) live
// tile re-rendering on /update requests.
func main() {
    var (
        webPort        int
        webHost        string
        webDir         string
        mapDir         string
        redisPort      int
        redisHost      string
        colorsFile     string
        bgColor        string
        workers        int
        transparent    bool
        transparentDim float64
        updateHosts    string
        websockets     bool
        playersFIFO    string
        version        bool
        yMin           int
        yMax           int
    )
    defaultBgColor := common.ColorToHex(common.BackgroundColor)
    flag.IntVar(&webPort, "web-port", 8808, "port of the web server")
    flag.IntVar(&webPort, "p", 8808, "port of the web server (shorthand)")
    flag.StringVar(&webHost, "web-host", "localhost", "address to bind web server")
    flag.StringVar(&webHost, "h", "localhost", "address to bind web server(shorthand)")
    flag.StringVar(&webDir, "web", "web", "static served web files.")
    flag.StringVar(&webDir, "w", "web", "static served web files (shorthand)")
    flag.StringVar(&mapDir, "map", "map", "directory of prerendered tiles")
    flag.StringVar(&mapDir, "m", "map", "directory of prerendered tiles (shorthand)")
    flag.StringVar(&updateHosts, "update-hosts", "localhost",
        "';' separated list of hosts which are allowed to send map update requests")
    flag.StringVar(&updateHosts, "u", "localhost",
        "';' separated list of hosts which are allowed to send map update requests (shorthand)")
    flag.StringVar(&redisHost, "redis-host", "", "address of the backend Redis server")
    flag.StringVar(&redisHost, "rh", "", "address of the backend Redis server (shorthand)")
    flag.IntVar(&redisPort, "redis-port", 6379, "port of the backend Redis server")
    flag.IntVar(&redisPort, "rp", 6379, "port of the backend Redis server (shorthand)")
    flag.IntVar(&workers, "workers", 1, "number of workers to render tiles")
    flag.StringVar(&colorsFile, "colors", "colors.txt", "colors used to render map tiles.")
    flag.StringVar(&colorsFile, "c", "colors.txt", "colors used to render map tiles (shorthand).")
    flag.StringVar(&bgColor, "background", defaultBgColor, "background color")
    flag.StringVar(&bgColor, "bg", defaultBgColor, "background color (shorthand)")
    flag.BoolVar(&transparent, "transparent", false, "Render transparent blocks.")
    flag.BoolVar(&transparent, "t", false, "Render transparent blocks (shorthand).")
    flag.Float64Var(&transparentDim,
        "transparent-dim", common.DefaultTransparentDim*100.0,
        "Extra dimming of transparent nodes each depth meter in percent.")
    // Fixed typo in the help text below: "fimming" -> "dimming".
    flag.Float64Var(&transparentDim,
        "td", common.DefaultTransparentDim*100.0,
        "Extra dimming of transparent nodes each depth meter in percent. (shorthand)")
    flag.BoolVar(&websockets, "websockets", false, "Forward tile changes to clients via websockets.")
    flag.BoolVar(&websockets, "ws", false, "Forward tile changes to clients via websockets (shorthand).")
    flag.StringVar(&playersFIFO, "players", "", "Path to FIFO file to read active players from.")
    flag.StringVar(&playersFIFO, "ps", "", "Path to FIFO file to read active players from (shorthand).")
    flag.IntVar(&yMin, "ymin", common.MinHeight, "Minimum y in blocks.")
    flag.IntVar(&yMax, "ymax", common.MaxHeight, "Maximum y in blocks.")
    flag.BoolVar(&version, "version", false, "Print version and exit.")
    flag.Parse()

    if version {
        common.PrintVersionAndExit()
    }

    bg := common.ParseColorDefault(bgColor, common.BackgroundColor)

    router := mux.NewRouter()

    // Tile delivery: levels 0-8 from disk, deeper zooms cut out of
    // the level-8 base tiles on the fly.
    subBaseLine := newSubBaseLine(mapDir, bg)
    router.Path("/map/{z:[0-9]+}/{x:[0-9]+}/{y:[0-9]+}.png").Handler(subBaseLine)

    var btu baseTilesUpdates
    var wsf *websocketForwarder
    if websockets {
        wsf = newWebsocketForwarder()
        go wsf.run()
        router.Path("/ws").Methods("GET").Handler(wsf)
        btu = wsf
    }

    if playersFIFO != "" {
        plys := newPlayers(playersFIFO, wsf)
        go plys.run()
        router.Path("/players").Methods("GET").Handler(plys)
    }

    // The tile updater is only useful with a database backend.
    if redisHost != "" {
        var colors *common.Colors
        var err error
        if colors, err = common.ParseColors(colorsFile); err != nil {
            log.Fatalf("ERROR: problem loading colors: %s", err)
        }
        // NOTE(review): transparentDim/100 is a fraction, so an upper
        // clamp bound of 100.0 looks like it was meant to be 1.0 —
        // confirm against common.Clamp32f's contract.
        colors.TransparentDim = common.Clamp32f(
            float32(transparentDim/100.0), 0.0, 100.0)
        // A redis host containing '/' is taken as a unix socket path.
        var redisAddress string
        if strings.ContainsRune(redisHost, '/') {
            redisAddress = redisHost
        } else {
            redisAddress = fmt.Sprintf("%s:%d", redisHost, redisPort)
        }
        var allowedUpdateIps []net.IP
        if allowedUpdateIps, err = ipsFromHosts(updateHosts); err != nil {
            log.Fatalf("ERROR: name resolving problem: %s", err)
        }
        tu := newTileUpdater(
            mapDir,
            redisAddress,
            allowedUpdateIps,
            colors, bg,
            yMin, yMax,
            transparent,
            workers,
            btu)
        go tu.doUpdates()
        router.Path("/update").Methods("POST").Handler(tu)
    }

    router.PathPrefix("/").Handler(http.FileServer(http.Dir(webDir)))
    http.Handle("/", router)
    addr := fmt.Sprintf("%s:%d", webHost, webPort)
    if err := http.ListenAndServe(addr, nil); err != nil {
        log.Fatalf("Starting server failed: %s\n", err)
    }
}

29
cmd/mtwebmapper/misc.go Normal file
View File

@ -0,0 +1,29 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"net"
"strings"
)
// ipsFromHosts resolves a ';'-separated host list to IP addresses.
// An empty input yields an empty (non-nil) slice, which callers treat
// as "no restriction": all hosts are allowed.
func ipsFromHosts(hosts string) ([]net.IP, error) {
    ips := make([]net.IP, 0)
    if hosts == "" { // Empty list: allow all hosts.
        return ips, nil
    }
    for _, host := range strings.Split(hosts, ";") {
        resolved, err := net.LookupIP(host)
        if err != nil {
            return nil, err
        }
        ips = append(ips, resolved...)
    }
    return ips, nil
}

148
cmd/mtwebmapper/players.go Normal file
View File

@ -0,0 +1,148 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"bufio"
"bytes"
"encoding/json"
"html/template"
"log"
"math"
"net/http"
"os"
"sort"
"sync"
"time"
)
// sleepInterval is the retry delay after a failed FIFO read.
const sleepInterval = time.Second * 5

// geoJSONTmpl renders a single player as a GeoJSON Feature. Note the
// deliberate coordinate swap: the map projection uses (Z, X).
// NOTE(review): html/template escapes .Name with HTML rules even though
// the surrounding document is JSON; names containing quotes come out
// entity-escaped (&#34;) rather than JSON-escaped — confirm intended.
var geoJSONTmpl = template.Must(template.New("geojson").Parse(
`{ "type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [{{.Z}}, {{.X}}]
},
"properties": {
"name": "{{.Name | html }}"
}
}`))
// player is one entry decoded from the players FIFO. The JSON tags
// match the lowercase keys written by the game-server side.
type player struct {
    X    float64 `json:"x"`
    Y    float64 `json:"y"`
    Z    float64 `json:"z"`
    Name string  `json:"name"`
}

// players periodically reads the current player list from a FIFO and
// serves the latest snapshot over HTTP.
type players struct {
    fifo string              // path of the FIFO written by the server
    wsf  *websocketForwarder // may be nil: no websocket pushes
    pls  []*player           // last snapshot, sorted by name
    mu   sync.RWMutex        // guards pls
}

// newPlayers creates a players service reading from the given FIFO.
// wsf may be nil if websocket forwarding is disabled.
func newPlayers(fifo string, wsf *websocketForwarder) *players {
    return &players{fifo: fifo, wsf: wsf, pls: []*player{}}
}
// MarshalJSON encodes the player as a GeoJSON Feature via geoJSONTmpl,
// so encoding a []*player yields a list of Features directly.
func (p *player) MarshalJSON() ([]byte, error) {
    var buf bytes.Buffer
    if err := geoJSONTmpl.Execute(&buf, p); err != nil {
        return nil, err
    }
    return buf.Bytes(), nil
}
// same reports whether o describes the same player state as p:
// identical name and coordinates equal within a small epsilon.
func (p *player) same(o *player) bool {
    const eps = 0.000001
    if p.Name != o.Name {
        return false
    }
    return math.Abs(p.X-o.X) < eps &&
        math.Abs(p.Y-o.Y) < eps &&
        math.Abs(p.Z-o.Z) < eps
}
// sortPlayersByName implements sort.Interface, ordering players
// lexicographically by name so snapshots compare deterministically.
type sortPlayersByName []*player

// Len returns the number of players.
func (pls sortPlayersByName) Len() int {
    return len(pls)
}

// Less orders two players by name.
func (pls sortPlayersByName) Less(i, j int) bool {
    return pls[i].Name < pls[j].Name
}

// Swap exchanges two entries.
func (pls sortPlayersByName) Swap(i, j int) {
    pls[i], pls[j] = pls[j], pls[i]
}
// readFromFIFO opens the FIFO and decodes one JSON array of players.
// Opening a FIFO blocks until the writer side opens it, so this call
// also paces the run() loop to the game server's update rhythm.
func (ps *players) readFromFIFO() ([]*player, error) {
    file, err := os.Open(ps.fifo)
    if err != nil {
        return nil, err
    }
    defer file.Close()
    reader := bufio.NewReader(file)
    decoder := json.NewDecoder(reader)
    var pls []*player
    if err = decoder.Decode(&pls); err != nil {
        return nil, err
    }
    // pls may be nil if the document was JSON "null".
    return pls, nil
}
// samePlayers reports whether two (sorted) player lists are equal in
// length and pairwise identical according to player.same.
func samePlayers(a, b []*player) bool {
    if len(a) != len(b) {
        return false
    }
    for i := range a {
        if !a[i].same(b[i]) {
            return false
        }
    }
    return true
}
// run is the background loop: read the player list from the FIFO,
// sort it, and when it differs from the previous snapshot store it and
// broadcast the change. Never returns; start with `go ps.run()`.
func (ps *players) run() {
    for {
        pls, err := ps.readFromFIFO()
        if err != nil {
            // FIFO missing or unreadable: back off before retrying.
            time.Sleep(sleepInterval)
            continue
        }
        if pls == nil {
            // Document was JSON "null": nothing to do.
            // NOTE(review): this path skips the sleep; if the FIFO
            // repeatedly delivers null this loops tightly — confirm.
            continue
        }
        // Sorting makes snapshot comparison order-independent.
        sort.Sort(sortPlayersByName(pls))
        var change bool
        ps.mu.Lock()
        if change = !samePlayers(pls, ps.pls); change {
            ps.pls = pls
        }
        ps.mu.Unlock()
        if change && ps.wsf != nil {
            // TODO: Throttle this!
            ps.wsf.BroadcastPlayers(pls)
        }
    }
}
// ServeHTTP answers GET /players with the latest snapshot as a JSON
// array; each element marshals itself as a GeoJSON Feature.
func (ps *players) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
    rw.Header().Set("Content-Type", "application/json")
    var pls []*player
    // Take only the slice header under the lock; entries are not
    // mutated after publication by run().
    ps.mu.RLock()
    pls = ps.pls
    ps.mu.RUnlock()
    encoder := json.NewEncoder(rw)
    if err := encoder.Encode(pls); err != nil {
        log.Printf("error: sending JSON failed: %s\n", err)
    }
}

View File

@ -0,0 +1,227 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"fmt"
"image"
"image/color"
"image/png"
"log"
"net/http"
"os"
"path/filepath"
"strconv"
"time"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
"github.com/gorilla/mux"
)
// subBaseLine serves map tiles: zoom levels below 9 straight from
// pre-rendered files, deeper zooms cut and scaled out of the level-8
// base tiles on the fly.
type subBaseLine struct {
    mapDir string     // root directory of the tile pyramid
    bg     color.RGBA // background color used for missing tiles
}

// newSubBaseLine creates a tile handler over the given tile directory.
func newSubBaseLine(mapDir string, bg color.RGBA) *subBaseLine {
    return &subBaseLine{mapDir: mapDir, bg: bg}
}
// ServeHTTP delivers the tile {z}/{x}/{y}.png. Zoom levels 0-8 exist
// on disk and are served directly; for z in 9-16 the enclosing level-8
// base tile is located, the requested sub-rectangle is cut out and
// blown up to 256x256.
func (sb *subBaseLine) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
    rw.Header().Set("Cache-Control", "max-age=0, no-cache, no-store")
    vars := mux.Vars(r)
    xs := vars["x"]
    ys := vars["y"]
    zs := vars["z"]
    x, y, z := toUint(xs), toUint(ys), toUint(zs)
    if z < 9 {
        // Pre-rendered zoom level: serve the file as-is.
        http.ServeFile(rw, r, filepath.Join(sb.mapDir,
            strconv.Itoa(int(z)),
            strconv.Itoa(int(x)),
            strconv.Itoa(int(y))+".png"))
        return
    }
    if z > 16 {
        z = 16 // deepest supported virtual zoom
    }
    // Coordinates of the level-8 base tile containing the request.
    tx := x >> (z - 8)
    ty := y >> (z - 8)
    baseTile := filepath.Join(
        sb.mapDir,
        "8",
        strconv.Itoa(int(tx)),
        strconv.Itoa(int(ty))+".png")
    var err error
    var fi os.FileInfo
    if fi, err = os.Stat(baseTile); err != nil {
        http.NotFound(rw, r)
        return
    }
    // Honor conditional requests before doing any image work.
    if checkLastModified(rw, r, fi.ModTime()) || checkETag(rw, r, fi) {
        return
    }
    // rx/ry are the low (z-8) bits of x/y: the tile's position inside
    // the base tile. parts is the number of tiles per base-tile edge.
    rx := x & ^(^uint(0) << (z - 8))
    ry := y & ^(^uint(0) << (z - 8))
    parts := uint(1) << (z - 8)
    w := uint(256) / parts // source window size in pixels
    xo := w * rx
    // y counts upward in tile space (TMS); flip into image coordinates.
    yo := w * (parts - 1 - ry)
    img := common.LoadPNG(baseTile, sb.bg)
    type subImage interface {
        SubImage(image.Rectangle) image.Image
    }
    if si, ok := img.(subImage); ok {
        img = si.SubImage(image.Rect(int(xo), int(yo), int(xo+w), int(yo+w)))
    } else {
        // Should not happen.
        http.Error(rw,
            http.StatusText(http.StatusInternalServerError),
            http.StatusInternalServerError)
        return
    }
    img = blowUp(img)
    rw.Header().Set("Content-Type", "image/png")
    if err = png.Encode(rw, img); err != nil {
        log.Printf("WARN: encoding image failed: %s\n", err)
    }
}
func blowUp(src image.Image) *image.RGBA {
// Fast path for RGBA -> RGBA
if rgba, ok := src.(*image.RGBA); ok {
return blowUpRGBA(rgba)
}
// Fallback
dst := image.NewRGBA(image.Rect(0, 0, 256, 256))
// fix point numbers x:8
dx, dy := src.Bounds().Dx(), src.Bounds().Dy()
bx, by := src.Bounds().Min.X<<8, src.Bounds().Min.Y<<8
//start := time.Now()
pix := dst.Pix
lineOfs := dst.PixOffset(0, 0) // Should be 0.
py := by
var r, g, b, a uint8
for y := 0; y < 256; y++ {
sy := (py >> 8) & 0xff
ox := -1
px := bx
ofs := lineOfs // Should not really b needed
lineOfs += dst.Stride
for x := 0; x < 256; x++ {
sx := (px >> 8) & 0xff
if sx != ox { // Minimize interface indirection access.
ox = sx
xr, xg, xb, xa := src.At(sx, sy).RGBA()
r, g, b, a = uint8(xr), uint8(xg), uint8(xb), uint8(xa)
}
pix[ofs] = r
pix[ofs+1] = g
pix[ofs+2] = b
pix[ofs+3] = a
ofs += 4
px += dx
}
py += dy
}
//log.Printf("Rendering took: %s\n", time.Since(start))
return dst
}
func blowUpRGBA(src *image.RGBA) *image.RGBA {
dst := image.NewRGBA(image.Rect(0, 0, 256, 256))
// fix point numbers x:8
dx, dy := src.Bounds().Dx(), src.Bounds().Dy()
bx, by := src.Bounds().Min.X<<8, src.Bounds().Min.Y<<8
//start := time.Now()
sPix := src.Pix
dPix := dst.Pix
py := by
// Assuming memory layout is packed 256*256*4 with stride of 4*256.
// for dLineOfs, dEnd := dst.PixOffset(0, 0), dst.PixOffset(0, 256); dLineOfs < dEnd; dLineOfs += dst.Stride {
for ofs := 0; ofs < 256*256*4; {
sy := (py >> 8) & 0xff
sLineOfs := src.PixOffset(0, sy)
px := bx
// ofs := dLineOfs
for end := ofs + 4*256; ofs < end; ofs += 4 {
sOfs := sLineOfs + ((px >> 6) & 0x3fc)
px += dx
dPix[ofs] = sPix[sOfs]
dPix[ofs+1] = sPix[sOfs+1]
dPix[ofs+2] = sPix[sOfs+2]
dPix[ofs+3] = sPix[sOfs+3]
}
py += dy
}
//log.Printf("Rendering took: %s\n", time.Since(start))
return dst
}
func checkETag(w http.ResponseWriter, r *http.Request, fi os.FileInfo) bool {
etag := fmt.Sprintf("%x-%x", fi.ModTime().Unix(), fi.Size())
if ifNoneMatch := r.Header.Get("If-None-Match"); ifNoneMatch == etag {
w.WriteHeader(http.StatusNotModified)
return true
}
w.Header().Set("ETag", etag)
return false
}
func checkLastModified(w http.ResponseWriter, r *http.Request, modtime time.Time) bool {
if modtime.IsZero() {
return false
}
// The Date-Modified header truncates sub-second precision, so
// use mtime < t+1s instead of mtime <= t to check for unmodified.
if t, err := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); err == nil && modtime.Before(t.Add(1*time.Second)) {
w.WriteHeader(http.StatusNotModified)
return true
}
w.Header().Set("Last-Modified", modtime.UTC().Format(http.TimeFormat))
return false
}
// toUint parses a decimal string into a uint. Malformed input is
// logged and mapped to 0. Using ParseUint (instead of the previous
// Atoi + uint conversion) rejects negative numbers outright rather
// than silently wrapping them to huge values; the router's [0-9]+
// patterns never produce negatives, so callers are unaffected.
func toUint(s string) uint {
    x, err := strconv.ParseUint(s, 10, strconv.IntSize)
    if err != nil {
        log.Printf("WARN: Cannot convert to int: %s\n", err)
        return 0
    }
    return uint(x)
}

View File

@ -0,0 +1,379 @@
// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.
package main
import (
"encoding/json"
"image"
"image/color"
"image/draw"
"log"
"net"
"net/http"
"path/filepath"
"strconv"
"strings"
"sync"
"github.com/bamiaux/rez"
"bytes"
"bitbucket.org/s_l_teichmann/mtsatellite/common"
)
// Number of check sums to keep in memory.
const maxHashedTiles = 256
// baseTilesUpdates is implemented by consumers that want to be told
// when base tiles changed (e.g. the websocket forwarder).
type baseTilesUpdates interface {
    BaseTilesUpdated([]xz)
}

// tileUpdater collects tile-change requests from /update and
// re-renders the affected parts of the tile pyramid in the background.
type tileUpdater struct {
    changes map[xz]struct{} // pending quantized tile coords; guarded by mu
    btu baseTilesUpdates // optional change listener; may be nil
    mapDir string // root of the tile pyramid
    redisAddress string // host:port or unix socket path
    ips []net.IP // whitelist for /update; empty allows all
    colors *common.Colors
    bg color.RGBA
    yMin, yMax int16 // vertical render range in blocks
    workers int // number of parallel render workers
    transparent bool // render transparent blocks
    cond *sync.Cond // signals arrival of new changes to doUpdates
    mu sync.Mutex
}

// xz is a 2D coordinate (block or tile space, depending on context).
type xz struct {
    X int16
    Z int16
}

// xzc is a render job whose processing can be canceled/skipped.
type xzc struct {
    xz
    canceled bool
}

// xzm is a tile coordinate plus a bitmask of its modified quadrants.
type xzm struct {
    xz
    Mask uint16
}
// quantize maps world block coordinates to base-tile coordinates.
// NOTE(review): -1933 looks like the world's minimum coordinate and 16
// the tile edge length in blocks — confirm against the renderer setup.
func (c xz) quantize() xz {
    return xz{X: (c.X - -1933) / 16, Z: (c.Z - -1933) / 16}
}

// dequantize maps a tile coordinate back to the world coordinate of
// the tile's origin (inverse of quantize up to truncation).
func (c xz) dequantize() xz {
    return xz{X: c.X*16 + -1933, Z: c.Z*16 + -1933}
}

// parent returns the coordinate of the next-lower-zoom tile containing
// c together with a bitmask selecting c's quadrant inside that parent
// (bit index = zBit<<1 | xBit).
func (c xz) parent() xzm {
    xp, xr := c.X>>1, uint16(c.X&1)
    zp, zr := c.Z>>1, uint16(c.Z&1)
    return xzm{
        xz{X: xp, Z: zp},
        1 << (zr<<1 | xr)}
}
// newTileUpdater creates a tileUpdater with an empty change set and a
// condition variable used to wake the doUpdates loop. Start processing
// with `go tu.doUpdates()`.
func newTileUpdater(
    mapDir, redisAddress string,
    ips []net.IP,
    colors *common.Colors,
    bg color.RGBA,
    yMin, yMax int,
    transparent bool,
    workers int,
    btu baseTilesUpdates) *tileUpdater {
    tu := tileUpdater{
        btu: btu,
        mapDir: mapDir,
        redisAddress: redisAddress,
        ips: ips,
        changes: map[xz]struct{}{},
        colors: colors,
        bg: bg,
        yMin: int16(yMin),
        yMax: int16(yMax),
        transparent: transparent,
        workers: workers}
    tu.cond = sync.NewCond(&tu.mu)
    return &tu
}
// checkIP reports whether the request's remote address is allowed to
// trigger tile updates. An empty whitelist allows everyone.
func (tu *tileUpdater) checkIP(r *http.Request) bool {
    if len(tu.ips) == 0 {
        return true
    }
    // RemoteAddr is host:port; strip the port and any IPv6 brackets.
    idx := strings.LastIndex(r.RemoteAddr, ":")
    if idx < 0 {
        log.Printf("WARN: cannot extract host from '%s'.\n", r.RemoteAddr)
        return false
    }
    host := strings.Trim(r.RemoteAddr[:idx], "[]")
    ip := net.ParseIP(host)
    if ip == nil {
        log.Printf("WARN: cannot get IP for host '%s'.\n", host)
        return false
    }
    for i := range tu.ips {
        // Compare in the canonical 16-byte form. The previous
        // bytes.Compare on the raw slices missed matches when one
        // side was a 4-byte IPv4 (from LookupIP) and the other the
        // 16-byte IPv4-in-IPv6 form (from ParseIP).
        if bytes.Equal(tu.ips[i].To16(), ip.To16()) {
            return true
        }
    }
    return false
}
// ServeHTTP accepts POST /update requests carrying a JSON array of
// changed block coordinates. Authorized changes are quantized to tile
// coordinates, merged into the pending set (deduplicating via the map)
// and the doUpdates loop is woken up.
func (tu *tileUpdater) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
    if !tu.checkIP(r) {
        log.Printf("WARN: Unauthorized update request from '%s'\n", r.RemoteAddr)
        http.Error(rw, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
        return
    }
    var err error
    var newChanges []xz
    decoder := json.NewDecoder(r.Body)
    if err = decoder.Decode(&newChanges); err != nil {
        log.Printf("WARN: JSON document broken: %s\n", err)
        http.Error(rw, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
        return
    }
    if len(newChanges) > 0 {
        tu.cond.L.Lock()
        for _, c := range newChanges {
            tu.changes[c.quantize()] = struct{}{}
        }
        tu.cond.L.Unlock()
        tu.cond.Signal()
    }
    rw.WriteHeader(http.StatusOK)
}
// extractChanges snapshots the pending change set into a slice of
// cancelable render jobs.
func extractChanges(changes map[xz]struct{}) []xzc {
    jobs := make([]xzc, 0, len(changes))
    for ch := range changes {
        jobs = append(jobs, xzc{xz: ch, canceled: false})
    }
    return jobs
}

// activeChanges returns the coordinates of all jobs that were not
// canceled during processing.
func activeChanges(changes []xzc) []xz {
    active := make([]xz, 0, len(changes))
    for _, c := range changes {
        if !c.canceled {
            active = append(active, c.xz)
        }
    }
    return active
}
// doUpdates is the render loop: wait for pending changes, re-render
// the affected level-8 base tiles with a worker pool, then propagate
// the changes up the pyramid (levels 7..0), and finally notify the
// registered listener. Never returns; start with `go tu.doUpdates()`.
func (tu *tileUpdater) doUpdates() {
    bth := common.NewBaseTileHash(maxHashedTiles)
    baseDir := filepath.Join(tu.mapDir, "8")
    for {
        // Block until ServeHTTP signals new changes.
        tu.cond.L.Lock()
        for len(tu.changes) == 0 {
            tu.cond.Wait()
        }
        changes := extractChanges(tu.changes)
        tu.changes = map[xz]struct{}{}
        tu.cond.L.Unlock()
        jobs := make(chan *xzc)
        var done sync.WaitGroup
        // A redis address containing '/' denotes a unix socket.
        var proto string
        if strings.ContainsRune(tu.redisAddress, '/') {
            proto = "unix"
        } else {
            proto = "tcp"
        }
        // One redis connection and tile creator per worker.
        // NOTE(review): if every connection attempt fails, no worker
        // consumes `jobs` and the send below blocks forever — confirm
        // whether that is acceptable here.
        for i, n := 0, common.Min(tu.workers, len(changes)); i < n; i++ {
            var client *common.RedisClient
            var err error
            if client, err = common.NewRedisClient(proto, tu.redisAddress); err != nil {
                log.Printf("WARN: Cannot connect to redis server: %s\n", err)
                continue
            }
            btc := common.NewBaseTileCreator(
                client, tu.colors, tu.bg,
                tu.yMin, tu.yMax,
                tu.transparent, baseDir)
            done.Add(1)
            go tu.updateBaseTiles(jobs, btc, &done, bth.Update)
        }
        for i := range changes {
            jobs <- &changes[i]
        }
        close(jobs)
        done.Wait()
        // Jobs whose tile content did not change were canceled and
        // need no pyramid propagation.
        actChs := activeChanges(changes)
        if len(actChs) == 0 {
            continue
        }
        // Aggregate the changed quadrants per parent tile.
        parentJobs := make(map[xz]uint16)
        for i := range actChs {
            pxz := actChs[i].parent()
            parentJobs[pxz.xz] |= pxz.Mask
        }
        // Walk up the pyramid, level by level, re-composing parents.
        for level := 7; level >= 0; level-- {
            pJobs := make(chan xzm)
            for i, n := 0, common.Min(len(parentJobs), tu.workers); i < n; i++ {
                done.Add(1)
                go tu.updatePyramidTiles(level, pJobs, &done)
            }
            ppJobs := make(map[xz]uint16)
            for c, mask := range parentJobs {
                pJobs <- xzm{c, mask}
                pxz := c.parent()
                ppJobs[pxz.xz] |= pxz.Mask
            }
            close(pJobs)
            done.Wait()
            parentJobs = ppJobs
        }
        if tu.btu != nil {
            tu.btu.BaseTilesUpdated(actChs)
        }
    }
}
// updatePyramidTiles is one pyramid worker: it re-composes parent
// tiles for the given level until the jobs channel is closed, reusing
// two scratch images across jobs to avoid per-tile allocations.
func (tu *tileUpdater) updatePyramidTiles(
    level int, jobs chan xzm, done *sync.WaitGroup) {
    defer done.Done()
    scratch := image.NewRGBA(image.Rect(0, 0, 256, 256))
    resized := image.NewRGBA(image.Rect(0, 0, 128, 128))
    for job := range jobs {
        if err := tu.updatePyramidTile(scratch, resized, level, job); err != nil {
            log.Printf("Updating pyramid tile failed: %s\n", err)
        }
    }
}
/*
Quadrant layout of the destination points within a 256x256 parent tile:
(0,0) (128, 0)
(0, 128) (128, 128)
*/
// dps maps quadrant index -> top-left corner of that quadrant in the
// parent image; the vertical order is flipped relative to ofs because
// image y grows downward while tile z grows upward.
var dps = [4]image.Point{
    image.Pt(0, 128),
    image.Pt(128, 128),
    image.Pt(0, 0),
    image.Pt(128, 0),
}

// ofs maps quadrant index -> (x, z) offset of the child tile relative
// to the doubled parent coordinate.
var ofs = [4][2]int{
    {0, 0},
    {1, 0},
    {0, 1},
    {1, 1}}

// windowSize is the pixel size of one quadrant in the parent tile.
var windowSize = image.Pt(128, 128)
// updatePyramidTile rebuilds one parent tile at the given level.
// Quadrants flagged in j.Mask are re-created by loading the child tile
// from level+1 and downscaling it to 128x128; unflagged quadrants are
// copied from the existing parent tile (loaded lazily). The result is
// written atomically over the parent tile file.
func (tu *tileUpdater) updatePyramidTile(scratch, resized *image.RGBA, level int, j xzm) error {
    var orig image.Image
    origPath := filepath.Join(
        tu.mapDir,
        strconv.Itoa(level),
        strconv.Itoa(int(j.X)),
        strconv.Itoa(int(j.Z))+".png")
    sr := resized.Bounds()
    levelDir := strconv.Itoa(level + 1)
    for i := uint16(0); i < 4; i++ {
        if j.Mask&(1<<i) != 0 {
            // Quadrant i was modified: downscale the child tile.
            o := ofs[i]
            bx, bz := int(2*j.X), int(2*j.Z)
            path := filepath.Join(
                tu.mapDir,
                levelDir,
                strconv.Itoa(bx+o[0]),
                strconv.Itoa(bz+o[1])+".png")
            img := common.LoadPNG(path, tu.bg)
            if err := rez.Convert(resized, img, common.ResizeFilter); err != nil {
                return err
            }
            r := sr.Sub(sr.Min).Add(dps[i])
            draw.Draw(scratch, r, resized, sr.Min, draw.Src)
        } else {
            // Quadrant unchanged: copy it from the current parent.
            // Load lazily so untouched parents are read at most once.
            if orig == nil {
                orig = common.LoadPNG(origPath, tu.bg)
            }
            min := orig.Bounds().Min.Add(dps[i])
            r := image.Rectangle{min, min.Add(windowSize)}
            draw.Draw(scratch, r, orig, min, draw.Src)
        }
    }
    return common.SaveAsPNGAtomic(origPath, scratch)
}
// updateBaseTiles is one base-tile worker: it renders each job's area
// via the BaseTileCreator and hands the (slow) PNG write off to a
// separate goroutine so rendering and writing overlap. Jobs whose tile
// did not actually change (per the update hash) are marked canceled.
//
// NOTE(review): done.Done() fires in the defer right after close(jWs),
// but the writer goroutine may still be draining jWs and setting
// job.canceled while doUpdates (past done.Wait) reads those flags in
// activeChanges — looks like a data race; consider waiting for the
// writer goroutine to finish before done.Done().
func (tu *tileUpdater) updateBaseTiles(
    jobs chan *xzc,
    btc *common.BaseTileCreator,
    done *sync.WaitGroup,
    update common.BaseTileUpdateFunc) {
    // jobWriter couples a job with its deferred write function.
    type jobWriter struct {
        job *xzc
        wFn func() (bool, error)
    }
    jWs := make(chan jobWriter)
    // Writer goroutine: performs the tile writes sequentially.
    go func() {
        for jw := range jWs {
            updated, err := jw.wFn()
            if err != nil {
                log.Printf("WARN: writing tile failed: %v.\n", err)
            }
            if !updated {
                jw.job.canceled = true
            }
        }
    }()
    defer func() {
        close(jWs)
        btc.Close()
        done.Done()
    }()
    for job := range jobs {
        // Jobs are quantized tile coords; map back to world coords.
        xz := job.dequantize()
        if err := btc.RenderArea(xz.X-1, xz.Z-1); err != nil {
            log.Printf("WARN: rendering tile failed: %v.\n", err)
            job.canceled = true
            continue
        }
        jWs <- jobWriter{job, btc.WriteFunc(int(job.X), int(job.Z), update)}
    }
}

View File

@ -0,0 +1 @@
.leaflet-control-coordinates{background-color:#D8D8D8;background-color:rgba(255,255,255,.8);cursor:pointer}.leaflet-control-coordinates,.leaflet-control-coordinates .uiElement input{-webkit-border-radius:5px;-moz-border-radius:5px;border-radius:5px}.leaflet-control-coordinates .uiElement{margin:4px}.leaflet-control-coordinates .uiElement .labelFirst{margin-right:4px}.leaflet-control-coordinates .uiHidden{display:none}

1338
cmd/mtwebmapper/web/css/font-awesome.css vendored Normal file

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 14 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 30 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 7.8 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 535 B

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.4 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 65 KiB

View File

@ -0,0 +1,124 @@
/*
Author: L. Voogdt
License: MIT
Version: 1.0
*/
/* Marker setup */
.awesome-marker {
background: url('images/markers-soft.png') no-repeat 0 0;
width: 35px;
height: 46px;
position:absolute;
left:0;
top:0;
display: block;
text-align: center;
}
.awesome-marker-shadow {
background: url('images/markers-shadow.png') no-repeat 0 0;
width: 36px;
height: 16px;
}
/* Retina displays */
@media (min--moz-device-pixel-ratio: 1.5),(-o-min-device-pixel-ratio: 3/2),
(-webkit-min-device-pixel-ratio: 1.5),(min-device-pixel-ratio: 1.5),(min-resolution: 1.5dppx) {
.awesome-marker {
background-image: url('images/markers-soft@2x.png');
background-size: 720px 46px;
}
.awesome-marker-shadow {
background-image: url('images/markers-shadow@2x.png');
background-size: 35px 16px;
}
}
.awesome-marker i {
color: #333;
margin-top: 10px;
display: inline-block;
font-size: 14px;
}
.awesome-marker .icon-white {
color: #fff;
}
/* Colors */
.awesome-marker-icon-red {
background-position: 0 0;
}
.awesome-marker-icon-darkred {
background-position: -180px 0;
}
.awesome-marker-icon-lightred {
background-position: -360px 0;
}
.awesome-marker-icon-orange {
background-position: -36px 0;
}
.awesome-marker-icon-beige {
background-position: -396px 0;
}
.awesome-marker-icon-green {
background-position: -72px 0;
}
.awesome-marker-icon-darkgreen {
background-position: -252px 0;
}
.awesome-marker-icon-lightgreen {
background-position: -432px 0;
}
.awesome-marker-icon-blue {
background-position: -108px 0;
}
.awesome-marker-icon-darkblue {
background-position: -216px 0;
}
.awesome-marker-icon-lightblue {
background-position: -468px 0;
}
.awesome-marker-icon-purple {
background-position: -144px 0;
}
.awesome-marker-icon-darkpurple {
background-position: -288px 0;
}
.awesome-marker-icon-pink {
background-position: -504px 0;
}
.awesome-marker-icon-cadetblue {
background-position: -324px 0;
}
.awesome-marker-icon-white {
background-position: -574px 0;
}
.awesome-marker-icon-gray {
background-position: -648px 0;
}
.awesome-marker-icon-lightgray {
background-position: -612px 0;
}
.awesome-marker-icon-black {
background-position: -682px 0;
}

View File

@ -0,0 +1,478 @@
/* required styles */
.leaflet-map-pane,
.leaflet-tile,
.leaflet-marker-icon,
.leaflet-marker-shadow,
.leaflet-tile-pane,
.leaflet-tile-container,
.leaflet-overlay-pane,
.leaflet-shadow-pane,
.leaflet-marker-pane,
.leaflet-popup-pane,
.leaflet-overlay-pane svg,
.leaflet-zoom-box,
.leaflet-image-layer,
.leaflet-layer {
position: absolute;
left: 0;
top: 0;
}
.leaflet-container {
overflow: hidden;
-ms-touch-action: none;
}
.leaflet-tile,
.leaflet-marker-icon,
.leaflet-marker-shadow {
-webkit-user-select: none;
-moz-user-select: none;
user-select: none;
-webkit-user-drag: none;
}
.leaflet-marker-icon,
.leaflet-marker-shadow {
display: block;
}
/* map is broken in FF if you have max-width: 100% on tiles */
.leaflet-container img {
max-width: none !important;
}
/* stupid Android 2 doesn't understand "max-width: none" properly */
.leaflet-container img.leaflet-image-layer {
max-width: 15000px !important;
}
.leaflet-tile {
filter: inherit;
visibility: hidden;
}
.leaflet-tile-loaded {
visibility: inherit;
}
.leaflet-zoom-box {
width: 0;
height: 0;
}
/* workaround for https://bugzilla.mozilla.org/show_bug.cgi?id=888319 */
.leaflet-overlay-pane svg {
-moz-user-select: none;
}
.leaflet-tile-pane { z-index: 2; }
.leaflet-objects-pane { z-index: 3; }
.leaflet-overlay-pane { z-index: 4; }
.leaflet-shadow-pane { z-index: 5; }
.leaflet-marker-pane { z-index: 6; }
.leaflet-popup-pane { z-index: 7; }
.leaflet-vml-shape {
width: 1px;
height: 1px;
}
.lvml {
behavior: url(#default#VML);
display: inline-block;
position: absolute;
}
/* control positioning */
.leaflet-control {
position: relative;
z-index: 7;
pointer-events: auto;
}
.leaflet-top,
.leaflet-bottom {
position: absolute;
z-index: 1000;
pointer-events: none;
}
.leaflet-top {
top: 0;
}
.leaflet-right {
right: 0;
}
.leaflet-bottom {
bottom: 0;
}
.leaflet-left {
left: 0;
}
.leaflet-control {
float: left;
clear: both;
}
.leaflet-right .leaflet-control {
float: right;
}
.leaflet-top .leaflet-control {
margin-top: 10px;
}
.leaflet-bottom .leaflet-control {
margin-bottom: 10px;
}
.leaflet-left .leaflet-control {
margin-left: 10px;
}
.leaflet-right .leaflet-control {
margin-right: 10px;
}
/* zoom and fade animations */
.leaflet-fade-anim .leaflet-tile,
.leaflet-fade-anim .leaflet-popup {
opacity: 0;
-webkit-transition: opacity 0.2s linear;
-moz-transition: opacity 0.2s linear;
-o-transition: opacity 0.2s linear;
transition: opacity 0.2s linear;
}
.leaflet-fade-anim .leaflet-tile-loaded,
.leaflet-fade-anim .leaflet-map-pane .leaflet-popup {
opacity: 1;
}
.leaflet-zoom-anim .leaflet-zoom-animated {
-webkit-transition: -webkit-transform 0.25s cubic-bezier(0,0,0.25,1);
-moz-transition: -moz-transform 0.25s cubic-bezier(0,0,0.25,1);
-o-transition: -o-transform 0.25s cubic-bezier(0,0,0.25,1);
transition: transform 0.25s cubic-bezier(0,0,0.25,1);
}
.leaflet-zoom-anim .leaflet-tile,
.leaflet-pan-anim .leaflet-tile,
.leaflet-touching .leaflet-zoom-animated {
-webkit-transition: none;
-moz-transition: none;
-o-transition: none;
transition: none;
}
.leaflet-zoom-anim .leaflet-zoom-hide {
visibility: hidden;
}
/* cursors */
.leaflet-clickable {
cursor: pointer;
}
.leaflet-container {
cursor: -webkit-grab;
cursor: -moz-grab;
}
.leaflet-popup-pane,
.leaflet-control {
cursor: auto;
}
.leaflet-dragging .leaflet-container,
.leaflet-dragging .leaflet-clickable {
cursor: move;
cursor: -webkit-grabbing;
cursor: -moz-grabbing;
}
/* visual tweaks */
.leaflet-container {
background: #ddd;
outline: 0;
}
.leaflet-container a {
color: #0078A8;
}
.leaflet-container a.leaflet-active {
outline: 2px solid orange;
}
.leaflet-zoom-box {
border: 2px dotted #38f;
background: rgba(255,255,255,0.5);
}
/* general typography */
.leaflet-container {
font: 12px/1.5 "Helvetica Neue", Arial, Helvetica, sans-serif;
}
/* general toolbar styles */
.leaflet-bar {
box-shadow: 0 1px 5px rgba(0,0,0,0.65);
border-radius: 4px;
}
.leaflet-bar a,
.leaflet-bar a:hover {
background-color: #fff;
border-bottom: 1px solid #ccc;
width: 26px;
height: 26px;
line-height: 26px;
display: block;
text-align: center;
text-decoration: none;
color: black;
}
.leaflet-bar a,
.leaflet-control-layers-toggle {
background-position: 50% 50%;
background-repeat: no-repeat;
display: block;
}
.leaflet-bar a:hover {
background-color: #f4f4f4;
}
.leaflet-bar a:first-child {
border-top-left-radius: 4px;
border-top-right-radius: 4px;
}
.leaflet-bar a:last-child {
border-bottom-left-radius: 4px;
border-bottom-right-radius: 4px;
border-bottom: none;
}
.leaflet-bar a.leaflet-disabled {
cursor: default;
background-color: #f4f4f4;
color: #bbb;
}
.leaflet-touch .leaflet-bar a {
width: 30px;
height: 30px;
line-height: 30px;
}
/* zoom control */
.leaflet-control-zoom-in,
.leaflet-control-zoom-out {
font: bold 18px 'Lucida Console', Monaco, monospace;
text-indent: 1px;
}
.leaflet-control-zoom-out {
font-size: 20px;
}
.leaflet-touch .leaflet-control-zoom-in {
font-size: 22px;
}
.leaflet-touch .leaflet-control-zoom-out {
font-size: 24px;
}
/* layers control */
.leaflet-control-layers {
box-shadow: 0 1px 5px rgba(0,0,0,0.4);
background: #fff;
border-radius: 5px;
}
.leaflet-control-layers-toggle {
background-image: url(images/layers.png);
width: 36px;
height: 36px;
}
.leaflet-retina .leaflet-control-layers-toggle {
background-image: url(images/layers-2x.png);
background-size: 26px 26px;
}
.leaflet-touch .leaflet-control-layers-toggle {
width: 44px;
height: 44px;
}
.leaflet-control-layers .leaflet-control-layers-list,
.leaflet-control-layers-expanded .leaflet-control-layers-toggle {
display: none;
}
.leaflet-control-layers-expanded .leaflet-control-layers-list {
display: block;
position: relative;
}
.leaflet-control-layers-expanded {
padding: 6px 10px 6px 6px;
color: #333;
background: #fff;
}
.leaflet-control-layers-selector {
margin-top: 2px;
position: relative;
top: 1px;
}
.leaflet-control-layers label {
display: block;
}
.leaflet-control-layers-separator {
height: 0;
border-top: 1px solid #ddd;
margin: 5px -10px 5px -6px;
}
/* attribution and scale controls */
.leaflet-container .leaflet-control-attribution {
background: #fff;
background: rgba(255, 255, 255, 0.7);
margin: 0;
}
.leaflet-control-attribution,
.leaflet-control-scale-line {
padding: 0 5px;
color: #333;
}
.leaflet-control-attribution a {
text-decoration: none;
}
.leaflet-control-attribution a:hover {
text-decoration: underline;
}
.leaflet-container .leaflet-control-attribution,
.leaflet-container .leaflet-control-scale {
font-size: 11px;
}
.leaflet-left .leaflet-control-scale {
margin-left: 5px;
}
.leaflet-bottom .leaflet-control-scale {
margin-bottom: 5px;
}
.leaflet-control-scale-line {
border: 2px solid #777;
border-top: none;
line-height: 1.1;
padding: 2px 5px 1px;
font-size: 11px;
white-space: nowrap;
overflow: hidden;
-moz-box-sizing: content-box;
box-sizing: content-box;
background: #fff;
background: rgba(255, 255, 255, 0.5);
}
.leaflet-control-scale-line:not(:first-child) {
border-top: 2px solid #777;
border-bottom: none;
margin-top: -2px;
}
.leaflet-control-scale-line:not(:first-child):not(:last-child) {
border-bottom: 2px solid #777;
}
.leaflet-touch .leaflet-control-attribution,
.leaflet-touch .leaflet-control-layers,
.leaflet-touch .leaflet-bar {
box-shadow: none;
}
.leaflet-touch .leaflet-control-layers,
.leaflet-touch .leaflet-bar {
border: 2px solid rgba(0,0,0,0.2);
background-clip: padding-box;
}
/* popup */
.leaflet-popup {
position: absolute;
text-align: center;
}
.leaflet-popup-content-wrapper {
padding: 1px;
text-align: left;
border-radius: 12px;
}
.leaflet-popup-content {
margin: 13px 19px;
line-height: 1.4;
}
.leaflet-popup-content p {
margin: 18px 0;
}
.leaflet-popup-tip-container {
margin: 0 auto;
width: 40px;
height: 20px;
position: relative;
overflow: hidden;
}
.leaflet-popup-tip {
width: 17px;
height: 17px;
padding: 1px;
margin: -10px auto 0;
-webkit-transform: rotate(45deg);
-moz-transform: rotate(45deg);
-ms-transform: rotate(45deg);
-o-transform: rotate(45deg);
transform: rotate(45deg);
}
.leaflet-popup-content-wrapper,
.leaflet-popup-tip {
background: white;
box-shadow: 0 3px 14px rgba(0,0,0,0.4);
}
.leaflet-container a.leaflet-popup-close-button {
position: absolute;
top: 0;
right: 0;
padding: 4px 4px 0 0;
text-align: center;
width: 18px;
height: 14px;
font: 16px/14px Tahoma, Verdana, sans-serif;
color: #c3c3c3;
text-decoration: none;
font-weight: bold;
background: transparent;
}
.leaflet-container a.leaflet-popup-close-button:hover {
color: #999;
}
.leaflet-popup-scrolled {
overflow: auto;
border-bottom: 1px solid #ddd;
border-top: 1px solid #ddd;
}
.leaflet-oldie .leaflet-popup-content-wrapper {
zoom: 1;
}
.leaflet-oldie .leaflet-popup-tip {
width: 24px;
margin: 0 auto;
-ms-filter: "progid:DXImageTransform.Microsoft.Matrix(M11=0.70710678, M12=0.70710678, M21=-0.70710678, M22=0.70710678)";
filter: progid:DXImageTransform.Microsoft.Matrix(M11=0.70710678, M12=0.70710678, M21=-0.70710678, M22=0.70710678);
}
.leaflet-oldie .leaflet-popup-tip-container {
margin-top: -1px;
}
.leaflet-oldie .leaflet-control-zoom,
.leaflet-oldie .leaflet-control-layers,
.leaflet-oldie .leaflet-popup-content-wrapper,
.leaflet-oldie .leaflet-popup-tip {
border: 1px solid #999;
}
/* div icon */
.leaflet-div-icon {
background: #fff;
border: 1px solid #666;
}

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,152 @@
<!DOCTYPE html>
<html>
<head>
	<title>Minetest demo map</title>
	<meta charset="utf-8" />
	<!-- Leaflet core styles plus plugin styles: coordinate readout and
	     Font Awesome based markers. -->
	<link rel="stylesheet" href="css/leaflet.css" />
	<link rel="stylesheet" href="css/Leaflet.Coordinates-0.1.4.css" />
	<link rel="stylesheet" href="css/font-awesome.css" />
	<link rel="stylesheet" href="css/leaflet.awesome-markers.css" />
	<style type="text/css">
		/* Full-window map pane on a dark background. */
		body {
			height: 100%;
		}
		#map {
			display: block;
			position: absolute;
			top: 0;
			left: 0;
			width: 100%;
			height: 100%;
			background-color: #111111;
		}
		.leaflet-container {
			cursor: crosshair;
		}
		.leaflet-control-coordinates,
		.leaflet-control-layers {
			box-shadow: 0 1px 3px rgba(0,0,0,0.3);
			background-color:rgba(255,255,255,.85);
		}
		.awesome-marker i {
			font-size: 18px;
			margin-left: -1px;
		}
	</style>
</head>
<body>
	<div id="map"></div>
	<!-- Leaflet first, then the plugins the inline script below relies on. -->
	<script src="js/leaflet.js"></script>
	<script src="js/Leaflet.Coordinates-0.1.4.min.js"></script>
	<script src="js/easy-button.js"></script>
	<script src="js/auto-update.js"></script>
	<script type="text/javascript" src="js/leaflet-hash.js"></script>
	<script type="text/javascript" src="js/leaflet.ajax.js"></script>
	<script type="text/javascript" src="js/leaflet.awesome-markers.js"></script>
	<script>
	var useWebsocket = true; // Set to true if you want websocket support

	// Identity projection: lat/lng pass straight through to point space.
	// The "map" is a flat Minetest world, not a globe, so no Mercator math.
	L.Projection.NoWrap = {
		project: function (latlng) {
			return new L.Point(latlng.lat, latlng.lng);
		},

		unproject: function (point, unbounded) {
			return new L.LatLng(point.x, point.y, true);
		}
	};

	// Custom CRS wrapping the identity projection. The scale/offset
	// constants presumably encode the demo world's pixel extents at the
	// deepest zoom (65536 = 2^16 for maxZoom 16) -- TODO confirm against
	// the tile generator before changing.
	L.CRS.Direct = L.Util.extend({}, L.CRS, {
		code: 'Direct',
		projection: L.Projection.NoWrap,
		transformation: new L.Transformation(1.0/65536, 30928.0/65536, -1.0/65536, 34608.0/65536)
	});

	// Pre-rendered tile pyramid served from map/{zoom}/{x}/{y}.png.
	var world = new L.tileLayer('map/{z}/{x}/{y}.png', {
		minZoom: 0,
		maxZoom: 16,
		attribution: 'Demo world',
		continuousWorld: false,
		noWrap: true,
		tms: true,
		unloadInvisibleTiles: true
	});

	// Live player positions fetched as GeoJSON; one orange marker each.
	var players = L.geoJson.ajax('/players', {
		pointToLayer: function(feature, latlng) {
			return L.marker(latlng, {
				icon: L.AwesomeMarkers.icon({
					icon: 'male',
					iconColor: 'black',
					prefix: 'fa',
					markerColor: 'orange'
				}),
				title: feature.properties.name
			})
		}
	});

	var rasterMaps = {
		"A demo world": world,
	};

	// NOTE(review): relies on automatic semicolon insertion here.
	var latest = world

	var overlayMaps = {'Players': players};

	var map = L.map('map', {
		center: [0,0],
		zoom: 3,
		layers: [latest],
		worldCopyJump: false,
		crs: L.CRS.Direct});

	// Coordinate readout; labels renamed X/Y to match Minetest terms.
	L.control.coordinates({
		position:"topright", //optional default "bootomright"
		decimals:0, //optional default 4
		decimalSeperator:".", //optional default "."
		labelTemplateLat:"X: {y}", //optional default "Lat: {y}"
		labelTemplateLng:"Y: {x}", //optional default "Lng: {x}"
		enableUserInput:false, //optional default true
		useDMS:false, //optional default false
		useLatLngOrder: true //ordering of labels, default false-> lng-lat
	}).addTo(map);

	// When websockets are available, add the auto-update toggle; while
	// auto-update runs, the manual refresh button is hidden.
	var manualUpdateControl;
	if (useWebsocket && 'WebSocket' in window) {
		L.autoUpdate('autoUpdate', function(pressed) {
			var styleDec = manualUpdateControl.getContainer().style;
			styleDec.visibility = pressed ? 'hidden' : 'visible';
		},
		players);
	}

	var layersControl = new L.Control.Layers(rasterMaps, overlayMaps, {collapsed: false});
	map.addControl(layersControl);

	// Manual refresh: cache-bust every loaded tile and re-fetch players.
	manualUpdateControl = L.easyButton('fa-refresh',
		function (){
			var tiles = document.getElementsByTagName("img");
			for (var i = 0; i < tiles.length; i++) {
				var img = tiles[i];
				var cl = img.getAttribute("class");
				if (cl.indexOf("leaflet-tile-loaded") >= 0) {
					var src = img.src;
					var idx = src.lastIndexOf("#");
					if (idx >= 0) {
						src = src.substring(0, idx);
					}
					// Fresh fragment forces the browser to re-request the tile.
					img.src = src + "#" + Math.random();
				}
			}
			//map._resetView(map.getCenter(), map.getZoom(), false);
			players.refresh("/players");
		},
		'Update view'
	);

	// Keep zoom/center in the URL fragment for shareable links.
	var hash = new L.Hash(map)
	</script>
</body>
</html>

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,187 @@
// L.Control.AutoUpdate is a single-button Leaflet control that toggles
// live map updates pushed by the server over a WebSocket. While active
// it replaces the player layer's data and cache-busts invalidated tiles.
L.Control.AutoUpdate = L.Control.extend({

	options: {
		position: 'topleft',
		label: 'Automatic update',
		// GeoJSON layer whose content is replaced by pushed player data.
		layer: undefined
	},

	// true while auto-updating is OFF (button shows the "play" icon).
	pressed: true,

	// Builds the control DOM: one <a> holding a Font Awesome icon.
	onAdd: function() {
		var container = L.DomUtil.create('div', 'leaflet-bar leaflet-control');
		this.link = L.DomUtil.create('a', 'leaflet-bar-part', container);
		this.iconStart = L.DomUtil.create('i', 'fa fa-play', this.link);
		this.link.href = '#';
		L.DomEvent.on(this.link, 'click', this.cbClick, this);
		return container;
	},

	// Flips the pressed state, swaps the icon, and starts or stops the
	// WebSocket updates accordingly.
	switchButtons: function() {
		if (this.pressed) {
			this.pressed = false;
			this.iconStart.setAttribute('class', 'fa fa-pause');
			this.autoUpdate();
		} else {
			this.pressed = true;
			this.iconStart.setAttribute('class', 'fa fa-play');
			this.stopUpdate();
		}
	},

	// Click handler: notify the user callback with the pre-toggle state,
	// then toggle.
	cbClick: function (e) {
		L.DomEvent.stopPropagation(e);
		this.intendedFunction(this.pressed);
		this.switchButtons();
	},

	// Placeholder; normally replaced through L.autoUpdate(..., cbFunc, ...).
	intendedFunction: function() {
		alert('no function selected');
	},

	// Closes the WebSocket if one is open. this.socket is cleared before
	// close() so the onclose/onerror callbacks cannot re-enter.
	stopUpdate: function() {
		if (this.socket) {
			var s = this.socket;
			this.socket = null;
			s.close();
		}
	},

	// Opens the WebSocket and installs all of its event handlers.
	autoUpdate: function() {
		var me = this;
		this.socket = new WebSocket('ws://' + window.location.host + '/ws');

		this.socket.onerror = function(evt) {
			me.stopUpdate();
			me.switchButtons();
		};

		this.socket.onclose = function(evt) {
			// BUG FIX: this previously assigned `this.socket = null`, but
			// inside the handler `this` is the WebSocket itself, not the
			// control -- so the control kept referencing the dead socket
			// and the heartbeat kept pinging it.
			me.socket = null;
		};

		this.socket.onopen = function(evt) {
			// Send pings every 8 seconds to keep the connection alive.
			var heartbeat = function() {
				if (heartbeat && me.socket) {
					me.socket.send("PING");
					setTimeout(heartbeat, 8000);
				} else {
					// Prevent sending pings to re-opened sockets.
					heartbeat = null;
				}
			};
			setTimeout(heartbeat, 8000);
		};

		this.socket.onmessage = function(evt) {
			var json = evt.data;
			if (!(typeof json === "string")) {
				return;
			}
			var msg;
			try {
				msg = JSON.parse(json);
			}
			catch (err) {
				// Ignore malformed frames.
				return;
			}

			// Replace the player layer whenever the message carries players.
			if (msg.players) {
				me.options.layer.clearLayers();
				me.options.layer.addData(msg.players);
			}

			var tilesData = msg.tiles;
			if (!tilesData) {
				return;
			}

			// Build a lookup pyramid: level 8 holds the changed base tiles,
			// each lower level the parent tiles covering them. The returned
			// closure answers "is tile (x, z) at pyramid level y invalid?".
			var invalidate = function(td) {
				var pyramid = new Array(9);
				var last = new Object();
				pyramid[8] = last;
				for (var i = 0; i < td.length; i++) {
					var xz = td[i];
					last[xz.X + "#" + xz.Z] = xz;
				}
				for (var p = 7; p >= 0; p--) {
					var prev = pyramid[p+1];
					var curr = new Object();
					pyramid[p] = curr;
					for (var k in prev) {
						if (prev.hasOwnProperty(k)) {
							var oxz = prev[k];
							var nxz = { X: oxz.X >> 1, Z: oxz.Z >> 1 };
							curr[nxz.X + "#" + nxz.Z] = nxz;
						}
					}
				}
				return function(x, y, z) {
					// Levels deeper than 8 are mapped onto level 8.
					if (y > 8) {
						x >>= y - 8;
						z >>= y - 8;
						y = 8;
					}
					var level = pyramid[y];
					var k = x + "#" + z;
					return level.hasOwnProperty(k);
				};
			} (tilesData);

			// Force a reload of every loaded tile marked invalid by
			// appending a fresh cache-busting fragment to its URL.
			var tiles = document.getElementsByTagName('img');
			var re = /\/map\/([0-9]+)\/([0-9]+)\/([0-9]+).*/;
			for (var i = 0; i < tiles.length; i++) {
				var img = tiles[i];
				var cl = img.getAttribute('class');
				if (cl.indexOf('leaflet-tile-loaded') < 0) {
					continue;
				}
				var src = img.src;
				var coord = src.match(re);
				if (coord == null) {
					continue;
				}
				// URL shape is map/{zoom}/{x}/{y}.png; the first capture
				// (the zoom) is stored in `y` here, matching invalidate().
				var y = parseInt(coord[1], 10);
				var x = parseInt(coord[2], 10);
				var z = parseInt(coord[3], 10);
				if (invalidate(x, y, z)) {
					var idx = src.lastIndexOf('#');
					if (idx >= 0) {
						src = src.substring(0, idx);
					}
					img.src = src + '#' + Math.random();
				}
			}
		};
	}
});
// Factory for L.Control.AutoUpdate. cbLabel overrides the button label,
// cbFunc the click callback, layer the target GeoJSON layer. cbMap
// selects the map to attach to: '' skips attaching entirely, a map
// instance attaches there, and anything falsy uses the global `map`.
L.autoUpdate = function(cbLabel, cbFunc, layer, cbMap) {
	var ctrl = new L.Control.AutoUpdate();

	if (cbLabel) {
		ctrl.options.label = cbLabel;
	}
	if (cbFunc) {
		ctrl.intendedFunction = cbFunc;
	}
	if (layer) {
		ctrl.options.layer = layer;
	}

	// The empty string is the explicit "do not add to any map" marker.
	if (cbMap !== '') {
		(cbMap || map).addControl(ctrl);
	}

	return ctrl;
};

View File

@ -0,0 +1,48 @@
// L.Control.EasyButtons is a minimal one-button Leaflet control: a single
// icon link that invokes a user-supplied callback on click.
L.Control.EasyButtons = L.Control.extend({
	options: {
		position: 'topleft',
		title: '',
		// (sic) key is misspelled but part of the public surface --
		// L.easyButton assigns to options.intentedIcon, so keep it.
		intentedIcon: 'fa-circle-o'
	},

	// Builds the button DOM and wires the click handler.
	onAdd: function () {
		var container = L.DomUtil.create('div', 'leaflet-bar leaflet-control');
		this.link = L.DomUtil.create('a', 'leaflet-bar-part', container);
		L.DomUtil.create('i', 'fa fa-lg ' + this.options.intentedIcon, this.link);
		this.link.href = '#';
		L.DomEvent.on(this.link, 'click', this._click, this);
		this.link.title = this.options.title;
		return container;
	},

	// Placeholder; normally replaced through L.easyButton(..., fn, ...).
	intendedFunction: function(){ alert('no function selected');},

	_click: function (e) {
		L.DomEvent.stopPropagation(e);
		L.DomEvent.preventDefault(e);
		this.intendedFunction();
	}
	// BUG FIX: removed the trailing comma that followed _click -- a
	// syntax error in the old-IE browsers this project's CSS
	// (.leaflet-oldie rules) explicitly supports.
});
// Factory for L.Control.EasyButtons. btnIcon/btnTitle override options,
// btnFunction replaces the click callback. btnMap picks the target map:
// a loosely-empty value ('' and, via ==, also 0/false) skips attaching,
// a map instance attaches there, otherwise the global `map` is used.
L.easyButton = function(btnIcon, btnFunction, btnTitle, btnMap) {
	var btn = new L.Control.EasyButtons();

	if (btnIcon) {
		btn.options.intentedIcon = btnIcon;
	}
	if (typeof btnFunction === 'function') {
		btn.intendedFunction = btnFunction;
	}
	if (btnTitle) {
		btn.options.title = btnTitle;
	}

	if (btnMap == '') {
		// Caller will add the control to a map manually.
	} else if (btnMap) {
		btnMap.addControl(btn);
	} else {
		map.addControl(btn);
	}

	return btn;
};

View File

@ -0,0 +1,162 @@
// leaflet-hash plugin: mirrors the map's zoom/center into the URL
// fragment (#zoom/lat/lng) and restores the view from it.
(function(window) {
	// Feature detect the hashchange event (IE8+ in non-compat mode).
	var HAS_HASHCHANGE = (function() {
		var doc_mode = window.documentMode;
		return ('onhashchange' in window) &&
			(doc_mode === undefined || doc_mode > 7);
	})();

	L.Hash = function(map) {
		this.onHashChange = L.Util.bind(this.onHashChange, this);

		if (map) {
			this.init(map);
		}
	};

	// Parses "#zoom/lat/lng" into {center, zoom}, or returns false when
	// the fragment does not match that shape.
	L.Hash.parseHash = function(hash) {
		if(hash.indexOf('#') === 0) {
			hash = hash.substr(1);
		}
		var args = hash.split("/");
		if (args.length == 3) {
			var zoom = parseInt(args[0], 10),
			lat = parseFloat(args[1]),
			lon = parseFloat(args[2]);
			if (isNaN(zoom) || isNaN(lat) || isNaN(lon)) {
				return false;
			} else {
				return {
					center: new L.LatLng(lat, lon),
					zoom: zoom
				};
			}
		} else {
			return false;
		}
	};

	// Formats the map's current view as "#zoom/lat/lng". More decimal
	// precision is kept at deeper zoom levels.
	L.Hash.formatHash = function(map) {
		var center = map.getCenter(),
			zoom = map.getZoom(),
			precision = Math.max(0, Math.ceil(Math.log(zoom) / Math.LN2));

		return "#" + [zoom,
			center.lat.toFixed(precision),
			center.lng.toFixed(precision)
		].join("/");
	}, // NOTE(review): comma operator chains this into the next assignment;
	   // it works, but a plain semicolon would be clearer.

	L.Hash.prototype = {
		map: null,
		lastHash: null,

		parseHash: L.Hash.parseHash,
		formatHash: L.Hash.formatHash,

		init: function(map) {
			this.map = map;

			// reset the hash
			this.lastHash = null;
			this.onHashChange();

			if (!this.isListening) {
				this.startListening();
			}
		},

		removeFrom: function(map) {
			if (this.changeTimeout) {
				clearTimeout(this.changeTimeout);
			}

			if (this.isListening) {
				this.stopListening();
			}

			this.map = null;
		},

		onMapMove: function() {
			// bail if we're moving the map (updating from a hash),
			// or if the map is not yet loaded
			if (this.movingMap || !this.map._loaded) {
				return false;
			}

			var hash = this.formatHash(this.map);
			if (this.lastHash != hash) {
				// replace() avoids polluting the browser history.
				location.replace(hash);
				this.lastHash = hash;
			}
		},

		movingMap: false,

		// Applies the current location.hash to the map (if it parses).
		update: function() {
			var hash = location.hash;
			if (hash === this.lastHash) {
				return;
			}
			var parsed = this.parseHash(hash);
			if (parsed) {
				// movingMap suppresses the moveend echo from setView.
				this.movingMap = true;

				this.map.setView(parsed.center, parsed.zoom);

				this.movingMap = false;
			} else {
				this.onMapMove(this.map);
			}
		},

		// defer hash change updates every 100ms
		changeDefer: 100,
		changeTimeout: null,

		onHashChange: function() {
			// throttle calls to update() so that they only happen every
			// `changeDefer` ms
			if (!this.changeTimeout) {
				var that = this;
				this.changeTimeout = setTimeout(function() {
					that.update();
					that.changeTimeout = null;
				}, this.changeDefer);
			}
		},

		isListening: false,
		hashChangeInterval: null,

		startListening: function() {
			this.map.on("moveend", this.onMapMove, this);

			if (HAS_HASHCHANGE) {
				L.DomEvent.addListener(window, "hashchange", this.onHashChange);
			} else {
				// Polling fallback for browsers without hashchange.
				clearInterval(this.hashChangeInterval);
				this.hashChangeInterval = setInterval(this.onHashChange, 50);
			}

			this.isListening = true;
		},

		stopListening: function() {
			this.map.off("moveend", this.onMapMove, this);

			if (HAS_HASHCHANGE) {
				L.DomEvent.removeListener(window, "hashchange", this.onHashChange);
			} else {
				clearInterval(this.hashChangeInterval);
			}

			this.isListening = false;
		}
	};

	L.hash = function(map) {
		return new L.Hash(map);
	};

	L.Map.prototype.addHash = function() {
		this._hash = L.hash(this);
	};

	L.Map.prototype.removeHash = function() {
		this._hash.removeFrom();
	};
})(window);

View File

@ -0,0 +1,740 @@
// Browser module-loader shim generated by component(1). It bundles the
// "lie" promise library together with the calvinmetcalf-setImmediate
// next-tick backends and finally exposes the result as L.Util.Promise.
;(function(){

/**
 * Require the given path.
 *
 * @param {String} path
 * @return {Object} exports
 * @api public
 */

function require(path, parent, orig) {
  var resolved = require.resolve(path);

  // lookup failed
  if (null == resolved) {
    orig = orig || path;
    parent = parent || 'root';
    var err = new Error('Failed to require "' + orig + '" from "' + parent + '"');
    err.path = orig;
    err.parent = parent;
    err.require = true;
    throw err;
  }

  var module = require.modules[resolved];

  // perform real require()
  // by invoking the module's
  // registered function
  if (!module.exports) {
    module.exports = {};
    module.client = module.component = true;
    module.call(this, module.exports, require.relative(resolved), module);
  }

  return module.exports;
}

/**
 * Registered modules.
 */

require.modules = {};

/**
 * Registered aliases.
 */

require.aliases = {};

/**
 * Resolve `path`.
 *
 * Lookup:
 *
 *   - PATH/index.js
 *   - PATH.js
 *   - PATH
 *
 * @param {String} path
 * @return {String} path or null
 * @api private
 */

require.resolve = function(path) {
  if (path.charAt(0) === '/') path = path.slice(1);

  var paths = [
    path,
    path + '.js',
    path + '.json',
    path + '/index.js',
    path + '/index.json'
  ];

  for (var i = 0; i < paths.length; i++) {
    var path = paths[i];
    if (require.modules.hasOwnProperty(path)) return path;
    if (require.aliases.hasOwnProperty(path)) return require.aliases[path];
  }
};

/**
 * Normalize `path` relative to the current path.
 *
 * @param {String} curr
 * @param {String} path
 * @return {String}
 * @api private
 */

require.normalize = function(curr, path) {
  var segs = [];

  if ('.' != path.charAt(0)) return path;

  curr = curr.split('/');
  path = path.split('/');

  for (var i = 0; i < path.length; ++i) {
    if ('..' == path[i]) {
      curr.pop();
    } else if ('.' != path[i] && '' != path[i]) {
      segs.push(path[i]);
    }
  }

  return curr.concat(segs).join('/');
};

/**
 * Register module at `path` with callback `definition`.
 *
 * @param {String} path
 * @param {Function} definition
 * @api private
 */

require.register = function(path, definition) {
  require.modules[path] = definition;
};

/**
 * Alias a module definition.
 *
 * @param {String} from
 * @param {String} to
 * @api private
 */

require.alias = function(from, to) {
  if (!require.modules.hasOwnProperty(from)) {
    throw new Error('Failed to alias "' + from + '", it does not exist');
  }
  require.aliases[to] = from;
};

/**
 * Return a require function relative to the `parent` path.
 *
 * @param {String} parent
 * @return {Function}
 * @api private
 */

require.relative = function(parent) {
  var p = require.normalize(parent, '..');

  /**
   * lastIndexOf helper.
   */

  function lastIndexOf(arr, obj) {
    var i = arr.length;
    while (i--) {
      if (arr[i] === obj) return i;
    }
    return -1;
  }

  /**
   * The relative require() itself.
   */

  function localRequire(path) {
    var resolved = localRequire.resolve(path);
    return require(resolved, parent, path);
  }

  /**
   * Resolve relative to the parent.
   */

  localRequire.resolve = function(path) {
    var c = path.charAt(0);
    if ('/' == c) return path.slice(1);
    if ('.' == c) return require.normalize(p, path);

    // resolve deps by returning
    // the dep in the nearest "deps"
    // directory
    var segs = parent.split('/');
    var i = lastIndexOf(segs, 'deps') + 1;
    if (!i) i = 0;
    path = segs.slice(0, i + 1).join('/') + '/deps/' + path;
    return path;
  };

  /**
   * Check if module is defined at `path`.
   */

  localRequire.exists = function(path) {
    return require.modules.hasOwnProperty(localRequire.resolve(path));
  };

  return localRequire;
};

// setImmediate dispatcher: probes the backends below in order and uses
// the first one whose test() passes to schedule the task queue.
require.register("calvinmetcalf-setImmediate/lib/index.js", function(exports, require, module){
"use strict";
var types = [
    require("./nextTick"),
    require("./mutation"),
    require("./postMessage"),
    require("./messageChannel"),
    require("./stateChange"),
    require("./timeout")
];
var handlerQueue = [];

function drainQueue() {
    var i = 0,
        task,
        innerQueue = handlerQueue;
    handlerQueue = [];
    /*jslint boss: true */
    while (task = innerQueue[i++]) {
        task();
    }
}
var nextTick;
types.some(function (obj) {
    var t = obj.test();
    if (t) {
        nextTick = obj.install(drainQueue);
    }
    return t;
});
var retFunc = function (task) {
    var len, args;
    if (arguments.length > 1 && typeof task === "function") {
        args = Array.prototype.slice.call(arguments, 1);
        args.unshift(undefined);
        task = task.bind.apply(task, args);
    }
    if ((len = handlerQueue.push(task)) === 1) {
        nextTick(drainQueue);
    }
    return len;
};
retFunc.clear = function (n) {
    if (n <= handlerQueue.length) {
        handlerQueue[n - 1] = function () {};
    }
    return this;
};
module.exports = retFunc;
});
require.register("calvinmetcalf-setImmediate/lib/nextTick.js", function(exports, require, module){
"use strict";
exports.test = function () {
    // Don't get fooled by e.g. browserify environments.
    return typeof process === "object" && Object.prototype.toString.call(process) === "[object process]";
};

exports.install = function () {
    return process.nextTick;
};
});
require.register("calvinmetcalf-setImmediate/lib/postMessage.js", function(exports, require, module){
"use strict";
var globe = require("./global");
exports.test = function () {
    // The test against `importScripts` prevents this implementation from being installed inside a web worker,
    // where `global.postMessage` means something completely different and can"t be used for this purpose.
    if (!globe.postMessage || globe.importScripts) {
        return false;
    }

    var postMessageIsAsynchronous = true;
    var oldOnMessage = globe.onmessage;
    globe.onmessage = function () {
        postMessageIsAsynchronous = false;
    };
    globe.postMessage("", "*");
    globe.onmessage = oldOnMessage;
    return postMessageIsAsynchronous;
};

exports.install = function (func) {
    var codeWord = "com.calvinmetcalf.setImmediate" + Math.random();
    function globalMessage(event) {
        if (event.source === globe && event.data === codeWord) {
            func();
        }
    }
    if (globe.addEventListener) {
        globe.addEventListener("message", globalMessage, false);
    } else {
        globe.attachEvent("onmessage", globalMessage);
    }
    return function () {
        globe.postMessage(codeWord, "*");
    };
};
});
require.register("calvinmetcalf-setImmediate/lib/messageChannel.js", function(exports, require, module){
"use strict";
var globe = require("./global");
exports.test = function () {
    return !!globe.MessageChannel;
};

exports.install = function (func) {
    var channel = new globe.MessageChannel();
    channel.port1.onmessage = func;
    return function () {
        channel.port2.postMessage(0);
    };
};
});
require.register("calvinmetcalf-setImmediate/lib/stateChange.js", function(exports, require, module){
"use strict";
var globe = require("./global");
exports.test = function () {
    return "document" in globe && "onreadystatechange" in globe.document.createElement("script");
};

exports.install = function (handle) {
    return function () {

        // Create a <script> element; its readystatechange event will be fired asynchronously once it is inserted
        // into the document. Do so, thus queuing up the task. Remember to clean up once it's been called.
        var scriptEl = globe.document.createElement("script");
        scriptEl.onreadystatechange = function () {
            handle();

            scriptEl.onreadystatechange = null;
            scriptEl.parentNode.removeChild(scriptEl);
            scriptEl = null;
        };
        globe.document.documentElement.appendChild(scriptEl);

        return handle;
    };
};
});
require.register("calvinmetcalf-setImmediate/lib/timeout.js", function(exports, require, module){
"use strict";
exports.test = function () {
    return true;
};

exports.install = function (t) {
    return function () {
        setTimeout(t, 0);
    };
};
});
require.register("calvinmetcalf-setImmediate/lib/global.js", function(exports, require, module){
module.exports = typeof global === "object" && global ? global : this;
});
require.register("calvinmetcalf-setImmediate/lib/mutation.js", function(exports, require, module){
"use strict";
//based off rsvp
//https://github.com/tildeio/rsvp.js/blob/master/lib/rsvp/async.js
var globe = require("./global");

var MutationObserver = globe.MutationObserver || globe.WebKitMutationObserver;

exports.test = function () {
    return MutationObserver;
};

exports.install = function (handle) {
    var observer = new MutationObserver(handle);
    var element = globe.document.createElement("div");
    observer.observe(element, { attributes: true });

    // Chrome Memory Leak: https://bugs.webkit.org/show_bug.cgi?id=93661
    globe.addEventListener("unload", function () {
        observer.disconnect();
        observer = null;
    }, false);

    return function () {
        element.setAttribute("drainQueue", "drainQueue");
    };
};
});
// The "lie" Promises/A+ implementation consumed below as L.Util.Promise.
require.register("lie/lie.js", function(exports, require, module){
var immediate = require('immediate');

// Creates a deferred: an object with a promise and corresponding resolve/reject methods
function Promise(resolver) {
  if (!(this instanceof Promise)) {
    return new Promise(resolver);
  }
  var queue = [];
  var resolved = false;
  // The `handler` variable points to the function that will
  // 1) handle a .then(onFulfilled, onRejected) call
  // 2) handle a .resolve or .reject call (if not fulfilled)
  // Before 2), `handler` holds a queue of callbacks.
  // After 2), `handler` is a simple .then handler.
  // We use only one function to save memory and complexity.

  // Case 1) handle a .then(onFulfilled, onRejected) call
  function pending(onFulfilled, onRejected){
    return Promise(function(resolver,rejecter){
      queue.push({
        resolve: onFulfilled,
        reject: onRejected,
        resolver:resolver,
        rejecter:rejecter
      });
    });
  }
  function then(onFulfilled, onRejected) {
    return resolved?resolved(onFulfilled, onRejected):pending(onFulfilled, onRejected);
  }
  // Case 2) handle a .resolve or .reject call
  // (`onFulfilled` acts as a sentinel)
  // The actual function signature is
  // .re[ject|solve](sentinel, success, value)
  function resolve(success, value){
    var action = success ? 'resolve' : 'reject';
    var queued;
    var callback;
    for (var i = 0, l = queue.length; i < l; i++) {
      queued = queue[i];
      callback = queued[action];
      if (typeof callback === 'function') {
        immediate(execute,callback, value, queued.resolver, queued.rejecter);
      }else if(success){
        queued.resolver(value);
      }else{
        queued.rejecter(value);
      }
    }
    // Replace this handler with a simple resolved or rejected handler
    resolved = createHandler(then, value, success);
  }
  this.then = then;
  function yes(value) {
    if (!resolved) {
      resolve(true, value);
    }
  }
  function no (reason) {
    if (!resolved) {
      resolve(false, reason);
    }
  }
  try{
    resolver(function(a){
      if(a && typeof a.then==='function'){
        a.then(yes,no);
      }else{
        yes(a);
      }
    },no);
  }catch(e){
    no(e);
  }
}

// Creates a fulfilled or rejected .then function
function createHandler(then, value, success) {
  return function(onFulfilled, onRejected) {
    var callback = success ? onFulfilled : onRejected;
    if (typeof callback !== 'function') {
      return Promise(function(resolve,reject){
        then(resolve,reject);
      });
    }
    return Promise(function(resolve,reject){
      immediate(execute,callback,value,resolve,reject);
    });
  };
}

// Executes the callback with the specified value,
// resolving or rejecting the deferred
function execute(callback, value, resolve, reject) {
  try {
    var result = callback(value);
    if (result && typeof result.then === 'function') {
      result.then(resolve, reject);
    }
    else {
      resolve(result);
    }
  }
  catch (error) {
    reject(error);
  }
}

module.exports = Promise;
});
// Path aliases wiring the bundled modules together under the names the
// relative require() calls above expect.
require.alias("calvinmetcalf-setImmediate/lib/index.js", "lie/deps/immediate/lib/index.js");
require.alias("calvinmetcalf-setImmediate/lib/nextTick.js", "lie/deps/immediate/lib/nextTick.js");
require.alias("calvinmetcalf-setImmediate/lib/postMessage.js", "lie/deps/immediate/lib/postMessage.js");
require.alias("calvinmetcalf-setImmediate/lib/messageChannel.js", "lie/deps/immediate/lib/messageChannel.js");
require.alias("calvinmetcalf-setImmediate/lib/stateChange.js", "lie/deps/immediate/lib/stateChange.js");
require.alias("calvinmetcalf-setImmediate/lib/timeout.js", "lie/deps/immediate/lib/timeout.js");
require.alias("calvinmetcalf-setImmediate/lib/global.js", "lie/deps/immediate/lib/global.js");
require.alias("calvinmetcalf-setImmediate/lib/mutation.js", "lie/deps/immediate/lib/mutation.js");
require.alias("calvinmetcalf-setImmediate/lib/index.js", "lie/deps/immediate/index.js");
require.alias("calvinmetcalf-setImmediate/lib/index.js", "immediate/index.js");
require.alias("calvinmetcalf-setImmediate/lib/index.js", "calvinmetcalf-setImmediate/index.js");
require.alias("lie/lie.js", "lie/index.js");

L.Util.Promise = require("lie");
})();
// Promise-based GET helper. Resolves with the parsed JSON response body,
// rejects on transport or HTTP errors. Recognized options:
//   jsonp: delegate to the JSONP transport (L.Util.jsonp) instead of XHR
//   local: accept any status < 400 (useful for file:// style serving)
//   evil:  allow an eval() fallback when window.JSON is missing
// The returned promise carries an abort() method that rejects it and
// cancels the underlying request.
L.Util.ajax = function(url, options) {
	'use strict';
	options = options || {};
	if (options.jsonp) {
		// BUG FIX: this previously called L.Util.ajax.jsonp(url, options),
		// which is never defined in this file -- the JSONP helper lives at
		// L.Util.jsonp, so every jsonp request threw a TypeError.
		return L.Util.jsonp(url, options);
	}
	var request;
	var cancel;
	var out = L.Util.Promise(function(resolve, reject) {
		var Ajax;
		cancel = reject;
		// the following is from JavaScript: The Definitive Guide
		if (window.XMLHttpRequest === undefined) {
			// Old-IE fallback: newest MSXML ProgID first.
			Ajax = function() {
				try {
					return new ActiveXObject('Microsoft.XMLHTTP.6.0');
				}
				catch (e1) {
					try {
						return new ActiveXObject('Microsoft.XMLHTTP.3.0');
					}
					catch (e2) {
						reject('XMLHttpRequest is not supported');
					}
				}
			};
		}
		else {
			Ajax = window.XMLHttpRequest;
		}
		var response;
		request = new Ajax();
		request.open('GET', url);
		request.onreadystatechange = function() {
			/*jslint evil: true */
			if (request.readyState === 4) {
				if ((request.status < 400 && options.local) || request.status === 200) {
					if (window.JSON) {
						response = JSON.parse(request.responseText);
					} else if (options.evil) {
						// SECURITY: deliberate eval() fallback for ancient
						// browsers without window.JSON; only runs when the
						// caller opts in via options.evil.
						response = eval('(' + request.responseText + ')');
					}
					resolve(response);
				} else {
					// status === 0 usually means the request never left
					// the browser (CORS / network failure).
					if (!request.status) {
						reject('Attempted cross origin request without CORS enabled');
					} else {
						reject(request.statusText);
					}
				}
			}
		};
		request.send();
	});
	// Any rejection (including an external abort()) cancels the XHR.
	out.then(null, function(reason) {
		request.abort();
		return reason;
	});
	out.abort = cancel;
	return out;
};
// JSONP transport: injects a <script> tag whose URL names a generated
// callback in the L.Util.jsonp.cb registry; the callback resolves the
// promise and removes the tag again. options:
//   cbParam:      query parameter carrying the callback name ('callback')
//   callbackName: use a fixed, caller-managed callback instead
// The returned promise carries an abort() method that rejects it.
L.Util.jsonp = function(url, options) {
	options = options || {};
	var head = document.getElementsByTagName('head')[0];
	var scriptNode = L.DomUtil.create('script', '', head);
	var cbName, ourl, cbSuffix, cancel;
	var out = L.Util.Promise(function(resolve, reject) {
		cancel = reject;
		var cbParam = options.cbParam || 'callback';
		if (options.callbackName) {
			cbName = options.callbackName;
		}
		else {
			// Random suffix keeps concurrent requests from colliding.
			cbSuffix = '_' + ('' + Math.random()).slice(2);
			cbName = 'L.Util.jsonp.cb.' + cbSuffix;
		}
		scriptNode.type = 'text/javascript';
		if (cbSuffix) {
			L.Util.jsonp.cb[cbSuffix] = function(data) {
				head.removeChild(scriptNode);
				delete L.Util.jsonp.cb[cbSuffix];
				resolve(data);
			};
		}
		if (url.indexOf('?') === -1) {
			ourl = url + '?' + cbParam + '=' + cbName;
		}
		else {
			ourl = url + '&' + cbParam + '=' + cbName;
		}
		scriptNode.src = ourl;
	}).then(null, function(reason) {
		head.removeChild(scriptNode);
		// BUG FIX: cleanup previously deleted from L.Util.ajax.cb, which
		// does not exist (TypeError on every rejection); the callback
		// registry is L.Util.jsonp.cb.
		delete L.Util.jsonp.cb[cbSuffix];
		return reason;
	});
	out.abort = cancel;
	return out;
};

// Registry of live JSONP callbacks, keyed by their random suffix.
L.Util.jsonp.cb = {};
// GeoJSON layer that loads its features over AJAX or JSONP and can be
// refreshed and refiltered. Fires data:loading / data:progress /
// data:loaded events around the fetches.
L.GeoJSON.AJAX = L.GeoJSON.extend({
	// Request defaults; only keys listed here are copied from the
	// caller's options object.
	defaultAJAXparams: {
		dataType: 'json',
		callbackParam: 'callback',
		local:false,
		// Hook applied to every raw response before addData().
		middleware: function(f) {
			return f;
		}
	},

	// url: one URL string, an array of URLs, or -- when the first
	// argument is neither -- the options object itself.
	initialize: function(url, options) {
		this.urls = [];
		if (url) {
			if (typeof url === 'string') {
				this.urls.push(url);
			}
			else if (typeof url.pop === 'function') {
				// Array-like argument: take all URLs.
				this.urls = this.urls.concat(url);
			}
			else {
				// First argument is actually the options object.
				options = url;
				url = undefined;
			}
		}
		var ajaxParams = L.Util.extend({}, this.defaultAJAXparams);
		for (var i in options) {
			if (this.defaultAJAXparams.hasOwnProperty(i)) {
				ajaxParams[i] = options[i];
			}
		}
		this.ajaxParams = ajaxParams;
		this._layers = {};
		L.Util.setOptions(this, options);
		// Re-apply any active filter once every URL has loaded.
		this.on('data:loaded', function() {
			if (this.filter) {
				this.refilter(this.filter);
			}
		}, this);
		var self = this;
		if (this.urls.length > 0) {
			// Defer the initial load to the next tick -- presumably so
			// callers can attach data:* listeners first; confirm before
			// changing.
			L.Util.Promise(function(yes){
				yes();
			}).then(function(){
				self.addUrl();
			});
		}
	},

	// Drops all loaded features and forgets the URL list.
	clearLayers: function() {
		this.urls = [];
		L.GeoJSON.prototype.clearLayers.call(this);
		return this;
	},

	// Fetches every URL in this.urls (optionally adding more first) and
	// feeds the responses through middleware into addData().
	addUrl: function(url) {
		var self = this;
		if (url) {
			if (typeof url === 'string') {
				self.urls.push(url);
			}
			else if (typeof url.pop === 'function') {
				self.urls = self.urls.concat(url);
			}
		}
		var loading = self.urls.length;
		var done = 0;
		self.fire('data:loading');
		self.urls.forEach(function(url) {
			if (self.ajaxParams.dataType.toLowerCase() === 'json') {
				L.Util.ajax(url,self.ajaxParams).then(function(d) {
					var data = self.ajaxParams.middleware(d);
					self.addData(data);
					self.fire('data:progress',data);
				},function(err){
					// Errors still count as progress so loading completes.
					self.fire('data:progress',{error:err});
				});
			}
			else if (self.ajaxParams.dataType.toLowerCase() === 'jsonp') {
				L.Util.jsonp(url,self.ajaxParams).then(function(d) {
					var data = self.ajaxParams.middleware(d);
					self.addData(data);
					self.fire('data:progress',data);
				},function(err){
					self.fire('data:progress',{error:err});
				});
			}
		});
		// NOTE(review): a fresh 'data:progress' listener is registered on
		// every addUrl() call (e.g. each refresh()), so multiple counters
		// accumulate over time -- confirm the data:loaded bookkeeping
		// before relying on it.
		self.on('data:progress', function() {
			if (++done === loading) {
				self.fire('data:loaded');
			}
		});
	},

	// Clears the layer and reloads the given URL(s), or the previous
	// list when called without arguments.
	refresh: function(url) {
		url = url || this.urls;
		this.clearLayers();
		this.addUrl(url);
	},

	// Shows only features matching func; pass a non-function to reset.
	// NOTE(review): uses setStyle(), which vector layers implement but
	// plain markers do not -- verify behavior with point features.
	refilter: function(func) {
		if (typeof func !== 'function') {
			this.filter = false;
			this.eachLayer(function(a) {
				a.setStyle({
					stroke: true,
					clickable: true
				});
			});
		}
		else {
			this.filter = func;
			this.eachLayer(function(a) {
				if (func(a.feature)) {
					a.setStyle({
						stroke: true,
						clickable: true
					});
				}
				else {
					a.setStyle({
						stroke: false,
						clickable: false
					});
				}
			});
		}
	}
});
// Lowercase factory mirroring Leaflet's L.geoJson(...) convention.
L.geoJson.ajax = function(geojson, options) {
	var layer = new L.GeoJSON.AJAX(geojson, options);
	return layer;
};

View File

@ -0,0 +1,125 @@
/*
Leaflet.AwesomeMarkers, a plugin that adds colorful iconic markers for Leaflet, based on the Font Awesome icons
(c) 2012-2013, Lennard Voogdt
http://leafletjs.com
https://github.com/lvoogdt
*/
/*global L*/
(function (window, document, undefined) {
	"use strict";
	/*
	 * Leaflet.AwesomeMarkers assumes that you have already included the Leaflet library.
	 */

	L.AwesomeMarkers = {};

	L.AwesomeMarkers.version = '2.0.1';

	// Marker icon whose visual is an icon-font glyph (Font Awesome or
	// Bootstrap glyphicons) on a colored pin background.
	L.AwesomeMarkers.Icon = L.Icon.extend({
		// Sizes/anchors match the bundled 35x45 pin sprite; prefix picks
		// the icon font family ('glyphicon' or 'fa').
		options: {
			iconSize: [35, 45],
			iconAnchor:   [17, 42],
			popupAnchor: [1, -32],
			shadowAnchor: [10, 12],
			shadowSize: [36, 16],
			className: 'awesome-marker',
			prefix: 'glyphicon',
			spinClass: 'fa-spin',
			extraClasses: '',
			icon: 'home',
			markerColor: 'blue',
			iconColor: 'white'
		},

		initialize: function (options) {
			options = L.Util.setOptions(this, options);
		},

		// Builds the pin <div> with the glyph <i> inside it.
		createIcon: function () {
			var div = document.createElement('div'),
				options = this.options;

			if (options.icon) {
				div.innerHTML = this._createInner();
			}

			if (options.bgPos) {
				div.style.backgroundPosition =
					(-options.bgPos.x) + 'px ' + (-options.bgPos.y) + 'px';
			}

			this._setIconStyles(div, 'icon-' + options.markerColor);
			return div;
		},

		// Assembles the <i> glyph markup: prefix, icon name (the prefix is
		// not duplicated when the caller already wrote e.g. 'fa-home'),
		// optional spin class, and the icon color as class or inline style.
		_createInner: function() {
			var iconClass, iconSpinClass = "", iconColorClass = "", iconColorStyle = "", options = this.options;

			if(options.icon.slice(0,options.prefix.length+1) === options.prefix + "-") {
				iconClass = options.icon;
			} else {
				iconClass = options.prefix + "-" + options.icon;
			}

			if(options.spin && typeof options.spinClass === "string") {
				iconSpinClass = options.spinClass;
			}

			if(options.iconColor) {
				if(options.iconColor === 'white' || options.iconColor === 'black') {
					// White and black have dedicated CSS classes.
					iconColorClass = "icon-" + options.iconColor;
				} else {
					iconColorStyle = "style='color: " + options.iconColor + "' ";
				}
			}

			return "<i " + iconColorStyle + "class='" + options.extraClasses + " " + options.prefix + " " + iconClass + " " + iconSpinClass + " " + iconColorClass + "'></i>";
		},

		// Applies size/anchor margins and the awesome-marker-* class for
		// either the icon itself or its shadow element.
		_setIconStyles: function (img, name) {
			var options = this.options,
				size = L.point(options[name === 'shadow' ? 'shadowSize' : 'iconSize']),
				anchor;

			if (name === 'shadow') {
				anchor = L.point(options.shadowAnchor || options.iconAnchor);
			} else {
				anchor = L.point(options.iconAnchor);
			}

			// NOTE(review): with the anchor defaults above this fallback
			// looks unreachable -- confirm before removing.
			if (!anchor && size) {
				anchor = size.divideBy(2, true);
			}

			img.className = 'awesome-marker-' + name + ' ' + options.className;

			if (anchor) {
				img.style.marginLeft = (-anchor.x) + 'px';
				img.style.marginTop  = (-anchor.y) + 'px';
			}

			if (size) {
				img.style.width  = size.x + 'px';
				img.style.height = size.y + 'px';
			}
		},

		createShadow: function () {
			var div = document.createElement('div');

			this._setIconStyles(div, 'shadow');
			return div;
		}
	});

	L.AwesomeMarkers.icon = function (options) {
		return new L.AwesomeMarkers.Icon(options);
	};

}(this, document));

File diff suppressed because one or more lines are too long

Binary file not shown.

After

Width:  |  Height:  |  Size: 157 B