// Copyright 2014, 2015 by Sascha L. Teichmann
// Use of this source code is governed by the MIT license
// that can be found in the LICENSE file.

package main

import (
    "log"

    "bitbucket.org/s_l_teichmann/mtsatellite/common"

    leveldb "github.com/jmhodges/levigo"
)
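
// LevelDBBackend stores the map blocks in a LevelDB database.
// Depending on the interleaved flag the keys are kept either in
// plain or in interleaved (Z order) form; encoder and decoder
// transcode between the external plain representation and the
// internal one. For plain databases a Coverage3D index is kept
// to speed up spatial queries.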
type LevelDBBackend struct {
    cache         *leveldb.Cache
    db            *leveldb.DB
    interleaved   bool
    coverage      *common.Coverage3D
    encoder       common.KeyTranscoder
    decoder       common.KeyTranscoder
    changeTracker *changeTracker
}
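
// LevelDBSession is a client session on a LevelDBBackend.
// While a transaction is open, tx holds the write batch that
// collects all stores until the commit.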
type LevelDBSession struct {
    backend *LevelDBBackend
    tx      *leveldb.WriteBatch
}
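
// NewLevelDBBackend opens (or creates) the LevelDB database at path.
// cacheSize is given in MiB; values <= 0 run without the extra LRU
// cache. For a non-interleaved database a coverage index is built up
// front, which scans the whole database once. A minimal usage sketch
// (the path and the nil change tracker are placeholders):
//
//     backend, err := NewLevelDBBackend("/path/to/db", nil, false, 32)
//     if err != nil {
//         log.Fatal(err)
//     }
//     defer backend.Shutdown()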
func NewLevelDBBackend(
    path string,
    changeTracker *changeTracker,
    interleaved bool,
    cacheSize int) (ldb *LevelDBBackend, err error) {

    opts := leveldb.NewOptions()

    var cache *leveldb.Cache
    if cacheSize > 0 {
        cache = leveldb.NewLRUCache(cacheSize * 1024 * 1024)
        opts.SetCache(cache)
    }
    opts.SetCreateIfMissing(true)

    var db *leveldb.DB
    if db, err = leveldb.Open(path, opts); err != nil {
        if cache != nil {
            cache.Close()
        }
        return
    }

    var (
        encoder common.KeyTranscoder
        decoder common.KeyTranscoder
    )

    if interleaved {
        encoder = common.TranscodeInterleavedToPlain
        decoder = common.TranscodePlainToInterleaved
    } else {
        encoder = common.IdentityTranscoder
        decoder = common.IdentityTranscoder
    }

    ldb = &LevelDBBackend{
        cache:         cache,
        db:            db,
        interleaved:   interleaved,
        encoder:       encoder,
        decoder:       decoder,
        changeTracker: changeTracker,
    }

    if !interleaved {
        if err = ldb.buildCoverage(); err != nil {
            ldb.Shutdown()
            ldb = nil
            return
        }
    }
    return
}
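
// buildCoverage scans all keys of the database once and inserts
// their decoded coordinates into a fresh Coverage3D index.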
func (ldb *LevelDBBackend) buildCoverage() error {
    log.Println("INFO: Start building coverage index (this may take some time)...")

    coverage := common.NewCoverage3D()

    ro := leveldb.NewReadOptions()
    defer ro.Close()
    ro.SetFillCache(false)
    it := ldb.db.NewIterator(ro)
    defer it.Close()
    it.SeekToFirst()
    for ; it.Valid(); it.Next() {
        c, err := common.DecodeStringBytesToCoord(it.Key())
        if err != nil {
            return err
        }
        coverage.Insert(c)
    }
    if err := it.GetError(); err != nil {
        return err
    }
    ldb.coverage = coverage
    log.Println("INFO: Finished building coverage index.")
    return nil
}
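
// NewSession opens a new session on the backend.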
func (ldb *LevelDBBackend) NewSession() (Session, error) {
    return &LevelDBSession{ldb, nil}, nil
}
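
// Close closes the session and discards a still open,
// uncommitted write batch, if any.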
func (ldbs *LevelDBSession) Close() error {
    if ldbs.tx != nil {
        ldbs.tx.Close()
    }
    return nil
}
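
// Shutdown closes the database and its block cache.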
func (ldb *LevelDBBackend) Shutdown() error {
    ldb.db.Close()
    if ldb.cache != nil {
        ldb.cache.Close()
    }
    return nil
}
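
// Del removes the block with the given key from the database.
// It reports success == false if the key was not present.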
func (ldbs *LevelDBSession) Del(hash, key []byte) (success bool, err error) {
    if key, err = ldbs.backend.decoder(key); err != nil {
        return
    }
    ro := leveldb.NewReadOptions()
    defer ro.Close()
    var data []byte
    data, err = ldbs.backend.db.Get(ro, key)
    if err != nil {
        return
    }
    if data == nil {
        success = false
        return
    }
    success = true
    wo := leveldb.NewWriteOptions()
    defer wo.Close()
    err = ldbs.backend.db.Delete(wo, key)
    return
}
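
// Fetch returns the data of the block with the given key,
// or nil if the block is not stored in the database.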
func (ldbs *LevelDBSession) Fetch(hash, key []byte) (value []byte, err error) {
    if key, err = ldbs.backend.decoder(key); err != nil {
        return
    }
    ro := leveldb.NewReadOptions()
    value, err = ldbs.backend.db.Get(ro, key)
    ro.Close()
    return
}
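
// InTransaction reports whether the session currently has an open transaction.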
func (ldbs *LevelDBSession) InTransaction() bool {
    return ldbs.tx != nil
}
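
// keyExists checks if the given key is already stored in the database.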
func keyExists(db *leveldb.DB, key []byte) (exists bool, err error) {
    ro := leveldb.NewReadOptions()
    defer ro.Close()
    var data []byte
    if data, err = db.Get(ro, key); err != nil {
        return
    }
    exists = data != nil
    return
}
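
// Store saves a block under the given key, either directly or in the
// write batch of an open transaction. It reports whether the key
// already existed and notifies the coverage index and the change
// tracker about new or changed blocks.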
func (ldbs *LevelDBSession) Store(hash, key, value []byte) (exists bool, err error) {
    origKey := key
    if key, err = ldbs.backend.decoder(key); err != nil {
        return
    }
    if exists, err = keyExists(ldbs.backend.db, key); err != nil {
        return
    }
    if ldbs.tx != nil {
        ldbs.tx.Put(key, value)
    } else {
        wo := leveldb.NewWriteOptions()
        err = ldbs.backend.db.Put(wo, key, value)
        wo.Close()
        if err != nil {
            return
        }
    }
    // This is technically too early, because the store may be part of
    // a transaction which is committed (and possibly fails) later.
    if ldbs.backend.changeTracker != nil || ldbs.backend.coverage != nil {
        c, err := common.DecodeStringBytesToCoord(origKey)
        if err != nil {
            return exists, err
        }
        if ldbs.backend.coverage != nil && !exists {
            ldbs.backend.coverage.Insert(c)
        }
        if ldbs.backend.changeTracker != nil {
            ldbs.backend.changeTracker.BlockChanged(c)
        }
    }
    return
}
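
// BeginTransaction starts a new transaction by allocating a write
// batch which collects all stores until CommitTransaction.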
func (ldbs *LevelDBSession) BeginTransaction() error {
    ldbs.tx = leveldb.NewWriteBatch()
    return nil
}
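
// CommitTransaction writes the collected batch to the database
// with a synced write and ends the transaction.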
func (ldbs *LevelDBSession) CommitTransaction() (err error) {
    tx := ldbs.tx
    if tx == nil {
        log.Println("WARN: No transaction running.")
        return
    }
    ldbs.tx = nil
    wo := leveldb.NewWriteOptions()
    wo.SetSync(true)
    err = ldbs.backend.db.Write(wo, tx)
    wo.Close()
    tx.Close()
    return
}
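
// AllKeys counts the keys of the database in a first pass and then
// streams them, transcoded to the external form, through the
// returned channel. Closing the done channel stops the streaming
// early. A usage sketch (session is a hypothetical *LevelDBSession):
//
//     done := make(chan struct{})
//     defer close(done)
//     keys, n, err := session.AllKeys(nil, done)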
func (ldbs *LevelDBSession) AllKeys(
    hash []byte,
    done <-chan struct{}) (<-chan []byte, int, error) {

    ro := leveldb.NewReadOptions()
    ro.SetFillCache(false)

    it := ldbs.backend.db.NewIterator(ro)
    it.SeekToFirst()
    var n int
    for ; it.Valid(); it.Next() {
        n++
    }

    if err := it.GetError(); err != nil {
        it.Close()
        ro.Close()
        return nil, n, err
    }

    keys := make(chan []byte)

    go func() {
        defer ro.Close()
        defer close(keys)
        defer it.Close()
        it.SeekToFirst()
        encoder := ldbs.backend.encoder
        for ; it.Valid(); it.Next() {
            if key, err := encoder(it.Key()); err == nil {
                select {
                case keys <- key:
                case <-done:
                    return
                }
            } else {
                log.Printf("WARN: %s\n", err)
                return
            }
        }
        if err := it.GetError(); err != nil {
            log.Printf("WARN: %s\n", err)
        }
    }()

    return keys, n, nil
}
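
// SpatialQuery streams all blocks inside the axis-aligned box spanned
// by the corners first and second, dispatching to the plain or the
// interleaved implementation depending on the key encoding.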
func (ldbs *LevelDBSession) SpatialQuery(
    hash, first, second []byte,
    done <-chan struct{}) (<-chan Block, error) {

    if ldbs.backend.interleaved {
        return ldbs.interleavedSpatialQuery(first, second, done)
    }
    return ldbs.plainSpatialQuery(first, second, done)
}
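
// plainSpatialQuery answers a spatial query on a database with plain
// string keys. It walks the ranges of the coverage index and fetches
// each candidate block with a single Get, because the lexicographic
// key order of the database does not match the numerical one:
// for example 9 < 10 numerically, but "10" < "9" as strings.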
func (ldbs *LevelDBSession) plainSpatialQuery(
    first, second []byte,
    done <-chan struct{}) (<-chan Block, error) {

    var (
        firstKey  int64
        secondKey int64
        err       error
    )
    if firstKey, err = common.DecodeStringFromBytes(first); err != nil {
        return nil, err
    }
    if secondKey, err = common.DecodeStringFromBytes(second); err != nil {
        return nil, err
    }
    c1 := common.PlainToCoord(firstKey)
    c2 := common.PlainToCoord(secondKey)
    c1, c2 = common.MinCoord(c1, c2), common.MaxCoord(c1, c2)

    blocks := make(chan Block)

    go func() {
        defer close(blocks)

        ro := leveldb.NewReadOptions()
        defer ro.Close()

        var a, b common.Coord

        for _, r := range ldbs.backend.coverage.Query(c1, c2) {
            a.Z, b.Z = int16(r.Z), int16(r.Z)
            a.X, b.X = int16(r.X1), int16(r.X2)
            for a.Y = r.Y2; a.Y >= r.Y1; a.Y-- {
                b.Y = a.Y
                // The keys in the database are stored and ordered as strings
                // "1", "10", ..., "19", "2", "20", "21", so you cannot use
                // an iterator and assume it is numerically ordered.
                // Each block is fetched with a Get instead.
                for f, t := common.CoordToPlain(a), common.CoordToPlain(b); f <= t; f++ {
                    key := common.StringToBytes(f)
                    value, err := ldbs.backend.db.Get(ro, key)
                    if err != nil {
                        log.Printf("get failed: %s\n", err)
                        return
                    }
                    if value != nil {
                        select {
                        case blocks <- Block{Key: key, Data: value}:
                        case <-done:
                            return
                        }
                    }
                }
            }
        }
    }()
    return blocks, nil
}
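
// interleavedSpatialQuery answers a spatial query on a database with
// interleaved (Z order) keys. It scans the curve from zmin to zmax
// and uses BigMin to skip over stretches of the curve that leave the
// queried cuboid.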
func (ldbs *LevelDBSession) interleavedSpatialQuery(
    first, second []byte,
    done <-chan struct{}) (<-chan Block, error) {

    var (
        firstKey  int64
        secondKey int64
        err       error
    )
    if firstKey, err = common.DecodeStringFromBytes(first); err != nil {
        return nil, err
    }
    if secondKey, err = common.DecodeStringFromBytes(second); err != nil {
        return nil, err
    }
    c1 := common.ClipCoord(common.PlainToCoord(firstKey))
    c2 := common.ClipCoord(common.PlainToCoord(secondKey))
    c1, c2 = common.MinCoord(c1, c2), common.MaxCoord(c1, c2)

    blocks := make(chan Block)

    go func() {
        defer close(blocks)

        ro := leveldb.NewReadOptions()
        defer ro.Close()
        ro.SetFillCache(false)

        it := ldbs.backend.db.NewIterator(ro)
        defer it.Close()

        zmin, zmax := common.CoordToInterleaved(c1), common.CoordToInterleaved(c2)
        // Should not be necessary, but ensure zmin <= zmax.
        zmin, zmax = common.Order64(zmin, zmax)
        var (
            cub        = common.Cuboid{P1: c1, P2: c2}
            err        error
            encodedKey []byte
        )
        it.Seek(common.ToBigEndian(zmin))
        for it.Valid() {
            zcode := common.FromBigEndian(it.Key())

            if zcode > zmax {
                break
            }

            if c := common.InterleavedToCoord(zcode); cub.Contains(c) {
                if encodedKey, err = common.EncodeStringToBytes(common.CoordToPlain(c)); err != nil {
                    log.Printf("error encoding key: %s\n", err)
                    return
                }
                select {
                case blocks <- Block{Key: encodedKey, Data: it.Value()}:
                case <-done:
                    return
                }
                it.Next()
            } else {
                next := common.BigMin(zmin, zmax, zcode)
                it.Seek(common.ToBigEndian(next))
            }
        }
        if err = it.GetError(); err != nil {
            log.Printf("error while iterating: %s\n", err)
            return
        }
    }()
    return blocks, nil
}