Major update

This commit is contained in:
Coder12a 2019-01-08 00:32:34 -06:00
parent 60bc174189
commit 02796f2998
3 changed files with 866 additions and 884 deletions

134
README.md
View File

@ -11,28 +11,28 @@ Copy both *colddb.lua* and *async* files to your minetest mod or game. Copy the
Write this code in your lua file. Write this code in your lua file.
1. create a directory and link it as a database. 1. create a directory and link it as a database.
```lua ```lua
coldbase = colddb.get_db("mydb") coldbase = colddb.Colddb("mydb")
``` ```
2. add an extra folder to the directory. every new file will be added to the global tag(folder). 2. add an extra folder to the directory. every new file will be added to the global tag(folder).
```lua ```lua
colddb.add_global_tag(coldbase,"ips") coldbase.add_global_tag("ips")
``` ```
3. store key item(this key has no value) 3. store key item(this key has no value)
```lua ```lua
colddb.set_key(coldbase,"MyKey") coldbase.set_key("MyKey")
``` ```
4. store key-value item 4. store key-value item
```lua ```lua
colddb.set(coldbase,"MyKeyAndValue","Hello world") coldbase.set("MyKeyAndValue", "Hello world")
``` ```
5. retrieve items (get_key's callback(arg) will return true, false, or nil) 5. retrieve items (get_key's callback(arg) will return true, false, or nil)
```lua ```lua
colddb.get(coldbase,"MyKeyAndValue",nil,function(arg) coldbase.get("MyKeyAndValue", nil, function(arg)
if arg then if arg then
minetest.log(string.format("value:%s", arg)) minetest.log(string.format("value:%s", arg))
end end
end) end)
colddb.get_key(coldbase,"MyKey",nil,function(arg) coldbase.get_key("MyKey", nil, function(arg)
if arg then if arg then
minetest.log("Found key") minetest.log("Found key")
else else
@ -42,7 +42,7 @@ end)
``` ```
6. delete key(file) this function works on both keys and key-value keys. 6. delete key(file) this function works on both keys and key-value keys.
```lua ```lua
colddb.remove(coldbase,"MyKeyAndValue") coldbase.remove("MyKeyAndValue")
``` ```
7. if add_to_mem_pool is true(true by default). keys are stored in a weak lua table(memory) it will be removed by the gc if its not in-use. Storing data in memory is to prevent the database from constantly loading up data from files. 7. if add_to_mem_pool is true(true by default). keys are stored in a weak lua table(memory) it will be removed by the gc if its not in-use. Storing data in memory is to prevent the database from constantly loading up data from files.
```lua ```lua
@ -54,23 +54,23 @@ coldbase.indexes = true
``` ```
9. only if coldbase.indexes is true. returns the amount of keys that are in the indexing file. 9. only if coldbase.indexes is true. returns the amount of keys that are in the indexing file.
```lua ```lua
colddb.get_count(coldbase) coldbase.get_count()
``` ```
10. only if coldbase.indexes is true. iterates through the indexing file(breaks and ends if it reaches the end of the file). 10. only if coldbase.indexes is true. iterates through the indexing file(breaks and ends if it reaches the end of the file).
```lua ```lua
colddb.iterate_index_table(coldbase,nil,func_list_keys,nil) coldbase.iterate_index_table(nil, func_list_keys, nil)
``` ```
11. adds a folder which can be used in other functions that have tag_name arg. 11. adds a folder which can be used in other functions that have tag_name arg.
```lua ```lua
colddb.add_tag(coldbase,"Extra_Folder",{"Extra","Folder"}) coldbase.add_tag("Extra_Folder", {"Extra", "Folder"})
``` ```
12. returns the tag name if the tag does not exists it creates one. 12. returns the tag name if the tag does not exists it creates one.
```lua ```lua
colddb.get_or_add_tag(coldbase,"Extra_Folder",{"Extra","Folder"}) coldbase.get_or_add_tag("Extra_Folder", {"Extra", "Folder"})
``` ```
13. remove tag by name. 13. remove tag by name.
```lua ```lua
colddb.remove_tag(coldbase,"Extra_Folder") coldbase.remove_tag("Extra_Folder")
``` ```
Quick Look Quick Look
@ -78,28 +78,11 @@ Quick Look
```lua ```lua
-- create an directory(watchlist) and link it as a database. -- create an directory(watchlist) and link it as a database.
ip_db = colddb.get_db("watchlist") ip_db = colddb.Colddb("watchlist")
-- add an extra folder to the directory.
colddb.add_global_tag(ip_db,"ips")
-- return a recorded ip address from the data base.
function ip_db.find(player,callback)
colddb.get(ip_db,player,nil,callback)
end
-- Key is the file and file name. Value is the content's within the file.
-- global tag(ips)--->key(Player name)--->value(ip address)
function ip_db.record_ip(player,ip)
colddb.set(ip_db,player,ip)
end
function ip_db.delete(player)
colddb.remove(db,player)
end
-- When ever a player join's his/her ip address is recorded to the database by player name. -- When ever a player join's his/her ip address is recorded to the database by player name.
minetest.register_on_prejoinplayer(function(name, ip) minetest.register_on_prejoinplayer(function(name, ip)
ip_db.record_ip(name,ip) ip_db.set(name, ip, ip_db.get_or_add_tag("ips", "ips"))
end) end)
minetest.register_chatcommand("ip", { minetest.register_chatcommand("ip", {
@ -107,8 +90,8 @@ minetest.register_chatcommand("ip", {
description = "Get an player's ip address.", description = "Get an player's ip address.",
func = function(name, param) func = function(name, param)
-- Get the ip record asynchronously. -- Get the ip record asynchronously.
colddb.get(ip_db,param,nil,function(record) ip_db.get(param, ip_db.get_or_add_tag("ips", "ips"), function(record)
-- If database contains the record data then send it to the player. -- If record is contains data send it to the player.
if record then if record then
minetest.chat_send_player(name, string.format("%s:%s", param, record)) minetest.chat_send_player(name, string.format("%s:%s", param, record))
else else
@ -118,6 +101,16 @@ minetest.register_chatcommand("ip", {
end) end)
end end
}) })
minetest.register_chatcommand("clear", {
params = "<player>",
description = "Clear out the ip database.",
func = function(name, param)
ip_db.remove_tag(ip_db.get_or_add_tag("ips", "ips"))
minetest.chat_send_player(name, "Ip Database Cleared!")
end
})
``` ```
Quick Look Notes Quick Look Notes
@ -126,81 +119,6 @@ Quick Look Notes
In the example above we could also create a more complex ip database using tags. Creating tags named after the player then assigning the ip files to them.<br> In the example above we could also create a more complex ip database using tags. Creating tags named after the player then assigning the ip files to them.<br>
This way we could store many ips associated with the player instead of just one ip. This way we could store many ips associated with the player instead of just one ip.
API
===========
- **Functions**
- **colddb.get_db(directory) --> db**
Creates an directory and links it as a database. Returns a 'db' obeject.
- **colddb.add_global_tag(db,tag)**
Adds an extra folder to the directory and advance the database to the added folder.
- **colddb.add_tag(db,name,tag)**
- Creates a folder from the given table in tag.
- **colddb.get_or_add_tag(db,name,tag) --> tag_name**
Returns a tag or creates a new one if does not exist.
- **colddb.remove_tag(db,name)**
Removes a tag.
- **colddb.get_count(db,tag_name) --> count**
Returns the count from the index table file.
- **colddb.iterate_index_table(db,begin_func,func_on_iterate,end_func,args,tag_name)**
- function iterates through the index table file.
- **begin_func(args) --> args**
- function that is ran before the loop begins.
- **func_on_iterate(key,index,args)**
- function that is ran in the for loop.
- **end_func(args)**
- end function that is ran after the for loop ends.
- **colddb.set(db,name,_table,tag_name)**
- Writes data to the database. Key-Value.
- **colddb.set_key(db,name,tag_name)**
- Writes data to the database. Key-nil.
- **colddb.get(db,name,tag_name,callback(arg))**
- Returns specified data from the database in a callback function.
- **colddb.get_key(db,name,tag_name,callback(arg))**
- Returns if the key exist in the database.
- **colddb.remove(db,name,tag_name)**
- Deletes the specified data from the database.
- **Database object fields**
- **indexes**
- If truth the database makes a indexing file for keys.
- **add_to_mem_pool**
- If truth when you get keys or values it gets cached in the memory for faster access next time.
License License
=========== ===========

194
async.lua
View File

@ -5,202 +5,232 @@ if not extended_api.Async then
extended_api.Async = {} extended_api.Async = {}
end end
function extended_api.Async.create_async_pool() function extended_api.Async()
local pool = {threads = {},globalstep_threads = {},task_queue = {},resting = 200,maxtime = 200,queue_threads = 8,state = "suspended"} local self = {}
return pool
end
function extended_api.Async.create_worker(pool,func) self.pool = {threads = {}, globalstep_threads = {}, task_queue = {}, resting = 200, maxtime = 200, queue_threads = 8, state = "suspended"}
self.create_worker = function(func)
local thread = coroutine.create(func) local thread = coroutine.create(func)
table.insert(pool.threads, thread) if not thread or coroutine.status(thread) == "dead" then
minetest.after(0.3, self.create_worker, func)
minetest.after(0.5, self.schedule_worker)
minetest.chat_send_all("Fall")
return
end
table.insert(self.pool.threads, thread)
end end
function extended_api.Async.create_globalstep_worker(pool,func) self.create_globalstep_worker = function(func)
local thread = coroutine.create(func) local thread = coroutine.create(func)
table.insert(pool.globalstep_threads, thread) if not thread or coroutine.status(thread) == "dead" then
minetest.after(0.3, self.create_globalstep_worker, func)
minetest.after(0.5, self.schedule_globalstep_worker)
return
end end
table.insert(self.pool.globalstep_threads, thread)
function extended_api.Async.run_worker(pool,index) end
local thread = pool.threads[index] self.run_worker = function(index)
if thread == nil or coroutine.status(thread) == "dead" then local thread = self.pool.threads[index]
table.remove(pool.threads, index) if not thread or coroutine.status(thread) == "dead" then
minetest.after(0,extended_api.Async.schedule_worker,pool) table.remove(self.pool.threads, index)
minetest.after(0, self.schedule_worker)
return false return false
else else
coroutine.resume(thread) coroutine.resume(thread)
minetest.after(0,extended_api.Async.schedule_worker,pool) minetest.after(0, self.schedule_worker)
return true return true
end end
end end
function extended_api.Async.run_globalstep_worker(pool,index) self.run_globalstep_worker = function(index)
local thread = pool.globalstep_threads[index] local thread = self.pool.globalstep_threads[index]
if thread == nil or coroutine.status(thread) == "dead" then if not thread or coroutine.status(thread) == "dead" then
table.remove(pool.globalstep_threads, index) table.remove(self.pool.globalstep_threads, index)
minetest.after(0,extended_api.Async.schedule_globalstep_worker,pool) minetest.after(0, self.schedule_globalstep_worker)
return false return false
else else
coroutine.resume(thread) coroutine.resume(thread)
minetest.after(0,extended_api.Async.schedule_globalstep_worker,pool) minetest.after(0, self.schedule_globalstep_worker)
return true return true
end end
end end
function extended_api.Async.schedule_worker(pool) self.schedule_worker = function()
pool.state = "running" self.pool.state = "running"
for index,value in ipairs(pool.threads) do for index, value in ipairs(self.pool.threads) do
minetest.after(pool.resting / 1000,extended_api.Async.run_worker,pool,index) minetest.after(self.pool.resting / 1000, self.run_worker, index)
return true return true
end end
pool.state = "suspended" self.pool.state = "suspended"
return false return false
end end
function extended_api.Async.schedule_globalstep_worker(pool) self.schedule_globalstep_worker = function()
for index,value in ipairs(pool.globalstep_threads) do for index, value in ipairs(self.pool.globalstep_threads) do
minetest.after(0,extended_api.Async.run_globalstep_worker,pool,index) minetest.after(0, self.run_globalstep_worker, index)
return true return true
end end
return false return false
end end
function extended_api.Async.priority(pool,resting,maxtime) self.priority = function(resting, maxtime)
pool.resting = resting self.pool.resting = resting
pool.maxtime = maxtime self.pool.maxtime = maxtime
end end
function extended_api.Async.iterate(pool,from,to,func,callback) self.iterate = function(from, to, func, callback)
extended_api.Async.create_worker(pool,function() self.create_worker(function()
local last_time = minetest.get_us_time() * 1000 local last_time = minetest.get_us_time() / 1000
local maxtime = pool.maxtime local maxtime = self.pool.maxtime
for i = from, to do for i = from, to do
local b = func(i) local b = func(i)
if b ~= nil and b == false then if b ~= nil and b == false then
break break
end end
if minetest.get_us_time() * 1000 > last_time + maxtime then if minetest.get_us_time() / 1000 > last_time + maxtime then
coroutine.yield() coroutine.yield()
last_time = minetest.get_us_time() * 1000 last_time = minetest.get_us_time() / 1000
end end
end end
if callback then if callback then
callback() callback()
end end
return
end) end)
extended_api.Async.schedule_worker(pool) self.schedule_worker()
end end
function extended_api.Async.foreach(pool,array, func, callback) self.foreach = function(array, func, callback)
extended_api.Async.create_worker(pool,function() self.create_worker(function()
local last_time = minetest.get_us_time() * 1000 local last_time = minetest.get_us_time() / 1000
local maxtime = pool.maxtime local maxtime = self.pool.maxtime
for k,v in ipairs(array) do for k,v in ipairs(array) do
local b = func(k,v) local b = func(k,v)
if b ~= nil and b == false then if b ~= nil and b == false then
break break
end end
if minetest.get_us_time() * 1000 > last_time + maxtime then if minetest.get_us_time() / 1000 > last_time + maxtime then
coroutine.yield() coroutine.yield()
last_time = minetest.get_us_time() * 1000 last_time = minetest.get_us_time() / 1000
end end
end end
if callback then if callback then
callback() callback()
end end
return
end) end)
extended_api.Async.schedule_worker(pool) self.schedule_worker()
end end
function extended_api.Async.do_while(pool,condition_func, func, callback) self.do_while = function(condition_func, func, callback)
extended_api.Async.create_worker(pool,function() self.create_worker(function()
local last_time = minetest.get_us_time() * 1000 local last_time = minetest.get_us_time() / 1000
local maxtime = pool.maxtime local maxtime = self.pool.maxtime
while(condition_func()) do while(condition_func()) do
local c = func() local c = func()
if c ~= nil and c ~= condition_func() then if c ~= nil and c ~= condition_func() then
break break
end end
if minetest.get_us_time() * 1000 > last_time + maxtime then if minetest.get_us_time() / 1000 > last_time + maxtime then
coroutine.yield() coroutine.yield()
last_time = minetest.get_us_time() * 1000 last_time = minetest.get_us_time() / 1000
end end
end end
if callback then if callback then
callback() callback()
end end
return
end) end)
extended_api.Async.schedule_worker(pool) self.schedule_worker()
end end
function extended_api.Async.register_globalstep(pool,func) self.register_globalstep = function(func)
extended_api.Async.create_globalstep_worker(pool,function() self.create_globalstep_worker(function()
local last_time = minetest.get_us_time() * 1000 local last_time = minetest.get_us_time() / 1000000
local dtime = last_time local dtime = last_time
while(true) do while(true) do
dtime = (minetest.get_us_time() / 1000000) - last_time
func(dtime) func(dtime)
dtime = minetest.get_us_time() * 1000
-- 0.05 seconds -- 0.05 seconds
if minetest.get_us_time() * 1000 > last_time + 50 then if minetest.get_us_time() / 1000000 > last_time + 0.05 then
coroutine.yield() coroutine.yield()
local last_time = minetest.get_us_time() * 1000 last_time = minetest.get_us_time() / 1000000
end end
end end
end) end)
extended_api.Async.schedule_globalstep_worker(pool) self.schedule_globalstep_worker()
end end
function extended_api.Async.chain_task(pool,tasks,callback) self.chain_task = function(tasks, callback)
extended_api.Async.create_worker(pool,function() self.create_worker(function()
local pass_arg = nil local pass_arg = nil
local last_time = minetest.get_us_time() * 1000 local last_time = minetest.get_us_time() / 1000
local maxtime = pool.maxtime local maxtime = self.pool.maxtime
for index, task_func in pairs(tasks) do for index, task_func in pairs(tasks) do
local p = task_func(pass_arg) local p = task_func(pass_arg)
if p ~= nil then if p ~= nil then
pass_arg = p pass_arg = p
end end
if minetest.get_us_time() * 1000 > last_time + maxtime then if minetest.get_us_time() / 1000 > last_time + maxtime then
coroutine.yield() coroutine.yield()
last_time = minetest.get_us_time() * 1000 last_time = minetest.get_us_time() / 1000
end end
end end
if callback then if callback then
callback(pass_arg) callback(pass_arg)
end end
return
end) end)
extended_api.Async.schedule_worker(pool) self.schedule_worker()
end end
function extended_api.Async.queue_task(pool,func,callback) self.queue_task = function(func, callback)
table.insert(pool.task_queue,{func = func,callback = callback}) table.insert(self.pool.task_queue, {func = func,callback = callback})
if pool.queue_threads > 0 then if self.pool.queue_threads > 0 then
pool.queue_threads = pool.queue_threads - 1 self.pool.queue_threads = self.pool.queue_threads - 1
extended_api.Async.create_worker(pool,function() self.create_worker(function()
local pass_arg = nil local pass_arg = nil
local last_time = minetest.get_us_time() * 1000 local last_time = minetest.get_us_time() / 1000
local maxtime = pool.maxtime local maxtime = self.pool.maxtime
while(true) do while(true) do
local task_func = pool.task_queue[1] local task_func = self.pool.task_queue[1]
table.remove(pool.task_queue,1) table.remove(self.pool.task_queue, 1)
if task_func and task_func.func then if task_func and task_func.func then
pass_arg = nil pass_arg = nil
local p = task_func.func(pass_arg) local p = task_func.func()
if p ~= nil then if p ~= nil then
pass_arg = p pass_arg = p
end end
if task_func.callback then if task_func.callback then
task_func.callback(pass_arg) task_func.callback(pass_arg)
end end
if minetest.get_us_time() * 1000 > last_time + maxtime then if minetest.get_us_time() / 1000 > last_time + maxtime then
coroutine.yield() coroutine.yield()
last_time = minetest.get_us_time() * 1000 last_time = minetest.get_us_time() / 1000
end end
else else
pool.queue_threads = pool.queue_threads + 1 self.pool.queue_threads = self.pool.queue_threads + 1
break return
end end
end end
end) end)
extended_api.Async.schedule_worker(pool) self.schedule_worker()
end end
end end
self.single_task = function(func, callback)
self.create_worker(function()
local pass_arg = func()
if p ~= nil then
pass_arg = p
end
if task_func.callback then
task_func.callback(pass_arg)
end
return
end)
self.schedule_worker()
end
return self
end

View File

@ -4,12 +4,40 @@ local function createDir(directory)
return minetest.mkdir(directory) return minetest.mkdir(directory)
end end
function colddb.file_Exists(db,name,tag_name) function colddb.Colddb(directory)
local directory = string.format("%s/%s/", minetest.get_worldpath(), directory)
if not createDir(directory) then
error(string.format("%s is not a directory.", directory))
end
local self = {}
self.db = {
global_tag = "",
directory = directory,
tags = {},
mem_pool = {},
mem_pool_del = {},
indexes_pool = {},
iterate_queue = {},
indexes = false,
add_to_mem_pool = true,
async = extended_api.Async(),
}
self.db.async.priority(150, 250)
-- make tables weak so the garbage-collector will remove unused data
setmetatable(self.db.tags, {__mode = "kv"})
setmetatable(self.db.mem_pool, {__mode = "kv"})
setmetatable(self.db.mem_pool_del, {__mode = "kv"})
setmetatable(self.db.indexes_pool, {__mode = "kv"})
self.file_Exists = function(name, tag_name)
local t = "" local t = ""
if tag_name then if tag_name then
t = colddb.get_tag(db,tag_name) t = self.get_tag(tag_name)
end end
local f = io.open(string.format("%s%s%s.cold",db.directory,t,name),"r") local f = io.open(string.format("%s%s%s.cold", self.db.directory, t, name), "r")
if f ~= nil then if f ~= nil then
io.close(f) io.close(f)
return true return true
@ -19,170 +47,8 @@ function colddb.file_Exists(db,name,tag_name)
return false return false
end end
function colddb.get_db(directory)
local directory = string.format("%s/%s",minetest.get_worldpath(),directory)
if not createDir(directory) then
error(string.format("%s is not a directory.",directory))
end
db = {
global_tag = "",
directory = directory,
tags = {},
mem_pool = {},
indexes_pool = {},
iterate_queue = {},
indexes = false,
add_to_mem_pool = true,
async_pool = extended_api.Async.create_async_pool(),
}
extended_api.Async.priority(db.async_pool,150,250)
-- make tables weak so the garbage-collector will remove unused data
setmetatable(db.tags, {__mode = "kv"})
setmetatable(db.mem_pool, {__mode = "kv"})
setmetatable(db.indexes_pool, {__mode = "kv"})
return db
end
function colddb.add_global_tag(db,tag)
local t = ""
if type(tag) == "table" then
for index in pairs(tag) do
t = string.format("%s%s/",t,index)
end
else
t = string.format("%s/",tag)
end
db.global_tag = string.format("%s%s",db.global_tag,t)
db.directory = string.format("%s/%s",db.directory,t)
if not createDir(db.directory) then
error(string.format("%s is not a directory.",db.directory))
end
end
function colddb.add_tag(db,name,tag)
local t = ""
if not db.tags[name] then
db.tags[name] = ""
end
if type(tag) == "table" then
for key,value in pairs(tag) do
t = string.format("%s%s/",t,value)
end
else
t = string.format("%s/",tag)
end
local test_path = string.format("%s%s%s",db.directory,db.tags[name],t)
if not createDir(test_path) then
error(string.format("%s is not a directory.",test_path))
end
db.tags[name] = string.format("%s%s",db.tags[name],t)
end
function colddb.get_tag(db,name)
if not name then
return ""
end
local tag = db.tags[name]
if tag then
return tag
end
return ""
end
function colddb.get_or_add_tag(db,name,tag)
if not db.tags[name] then
colddb.add_tag(db,name,tag)
end
return name
end
function colddb.remove_tag(db,name)
if db.tags[name] then
local delete_path = string.format("%s%s",db.directory,db.tags[name])
local wc = delete_path:len()
delete_path = delete_path:sub(0,wc-1)
db.tags[name] = nil
os.remove(delete_path)
end
end
function colddb.delete_file(db,name,tag_name)
local t = ""
if tag_name then
t = colddb.get_tag(db,tag_name)
end
local text = string.format("%s%s%s.cold",db.directory,t,name)
local err,msg = os.remove(text)
if err == nil then
print(string.format("error removing db data %s error message: %s",text,msg))
end
end
function colddb.load_table(db,name,tag_name)
local t = ""
if tag_name then
t = colddb.get_tag(db,tag_name)
end
local f = io.open(string.format("%s%s%s.cold",db.directory,t,name), "r")
if f then
local data = minetest.deserialize(f:read("*a"))
f:close()
return data
end
return nil
end
function colddb.save_table(db,name, _table,tag_name)
local t = ""
if tag_name then
t = colddb.get_tag(db,tag_name)
end
return minetest.safe_file_write(string.format("%s%s%s.cold",db.directory,t,name), minetest.serialize(_table))
end
function colddb.save_key(db,name,tag_name)
local t = ""
if tag_name then
t = colddb.get_tag(db,tag_name)
end
return minetest.safe_file_write(string.format("%s%s%s.cold",db.directory,t,name), "")
end
function colddb.load_key(db,name,tag_name)
local t = ""
if tag_name then
t = colddb.get_tag(db,tag_name)
end
local f = io.open(string.format("%s%s%s.cold",db.directory,t,name), "r")
if f then
f:close()
return true
end
return false
end
function colddb.delete_index_table(db,tag_name)
local t = ""
local name = "æIndex_table"
if tag_name then
t = colddb.get_tag(db,tag_name)
end
local p = string.format("%s%sæIndex_table.cold",db.directory,t)
if colddb.file_Exists(db,name,tag_name) then
local err,msg = os.remove(p)
if err == nil then
print(string.format("error removing db data %s error message: %s",p,msg))
end
return true
end
return false
end
local function delete_lines_func_begin(args) local function delete_lines_func_begin(args)
local f = io.open(args.copyfile, "w") local f = io.open(args.copyfile, "w")
local db = args.db
local cs = args.cs
if f then if f then
args.file = f args.file = f
args.removedlist = {} args.removedlist = {}
@ -193,9 +59,7 @@ end
local function delete_lines_func_i(line, i, args) local function delete_lines_func_i(line, i, args)
local f = args.file local f = args.file
local db = args.db local om = self.db.indexes_pool[args.cs]
local cs = args.cs
local om = db.indexes_pool[args.cs]
if om and not om.deleted_items[line] then if om and not om.deleted_items[line] then
f:write(string.format("\n%s", line)) f:write(string.format("\n%s", line))
else else
@ -205,11 +69,10 @@ local function delete_lines_func_i(line,i,args)
end end
local function delete_lines_func_end(args) local function delete_lines_func_end(args)
local db = args.db
local cs = args.cs local cs = args.cs
if db.indexes_pool[cs] or db.indexes_pool[cs].file then if self.db.indexes_pool[cs] or self.db.indexes_pool[cs].file then
db.indexes_pool[cs].file:close() self.db.indexes_pool[cs].file:close()
db.indexes_pool[cs].file = nil self.db.indexes_pool[cs].file = nil
args.file:seek("set") args.file:seek("set")
args.file:write(string.format("%i", args.count)) args.file:write(string.format("%i", args.count))
args.file:close() args.file:close()
@ -221,21 +84,254 @@ local function delete_lines_func_end(args)
os.rename(args.copyfile, args.oldfile) os.rename(args.copyfile, args.oldfile)
end end
for i, l in pairs(args.removedlist) do for i, l in pairs(args.removedlist) do
db.indexes_pool[cs].deleted_items[i] = nil self.db.indexes_pool[cs].deleted_items[i] = nil
end end
db.indexes_pool[cs].deleting = false self.db.indexes_pool[cs].deleting = false
end end
args = nil args = nil
end end
function colddb.delete_lines(db,_lines,tag_name) local function iterate(func_on_iterate, end_func, count, cs, args)
local f = self.db.indexes_pool[cs]
local fl = f.file
self.db.async.iterate(1, count, function(i)
local line = fl:read("*l")
if args.do_not_skip_removed_items or not self.db.indexes_pool[cs].deleted_items[line] then
local ar = func_on_iterate(line, i, args)
if ar ~= nil then
args = ar
return args
end
end
end,function()
if end_func then
end_func(args)
end
if self.db.iterate_queue[cs] and self.db.iterate_queue[cs][1] then
local copy = self.db.iterate_queue[cs][1]
f = self.db.indexes_pool[cs]
if not f or not f.file then
self.open_index_table(copy.tag_name)
f = self.db.indexes_pool[cs]
end
if copy.begin_func then
local a = copy.begin_func(copy.args)
if a and type(a) == "table" then
copy.args = a
end
end
minetest.after(0, iterate, copy.func_on_iterate, copy.end_func, copy.count, copy.cs, copy.args)
table.remove(self.db.iterate_queue[cs], 1)
return false
else
fl:close()
self.db.iterate_queue[cs] = nil
end
self.db.indexes_pool[cs].iterating = false
return false
end)
end
local function load_into_mem(name, _table, tag_name)
if self.db.add_to_mem_pool then
local t = ""
if tag_name then
t = self.get_tag(tag_name)
end
local cs = string.format("%s%s", t, name)
if not self.db.mem_pool[cs] then
self.db.mem_pool[cs] = {mem = _table, indexes = self.db.indexes}
else
self.db.mem_pool[cs].mem = _table
self.db.mem_pool[cs].indexes = self.db.indexes
end
end
end
local path_count = {}
local function _remove_tag(delete_path, prev_dp)
if path_count[delete_path] and path_count[delete_path] > 0 then
minetest.after(1.5, _remove_tag, delete_path)
return
elseif path_count[delete_path] and path_count[delete_path] < 1 then
self.db.mem_pool = {}
self.db.mem_pool_del = {}
os.remove(delete_path)
return
elseif not path_count[delete_path] then
path_count[delete_path] = 0
end
local list = minetest.get_dir_list(delete_path)
self.db.async.foreach(list, function(k, v)
v = string.format("%s/%s", delete_path, v)
local err = os.remove(v)
if err == nil then
minetest.after(0, _remove_tag, v, delete_path)
path_count[delete_path] = path_count[delete_path] + 1
end
end, function()
if prev_dp then
path_count[prev_dp] = path_count[prev_dp] - 1
end
if path_count[delete_path] > 0 then
minetest.after(1.5, _remove_tag, delete_path)
else
self.db.mem_pool = {}
self.db.mem_pool_del = {}
os.remove(delete_path)
end
end)
end
self.add_global_tag = function(tag)
local t = ""
if type(tag) == "table" then
for index in pairs(tag) do
t = string.format("%s%s/", t, index)
end
else
t = string.format("%s/", tag)
end
self.db.global_tag = string.format("%s%s", self.db.global_tag, t)
self.db.directory = string.format("%s/%s", self.db.directory, t)
if not createDir(self.db.directory) then
error(string.format("%s is not a directory.", self.db.directory))
end
end
self.add_tag = function(name, tag)
local t = ""
if not self.db.tags[name] then
self.db.tags[name] = ""
end
if type(tag) == "table" then
for key, value in pairs(tag) do
t = string.format("%s%s/", t, value)
end
else
t = string.format("%s/", tag)
end
local test_path = string.format("%s%s%s", self.db.directory, self.db.tags[name], t)
if not createDir(test_path) then
error(string.format("%s is not a directory.", test_path))
end
self.db.tags[name] = string.format("%s%s", self.db.tags[name], t)
end
self.get_tag = function(name)
if not name then
return ""
end
local tag = self.db.tags[name]
if tag then
return tag
end
return ""
end
self.get_or_add_tag = function(name, tag)
if not self.db.tags[name] then
self.add_tag(name, tag)
end
return name
end
self.remove_tag = function(name)
if self.db.tags[name] then
local delete_path = string.format("%s%s", self.db.directory, self.db.tags[name])
local wc = delete_path:len()
delete_path = delete_path:sub(0, wc-1)
self.db.tags[name] = nil
local err = os.remove(delete_path)
if err == nil then
minetest.after(0.1, _remove_tag, delete_path)
end
end
end
self.delete_file = function(name, tag_name)
local t = ""
if tag_name then
t = self.get_tag(tag_name)
end
local text = string.format("%s%s%s.cold", self.db.directory, t, name)
local err, msg = os.remove(text)
if err == nil then
print(string.format("error removing db data %s error message: %s", text, msg))
end
end
self.load_table = function(name, tag_name)
local t = ""
if tag_name then
t = self.get_tag(tag_name)
end
local f = io.open(string.format("%s%s%s.cold", self.db.directory, t, name), "r")
if f then
local data = minetest.deserialize(f:read("*a"))
f:close()
return data
end
return nil
end
self.save_table = function(name, _table, tag_name)
local t = ""
if tag_name then
t = self.get_tag(tag_name)
end
return minetest.safe_file_write(string.format("%s%s%s.cold", self.db.directory, t, name), minetest.serialize(_table))
end
self.save_key = function(name, tag_name)
local t = ""
if tag_name then
t = self.get_tag(tag_name)
end
return minetest.safe_file_write(string.format("%s%s%s.cold", self.db.directory, t, name), "")
end
self.load_key = function(name, tag_name)
local t = ""
if tag_name then
t = self.get_tag(tag_name)
end
local f = io.open(string.format("%s%s%s.cold", self.db.directory, t, name), "r")
if f then
f:close()
return true
end
return false
end
self.delete_index_table = function(tag_name)
local t = "" local t = ""
local name = "æIndex_table" local name = "æIndex_table"
if tag_name then if tag_name then
t = colddb.get_tag(db,tag_name) t = self.get_tag(tag_name)
end
local p = string.format("%s%sæIndex_table.cold", self.db.directory, t)
if self.file_Exists(name, tag_name) then
local err, msg = os.remove(p)
if err == nil then
print(string.format("error removing db data %s error message: %s", p, msg))
end
return true
end
return false
end
self.delete_lines = function(_lines, tag_name)
local t = ""
local name = "æIndex_table"
if tag_name then
t = self.get_tag(tag_name)
end end
local cs = string.format("%s%s", t, name) local cs = string.format("%s%s", t, name)
local f = db.indexes_pool[cs] local f = self.db.indexes_pool[cs]
local k = type(_lines) local k = type(_lines)
if k == "string" then if k == "string" then
f.deleted_items[_lines] = true f.deleted_items[_lines] = true
@ -244,59 +340,34 @@ function colddb.delete_lines(db,_lines,tag_name)
f.deleted_items[i] = true f.deleted_items[i] = true
end end
end end
if not db.indexes_pool[cs].deleting then if not self.db.indexes_pool[cs].deleting then
db.indexes_pool[cs].deleting = false self.db.indexes_pool[cs].deleting = false
end end
if f and f.file and not db.indexes_pool[cs].deleting then if f and f.file and not self.db.indexes_pool[cs].deleting then
db.indexes_pool[cs].deleting = true self.db.indexes_pool[cs].deleting = true
if db.indexes_pool[cs].needs_flushing == true then if self.db.indexes_pool[cs].needs_flushing == true then
f.file:flush() f.file:flush()
db.indexes_pool[cs].needs_flushing = false self.db.indexes_pool[cs].needs_flushing = false
end end
local oldfile = string.format("%s%sæIndex_table.cold",db.directory,t) local oldfile = string.format("%s%sæIndex_table.cold", self.db.directory, t)
local copyfile = string.format("%s%sæIndex_table.cold.replacer",db.directory,t) local copyfile = string.format("%s%sæIndex_table.cold.replacer", self.db.directory, t)
local args = {db=db,cs=cs,oldfile=oldfile,copyfile=copyfile,do_not_skip_removed_items=true} local args = {cs = cs, oldfile = oldfile, copyfile = copyfile, do_not_skip_removed_items = true}
db.indexes_pool[cs] = f self.db.indexes_pool[cs] = f
colddb.iterate_index_table(db,delete_lines_func_begin,delete_lines_func_i,delete_lines_func_end,args,tag_name) iterate_index_table(delete_lines_func_begin, delete_lines_func_i, delete_lines_func_end, args, tag_name)
end end
end end
function colddb.create_index_table(db,tag_name) self.open_index_table = function(tag_name)
local t = "" local t = ""
local name = "æIndex_table" local name = "æIndex_table"
if tag_name then if tag_name then
t = colddb.get_tag(db,tag_name) t = self.get_tag(tag_name)
end
local p = string.format("%s%sæIndex_table.cold",db.directory,t)
if not colddb.file_Exists(db,name,tag_name) then
local f = io.open(p, "w")
if f then
f:seek("set")
f:write("0")
f:close()
end
end
local f = io.open(p, "r+")
if f then
f:seek("set")
f:write("0")
db.indexes_pool[string.format("%s%s",t,name)] = {file = f,needs_flushing = false,deleted_items = {},iterating = false}
return true
end
return false
end
function colddb.open_index_table(db,tag_name)
local t = ""
local name = "æIndex_table"
if tag_name then
t = colddb.get_tag(db,tag_name)
end end
local cs = string.format("%s%s", t, name) local cs = string.format("%s%s", t, name)
local fs = db.indexes_pool[cs] local fs = self.db.indexes_pool[cs]
if not fs then if not fs then
local p = string.format("%s%sæIndex_table.cold",db.directory,t) local p = string.format("%s%sæIndex_table.cold", self.db.directory, t)
if not colddb.file_Exists(db,name,tag_name) then if not self.file_Exists(name,tag_name) then
local f = io.open(p, "w") local f = io.open(p, "w")
if f then if f then
f:seek("set") f:seek("set")
@ -306,7 +377,7 @@ function colddb.open_index_table(db,tag_name)
end end
local f = io.open(p, "r+") local f = io.open(p, "r+")
if f then if f then
db.indexes_pool[cs] = {file = f,needs_flushing = false,deleted_items = {},iterating = false} self.db.indexes_pool[cs] = {file = f, needs_flushing = false, deleted_items = {}, iterating = false}
return f return f
end end
return nil return nil
@ -316,14 +387,14 @@ function colddb.open_index_table(db,tag_name)
return nil return nil
end end
function colddb.append_index_table(db,key,tag_name) self.append_index_table = function(key, tag_name)
local t = "" local t = ""
local name = "æIndex_table" local name = "æIndex_table"
if tag_name then if tag_name then
t = colddb.get_tag(db,tag_name) t = self.get_tag(tag_name)
end end
local cs = string.format("%s%s", t, name) local cs = string.format("%s%s", t, name)
local f = db.indexes_pool[cs] local f = self.db.indexes_pool[cs]
local k = type(key) local k = type(key)
if f and f.file and k == "string" then if f and f.file and k == "string" then
local fl = f.file local fl = f.file
@ -331,7 +402,7 @@ function colddb.append_index_table(db,key,tag_name)
fl:flush() fl:flush()
f.needs_flushing = false f.needs_flushing = false
end end
db.indexes_pool[cs].needs_flushing = true self.db.indexes_pool[cs].needs_flushing = true
fl:seek("end") fl:seek("end")
fl:write(string.format("\n%s", key)) fl:write(string.format("\n%s", key))
fl:seek("set") fl:seek("set")
@ -339,13 +410,14 @@ function colddb.append_index_table(db,key,tag_name)
count = count + 1 count = count + 1
fl:seek("set") fl:seek("set")
fl:write(string.format("%i", count)) fl:write(string.format("%i", count))
fl:close()
elseif f and f.file then elseif f and f.file then
local fl = f.file local fl = f.file
if f.needs_flushing == true then if f.needs_flushing == true then
fl:flush() fl:flush()
f.needs_flushing = false f.needs_flushing = false
end end
db.indexes_pool[cs].needs_flushing = true self.db.indexes_pool[cs].needs_flushing = true
local c = 0 local c = 0
for i in pairs(key) do for i in pairs(key) do
fl:seek("end") fl:seek("end")
@ -357,19 +429,20 @@ function colddb.append_index_table(db,key,tag_name)
count = count + c count = count + c
fl:seek("set") fl:seek("set")
fl:write(string.format("%i", count)) fl:write(string.format("%i", count))
fl:close()
else else
return false return false
end end
end end
function colddb.get_count(db,tag_name) self.get_count = function(tag_name)
local t = "" local t = ""
local name = "æIndex_table" local name = "æIndex_table"
if tag_name then if tag_name then
t = colddb.get_tag(db,tag_name) t = self.get_tag(tag_name)
end end
local cs = string.format("%s%s",t,name) local cs = string.format("%s%s",t,name)
local f = db.indexes_pool[cs] local f = self.db.indexes_pool[cs]
if f and f.file then if f and f.file then
local fl = f.file local fl = f.file
if f.needs_flushing == true then if f.needs_flushing == true then
@ -378,65 +451,26 @@ function colddb.get_count(db,tag_name)
end end
fl:seek("set") fl:seek("set")
local count = tonumber(fl:read("*l")) local count = tonumber(fl:read("*l"))
fl:close()
return count return count
end end
return nil return nil
end end
local function iterate(db,func_on_iterate,end_func,count,cs,args) self.iterate_index_table = function(begin_func, func_on_iterate, end_func, args, tag_name)
local f = db.indexes_pool[cs]
local fl = f.file
extended_api.Async.iterate(db.async_pool,1,count,function(i)
local line = fl:read("*l")
if args.do_not_skip_removed_items or not db.indexes_pool[cs].deleted_items[line] then
local ar = func_on_iterate(line,i,args)
if ar ~= nil then
args = ar
return args
end
end
end,function()
if end_func then
end_func(args)
end
if db.iterate_queue[cs] and db.iterate_queue[cs][1] then
local copy = db.iterate_queue[cs][1]
f = db.indexes_pool[cs]
if not f or not f.file then
colddb.open_index_table(db,copy.tag_name)
f = db.indexes_pool[cs]
end
if copy.begin_func then
local a = copy.begin_func(copy.args)
if a and type(a) == "table" then
copy.args = a
end
end
minetest.after(0,iterate,copy.db,copy.func_on_iterate,copy.end_func,copy.count,copy.cs,copy.args)
table.remove(db.iterate_queue[cs],1)
return false
else
db.iterate_queue[cs] = nil
end
db.indexes_pool[cs].iterating = false
return false
end)
end
function colddb.iterate_index_table(db,begin_func,func_on_iterate,end_func,args,tag_name)
local t = "" local t = ""
local name = "æIndex_table" local name = "æIndex_table"
if tag_name then if tag_name then
t = colddb.get_tag(db,tag_name) t = self.get_tag(tag_name)
end end
local cs = string.format("%s%s", t, name) local cs = string.format("%s%s", t, name)
local f = db.indexes_pool[cs] local f = self.db.indexes_pool[cs]
if not f or not f.file then if not f or not f.file then
colddb.open_index_table(db,tag_name) self.open_index_table(tag_name)
f = db.indexes_pool[cs] f = self.db.indexes_pool[cs]
end end
if f and f.file and db.indexes_pool[cs].iterating == false then if f and f.file and self.db.indexes_pool[cs].iterating == false then
db.indexes_pool[cs].iterating = true self.db.indexes_pool[cs].iterating = true
local fl = f.file local fl = f.file
if f.needs_flushing == true then if f.needs_flushing == true then
fl:flush() fl:flush()
@ -459,11 +493,11 @@ function colddb.iterate_index_table(db,begin_func,func_on_iterate,end_func,args,
if c < 1 then if c < 1 then
-- If theres nothing to index then return -- If theres nothing to index then return
end_func(args) end_func(args)
db.indexes_pool[cs].iterating = false self.db.indexes_pool[cs].iterating = false
return false return false
end end
-- Start iterating the index table -- Start iterating the index table
iterate(db,func_on_iterate,end_func,c,cs,args) iterate(func_on_iterate, end_func, c, cs, args)
elseif f and f.file then elseif f and f.file then
local fl = f.file local fl = f.file
-- If its iterating some other function then add this one to the queue list -- If its iterating some other function then add this one to the queue list
@ -473,175 +507,175 @@ function colddb.iterate_index_table(db,begin_func,func_on_iterate,end_func,args,
-- If theres nothing to index then return -- If theres nothing to index then return
return false return false
end end
if not db.iterate_queue[cs] then if not self.db.iterate_queue[cs] then
db.iterate_queue[cs] = {} self.db.iterate_queue[cs] = {}
end end
local _table = {db=db,begin_func=begin_func,func_on_iterate=func_on_iterate,end_func=end_func,count=c,cs=cs,tag_name=tag_name,args=args} local _table = {begin_func = begin_func, func_on_iterate = func_on_iterate, end_func = end_func, count = c, cs = cs, tag_name = tag_name, args = args}
table.insert(db.iterate_queue[cs],_table) table.insert(self.db.iterate_queue[cs], _table)
end end
end end
self.set = function(name, _table, tag_name)
    -- Store a key/value pair: serialize _table to disk asynchronously,
    -- register the key in the index table (when indexing is enabled),
    -- optionally cache it in memory, and clear any pending-delete mark.
    local t = ""
    if tag_name then
        t = self.get_tag(tag_name)
    end
    if self.db.indexes and not self.file_Exists(name, tag_name) then
        self.db.async.queue_task(function()
            local cs2 = string.format("%s%s", t, "æIndex_table")
            local om = self.db.indexes_pool[cs2]
            -- (Re)open the index table if it is missing or has no open handle.
            if not self.file_Exists("æIndex_table", tag_name) or not (om and om.file) then
                self.open_index_table(tag_name)
            end
            self.append_index_table(name, tag_name)
        end)
    end
    self.db.async.queue_task(function()
        self.save_table(name, _table, tag_name)
    end)
    if self.db.add_to_mem_pool then
        load_into_mem(name, _table, tag_name)
    end
    -- The key now exists again: drop any "known deleted" marker.
    local cs = string.format("%s%s", t, name)
    self.db.mem_pool_del[cs] = nil
end
self.set_key = function(name, tag_name)
    -- Store a value-less key: create its empty .cold file asynchronously,
    -- register it in the index table (when indexing is enabled), optionally
    -- cache it (as ""), and clear any pending-delete mark.
    local t = ""
    if tag_name then
        t = self.get_tag(tag_name)
    end
    if self.db.indexes and not self.file_Exists(name, tag_name) then
        self.db.async.queue_task(function()
            local cs2 = string.format("%s%s", t, "æIndex_table")
            local om = self.db.indexes_pool[cs2]
            -- (Re)open the index table if it is missing or has no open handle.
            if not self.file_Exists("æIndex_table", tag_name) or not (om and om.file) then
                self.open_index_table(tag_name)
            end
            self.append_index_table(name, tag_name)
        end)
    end
    self.db.async.queue_task(function()
        self.save_key(name, tag_name)
    end)
    if self.db.add_to_mem_pool then
        -- Keys carry no value; cache an empty string as the placeholder.
        load_into_mem(name, "", tag_name)
    end
    -- The key now exists again: drop any "known deleted" marker.
    local cs = string.format("%s%s", t, name)
    self.db.mem_pool_del[cs] = nil
end
function colddb.set_key(db,name,tag_name) self.get = function(name, tag_name, callback)
local t = "" local t = ""
if tag_name then if tag_name then
t = colddb.get_tag(db,tag_name) t = self.get_tag(tag_name)
end end
if db.indexes and not colddb.file_Exists(db,name,tag_name) then local cs = string.format("%s%s", t, name)
extended_api.Async.queue_task(db.async_pool,function() if self.db.mem_pool_del[cs] then
local cs2 = string.format("%s%s",t,"æIndex_table")
local om = db.indexes_pool[cs2]
if not colddb.file_Exists(db,"æIndex_table",tag_name) or not (om and om.file) then
colddb.open_index_table(db,tag_name)
end
colddb.append_index_table(db,name,tag_name)
end)
end
extended_api.Async.queue_task(db.async_pool,function()
colddb.save_key(db,name, tag_name)
end)
if db.add_to_mem_pool then
load_into_mem(db,name,"",tag_name)
end
end
function colddb.get(db,name,tag_name,callback)
if callback then if callback then
extended_api.Async.queue_task(db.async_pool,function() callback(nil)
local t = ""
if tag_name then
t = colddb.get_tag(db,tag_name)
end end
local cs = string.format("%s%s",t,name) return nil
local pm = db.mem_pool[cs] end
if callback then
self.db.async.queue_task(function()
local pm = self.db.mem_pool[cs]
if pm then if pm then
return pm.mem return pm.mem
else else
local _table = colddb.load_table(db,name,tag_name) local _table = self.load_table(name, tag_name)
if _table then if _table then
load_into_mem(db,name,_table,tag_name) load_into_mem(name, _table, tag_name)
return _table return _table
end end
end end
self.db.mem_pool_del[cs] = true
return nil return nil
end,callback) end,callback)
else else
local t = "" local pm = self.db.mem_pool[cs]
if tag_name then
t = colddb.get_tag(db,tag_name)
end
local cs = string.format("%s%s",t,name)
local pm = db.mem_pool[cs]
if pm then if pm then
return pm.mem return pm.mem
else else
local _table = colddb.load_table(db,name,tag_name) local _table = self.load_table(name, tag_name)
if _table then if _table then
load_into_mem(db,name,_table,tag_name) load_into_mem(name, _table, tag_name)
return _table return _table
end end
end end
self.db.mem_pool_del[cs] = true
return nil return nil
end end
end end
self.get_key = function(name, tag_name, callback)
    -- Test whether a value-less key exists. With a callback the check runs on
    -- the async queue (callback receives true, or nil on a miss); without one
    -- it runs synchronously. Known-deleted keys report false immediately.
    local t = ""
    if tag_name then
        t = self.get_tag(tag_name)
    end
    local cs = string.format("%s%s", t, name)
    if self.db.mem_pool_del[cs] then
        -- Key was deleted earlier in this session: report false immediately.
        if callback then
            callback(false)
        end
        return false
    end
    if callback then
        self.db.async.queue_task(function()
            local pm = self.db.mem_pool[cs]
            if pm then
                return true
            else
                local bool = self.load_key(name, tag_name)
                if bool then
                    load_into_mem(name, bool, tag_name)
                    return bool
                end
            end
            -- Not on disk either: remember the miss to skip future reads.
            self.db.mem_pool_del[cs] = true
            return nil
        end, callback)
    else
        local pm = self.db.mem_pool[cs]
        if pm then
            return true
        else
            local bool = self.load_key(name, tag_name)
            if bool then
                load_into_mem(name, bool, tag_name)
                return bool
            end
        end
        -- Not on disk either: remember the miss to skip future reads.
        self.db.mem_pool_del[cs] = true
        return nil
    end
end
self.remove = function(name, tag_name)
    -- Delete a key (with or without a value): evict it from the memory pool,
    -- mark it as deleted, drop it from the index table (when indexing is
    -- enabled), and remove its .cold file — the last two asynchronously.
    local t = ""
    if tag_name then
        t = self.get_tag(tag_name)
    end
    local cs = string.format("%s%s", t, name)
    self.db.mem_pool[cs] = nil
    self.db.mem_pool_del[cs] = true
    if self.db.indexes and self.file_Exists("æIndex_table", tag_name) then
        self.db.async.queue_task(function()
            local cs2 = string.format("%s%s", t, "æIndex_table")
            -- Make sure the index table has an open handle before editing it.
            if not (self.db.indexes_pool[cs2] and self.db.indexes_pool[cs2].file) then
                self.open_index_table(tag_name)
            end
            self.delete_lines(name, tag_name)
        end)
    end
    self.db.async.queue_task(function()
        self.delete_file(name, tag_name)
    end)
end
return self
end