More efficient Python script

- Fixed pipelining
- Cleaning everything up
- Don't re-download saved characters
- Add the media to .gitignore

About pipelining:

According to Python's http.client:
1) you send a request
2) you MUST read the response headers for (1) (THIS IS MANDATORY)
3) you send another request
4) you read the response body for request (1)
5) then the response headers for request (3)
6) then the response body for request (3)

Only two requests can be pipelined. Surely this is an unavoidable protocol limitation... wait, no, it's just written into the code to error out if you don't do it that way.
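For example, with a plain HTTPConnection the second request() below fails until the first response's headers have been read (a minimal sketch; example.com stands in for any HTTP/1.1 keep-alive server):

    from http.client import HTTPConnection, CannotSendRequest

    c = HTTPConnection("example.com")
    c.request("GET", "/first")
    try:
        c.request("GET", "/second")  # state is Request-sent, not Idle
    except CannotSendRequest:
        print("http.client refuses to pipeline this")
    r1 = c.getresponse()        # reading the headers resets the state to Idle...
    c.request("GET", "/second") # ...so only NOW may a second request go out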

According to reality:
1) you send a request
2) you do not read the response headers for (1) yet
3) you repeat steps 1-2 until enough requests are queued
4) you receive those responses in order, as headers, body, headers, body...

They even name the connection state attribute with a leading __ to make it hard to override, but the state can safely go back to Idle after a request has been sent, whether or not the response headers have come in. Sure, the connection might close, but then you adjust by not pipelining, and re-send the rest of your requests over a new connection.
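That override is the trick the Pipeline class in this commit uses. As a standalone sketch (example.com and the paths are placeholders; _CS_IDLE, response_class and the name-mangled _HTTPConnection__state attribute are CPython internals, so this may break on other Python implementations):

    from http.client import HTTPConnection, _CS_IDLE

    c = HTTPConnection("example.com")
    pending = []
    for path in ("/a", "/b", "/c"):
        c.request("GET", path)
        # force the state machine back to Idle so the next request() is allowed
        c._HTTPConnection__state = _CS_IDLE
        # attach an unread response to the socket; begin() comes later,
        # once all the requests are on the wire
        pending.append(c.response_class(c.sock, method="GET"))
    for resp in pending:
        resp.begin()                          # read the headers now
        print(resp.status, len(resp.read()))  # then the body, in send order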
user 2015-09-21 19:11:25 +00:00 committed by SmallJoker
parent e762283dec
commit f389e6bd13
2 changed files with 168 additions and 47 deletions

.gitignore

@@ -0,0 +1,2 @@
+character_*.png
+character_*.txt


@@ -1,61 +1,180 @@
 #!/usr/bin/python3
-from http.client import HTTPConnection
+from http.client import HTTPConnection,HTTPException,BadStatusLine,_CS_IDLE
 import json
 import base64
+from contextlib import closing
+import sys,os,shutil,time
+def die(message,code=23):
+	print(message,file=sys.stderr)
+	raise SystemExit(code)
 server = "minetest.fensta.bplaced.net"
 skinsdir = "u_skins/textures/"
 metadir = "u_skins/meta/"
-i = 1
-pages = 1
+curskin = 0
+curpage = 1
+pages = None
-c = HTTPConnection(server)
-def addpage(page):
-	global i, pages
-	print("Page: " + str(page))
-	r = 0
-	try:
-		c.request("GET", "/api/get.json.php?getlist&page=" + str(page) + "&outformat=base64")
-		r = c.getresponse()
-	except Exception:
-		if r != 0:
-			if r.status != 200:
-				print("Error", r.status)
-				exit(r.status)
-		return
-	data = r.read().decode()
-	l = json.loads(data)
-	if not l["success"]:
-		print("Success != True")
-		exit(1)
-	r = 0
-	pages = int(l["pages"])
-	for s in l["skins"]:
-		f = open(skinsdir + "character_" + str(i) + ".png", "wb")
-		f.write(base64.b64decode(bytes(s["img"], 'utf-8')))
-		f.close()
-		f = open(metadir + "character_" + str(i) + ".txt", "w")
-		f.write(str(s["name"]) + '\n')
-		f.write(str(s["author"]) + '\n')
-		f.write(str(s["license"]))
-		f.close()
+def replace(location,base,encoding=None,path=None):
+	if path is None:
+		path = os.path.join(location,base)
+	mode = "wt" if encoding else "wb"
+	# an unpredictable temp name only needed for a+rwxt directories
+	tmp = os.path.join(location,'.'+base+'-tmp')
+	def deco(handle):
+		with open(tmp,mode,encoding=encoding) as out:
+			handle(out)
+		os.rename(tmp,path)
+	return deco
+def maybeReplace(location,base,encoding=None):
+	def deco(handle):
+		path = os.path.join(location,base)
+		if os.path.exists(path): return
+		return replace(location,base,encoding=encoding,path=path)(handle)
+	return deco
+class Penguin:
+	"idk"
+	def __init__(self, url, recv, diemessage):
+		self.url = url
+		self.recv = recv
+		self.diemessage = diemessage
+class Pipeline(list):
+	"Gawd why am I being so elaborate?"
+	def __init__(self, threshold=10):
+		"threshold is how many requests in parallel to pipeline"
+		self.threshold = threshold
+		self.sent = True
+	def __enter__(self):
+		self.reopen()
+		return self
+	def __exit__(self,typ,exn,trace):
+		self.send()
+		self.drain()
+	def reopen(self):
+		self.c = HTTPConnection(server)
+		self.send()
+	def append(self,url,recv,diemessage):
+		self.sent = False
+		super().append(Penguin(url,recv,diemessage))
+		if len(self) > self.threshold:
+			self.send()
+			self.drain()
+	def trydrain(self):
+		for penguin in self:
+			print('drain',penguin.url)
+			try:
+				penguin.response.begin()
+				penguin.recv(penguin.response)
+			except BadStatusLine as e:
+				print('derped requesting',penguin.url)
+				return False
+			except HTTPException as e:
+				die(penguin.diemessage+' '+repr(e)+' (url='+penguin.url+')')
+		self.clear()
+		return True
+	def drain(self):
+		print('draining pipeline...',len(self))
+		assert self.sent, "Can't drain without sending the requests!"
+		self.sent = False
+		while self.trydrain() is not True:
+			self.c.close()
+			print('drain failed, trying again')
+			time.sleep(1)
+			self.reopen()
+	def trysend(self):
+		for penguin in self:
+			print('fill',penguin.url)
+			try:
+				self.c.request("GET", penguin.url)
+				self.c._HTTPConnection__state = _CS_IDLE
+				penguin.response = self.c.response_class(self.c.sock,
+					method="GET")
+				# begin LATER so we can send multiple requests w/out response headers
+			except BadStatusLine:
+				return False
+			except HTTPException as e:
+				die(penguin.diemessage+' because of a '+repr(e))
+		return True
+	def send(self):
+		if self.sent: return
+		print('filling pipeline...',len(self))
+		while self.trysend() is not True:
+			self.c.close()
+			print('derped resending')
+			time.sleep(1)
+			self.reopen()
+		self.sent = True
+with Pipeline() as pipeline:
+	# two connections is okay, right? one for json, one for preview images
+	c = HTTPConnection(server)
+	def addpage(page):
+		global curskin, pages
+		print("Page: " + str(page))
 		r = 0
 		try:
-			c.request("GET", "/skins/1/" + str(s["id"]) + ".png")
+			c.request("GET", "/api/get.json.php?getlist&page=" + str(page) + "&outformat=base64")
 			r = c.getresponse()
 		except Exception:
 			if r != 0:
 				if r.status != 200:
-					print("Error", r.status)
-					continue
+					die("Error", r.status)
 			return
-		data = r.read()
-		f = open(skinsdir + "character_" + str(i) + "_preview.png", "wb")
-		f.write(data)
-		f.close()
-		i = i + 1
-addpage(1)
-if pages > 1:
-	for p in range(pages-1):
-		addpage(p+2)
-print("Skins have been updated!")
+		data = r.read().decode()
+		l = json.loads(data)
+		if not l["success"]:
+			die("Success != True")
+		r = 0
+		pages = int(l["pages"])
+		foundOne = False
+		for s in l["skins"]:
+			# make sure to increment this, even if the preview exists!
+			curskin = curskin + 1
+			previewbase = "character_" + str(curskin) + "_preview.png"
+			preview = os.path.join(skinsdir, previewbase)
+			if os.path.exists(preview):
+				print('skin',curskin,'already retrieved')
+				continue
+			print('updating skin',curskin,'id',s["id"])
+			foundOne = True
+			@maybeReplace(skinsdir, "character_" + str(curskin) + ".png")
+			def go(f):
+				f.write(base64.b64decode(bytes(s["img"], 'utf-8')))
+				f.close()
+			@maybeReplace(metadir, "character_" + str(curskin) + ".txt",
+					encoding='utf-8')
+			def go(f):
+				f.write(str(s["name"]) + '\n')
+				f.write(str(s["author"]) + '\n')
+				f.write(str(s["license"]))
+			url = "/skins/1/" + str(s["id"]) + ".png"
+			def closure(skinsdir,previewbase,preview,s):
+				"explanation: python sucks"
+				def tryget(r):
+					print('replacing',s["id"])
+					if r.status != 200:
+						print("Error", r.status)
+						return
+					@replace(skinsdir,previewbase,path=preview)
+					def go(f):
+						shutil.copyfileobj(r,f)
+				return tryget
+			pipeline.append(url,closure(skinsdir,previewbase,preview,s),
+					"Couldn't get {} because of a".format(
+						s["id"]))
+		if not foundOne:
+			print("No skins updated on this page. Seems we're done?")
+			#raise SystemExit
+	addpage(curpage)
+	while pages > curpage:
+		curpage = curpage + 1
+		addpage(curpage)
+	print("Skins have been updated!")