Diffstat (limited to 'pyload/utils')
-rw-r--r--  pyload/utils/ImportDebugger.py  |  19
-rw-r--r--  pyload/utils/JsEngine.py        | 195
-rw-r--r--  pyload/utils/__init__.py        | 253
-rw-r--r--  pyload/utils/fs.py              |  78
-rw-r--r--  pyload/utils/json_layer.py      |  15
-rw-r--r--  pyload/utils/packagetools.py    | 155
-rw-r--r--  pyload/utils/pylgettext.py      |  61
7 files changed, 776 insertions, 0 deletions
diff --git a/pyload/utils/ImportDebugger.py b/pyload/utils/ImportDebugger.py
new file mode 100644
index 000000000..a997f7b0c
--- /dev/null
+++ b/pyload/utils/ImportDebugger.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+import sys
+
+class ImportDebugger(object):
+
+ def __init__(self):
+ self.imported = {}
+
+ def find_module(self, name, path=None):
+
+ if name not in self.imported:
+ self.imported[name] = 0
+
+ self.imported[name] += 1
+
+ print name, path
+
+sys.meta_path.append(ImportDebugger()) \ No newline at end of file
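
A quick sketch of how the hook behaves once installed (Python 2, matching the code above, and assuming the pyload package is importable): importing the module appends the debugger to sys.meta_path; every subsequent import that is not already cached in sys.modules is printed and counted, and since find_module returns None the regular import machinery still loads the module. The csv module below is just an arbitrary not-yet-imported module.

    import pyload.utils.ImportDebugger   # installs the finder on sys.meta_path
    import csv                            # prints "csv None" before the regular import runs
    import sys

    dbg = sys.meta_path[-1]               # assumes nothing else was appended afterwards
    print dbg.imported                    # e.g. {'csv': 1, ...}
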
diff --git a/pyload/utils/JsEngine.py b/pyload/utils/JsEngine.py
new file mode 100644
index 000000000..ef7494d16
--- /dev/null
+++ b/pyload/utils/JsEngine.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from imp import find_module
+from os.path import join, exists
+from urllib import quote
+
+
+ENGINE = ""
+
+DEBUG = False
+JS = False
+PYV8 = False
+NODE = False
+RHINO = False
+
+# TODO: Refactor + clean up this class
+
+if not ENGINE:
+ try:
+ import subprocess
+
+ subprocess.Popen(["js", "-v"], bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ p = subprocess.Popen(["js", "-e", "print(23+19)"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ #integrity check
+ if out.strip() == "42":
+ ENGINE = "js"
+ JS = True
+ except:
+ pass
+
+if not ENGINE or DEBUG:
+ try:
+ find_module("PyV8")
+ ENGINE = "pyv8"
+ PYV8 = True
+ except:
+ pass
+
+if not ENGINE or DEBUG:
+ try:
+ import subprocess
+ subprocess.Popen(["node", "-v"], bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ p = subprocess.Popen(["node", "-e", "console.log(23+19)"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ #integrity check
+ if out.strip() == "42":
+ ENGINE = "node"
+ NODE = True
+ except:
+ pass
+
+if not ENGINE or DEBUG:
+ try:
+ path = "" #path where to find rhino
+
+ if exists("/usr/share/java/js.jar"):
+ path = "/usr/share/java/js.jar"
+ elif exists("js.jar"):
+ path = "js.jar"
+ elif exists(join(pypath, "js.jar")): # may raise an exception, but js.jar wasn't found anyway
+ path = join(pypath, "js.jar")
+
+ if not path:
+ raise Exception
+
+ import subprocess
+
+ p = subprocess.Popen(["java", "-cp", path, "org.mozilla.javascript.tools.shell.Main", "-e", "print(23+19)"],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ #integrity check
+ if out.strip() == "42":
+ ENGINE = "rhino"
+ RHINO = True
+ except:
+ pass
+
+class JsEngine():
+ def __init__(self):
+ self.engine = ENGINE
+ self.init = False
+
+ def __nonzero__(self):
+ return bool(ENGINE)
+
+ def set_debug(self, value):
+ global DEBUG
+ DEBUG = value
+
+ def eval(self, script):
+ if not self.init:
+ if ENGINE == "pyv8" or (DEBUG and PYV8):
+ global PyV8
+ import PyV8
+
+ self.init = True
+
+ if type(script) == unicode:
+ script = script.encode("utf8")
+
+ if not ENGINE:
+ raise Exception("No JS Engine")
+
+ if not DEBUG:
+ if ENGINE == "pyv8":
+ return self.eval_pyv8(script)
+ elif ENGINE == "js":
+ return self.eval_js(script)
+ elif ENGINE == "node":
+ return self.eval_node(script)
+ elif ENGINE == "rhino":
+ return self.eval_rhino(script)
+ else:
+ results = []
+ if PYV8:
+ res = self.eval_pyv8(script)
+ print "PyV8:", res
+ results.append(res)
+ if JS:
+ res = self.eval_js(script)
+ print "JS:", res
+ results.append(res)
+ if NODE:
+ res = self.eval_node(script)
+ print "NODE:", res
+ results.append(res)
+ if RHINO:
+ res = self.eval_rhino(script)
+ print "Rhino:", res
+ results.append(res)
+
+ warning = False
+ for x in results:
+ for y in results:
+ if x != y:
+ warning = True
+
+ if warning: print "### WARNING ###: Different results"
+
+ return results[0]
+
+ def eval_pyv8(self, script):
+ rt = PyV8.JSContext()
+ rt.enter()
+ return rt.eval(script)
+
+ def eval_js(self, script):
+ script = "print(eval(unescape('%s')))" % quote(script)
+ p = subprocess.Popen(["js", "-e", script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
+ out, err = p.communicate()
+ res = out.strip()
+ return res
+
+ def eval_node(self, script):
+ script = "console.log(eval(unescape('%s')))" % quote(script)
+ p = subprocess.Popen(["node", "-e", script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
+ out, err = p.communicate()
+ res = out.strip()
+ return res
+
+ def eval_rhino(self, script):
+ script = "print(eval(unescape('%s')))" % quote(script)
+ p = subprocess.Popen(["java", "-cp", path, "org.mozilla.javascript.tools.shell.Main", "-e", script],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
+ out, err = p.communicate()
+ res = out.strip()
+ return res.decode("utf8").encode("ISO-8859-1")
+
+ def error(self):
+ return _("No js engine detected, please install either Spidermonkey, ossp-js, pyv8, nodejs or rhino")
+
+if __name__ == "__main__":
+ js = JsEngine()
+ js.set_debug(True)
+
+ test = u'"ü"+"ä"'
+ js.eval(test) \ No newline at end of file
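
A short usage sketch (Python 2), assuming at least one of the probed engines (js, PyV8, node or rhino) is actually installed; eval() returns the engine's output as a string:

    from pyload.utils.JsEngine import JsEngine

    js = JsEngine()
    if js:                              # __nonzero__ is False when no engine was detected
        print js.eval("17 + 25")        # -> "42"
        print js.eval(u'"a" + "b"')     # unicode scripts are encoded to utf8 first
    else:
        print "no JS engine available"  # js.error() would also need gettext's _ installed
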
diff --git a/pyload/utils/__init__.py b/pyload/utils/__init__.py
new file mode 100644
index 000000000..ca00f9abe
--- /dev/null
+++ b/pyload/utils/__init__.py
@@ -0,0 +1,253 @@
+# -*- coding: utf-8 -*-
+
+""" Store all usefull functions here """
+
+import os
+import time
+import re
+from string import maketrans
+from itertools import islice
+from htmlentitydefs import name2codepoint
+
+# abstraction layer for json operations
+try: # since python 2.6
+ import json
+except ImportError: #use system simplejson if available
+ import simplejson as json
+
+json_loads = json.loads
+json_dumps = json.dumps
+
+def decode(string):
+ """ decode string with utf if possible """
+ try:
+ if type(string) == str:
+ return string.decode("utf8", "replace")
+ else:
+ return string
+ except:
+ return string
+
+def encode(string):
+ """ decode string to utf if possible """
+ try:
+ if type(string) == unicode:
+ return string.encode("utf8", "replace")
+ else:
+ return string
+ except:
+ return string
+
+def remove_chars(string, repl):
+ """ removes all chars in repl from string"""
+ if type(string) == str:
+ return string.translate(maketrans("", ""), repl)
+ elif type(string) == unicode:
+ return string.translate(dict([(ord(s), None) for s in repl]))
+
+
+def get_console_encoding(enc):
+ if os.name == "nt":
+ if enc == "cp65001": # aka UTF-8
+ print "WARNING: Windows codepage 65001 is not supported."
+ enc = "cp850"
+ else:
+ enc = "utf8"
+
+ return enc
+
+def compare_time(start, end):
+ start = map(int, start)
+ end = map(int, end)
+
+ if start == end: return True
+
+ now = list(time.localtime()[3:5])
+ if start < now < end: return True
+ elif start > end and (now > start or now < end): return True
+ elif start < now > end < start: return True
+ else: return False
+
+def to_list(value):
+ return value if type(value) == list else ([value] if value is not None else [])
+
+def formatSize(size):
+ print "Deprecated formatSize, use format_size"
+ return format_size(size)
+
+def format_size(bytes):
+ bytes = int(bytes)
+ steps = 0
+ sizes = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB")
+ while bytes >= 1024:
+ bytes /= 1024.0
+ steps += 1
+ return "%.2f %s" % (bytes, sizes[steps])
+
+def formatSpeed(speed):
+ print "Deprecated formatSpeed, use format_speed"
+ return format_speed(speed)
+
+def format_speed(speed):
+ return format_size(speed) + "/s"
+
+def format_time(seconds):
+ if seconds < 0: return "00:00:00"
+ hours, seconds = divmod(seconds, 3600)
+ minutes, seconds = divmod(seconds, 60)
+ return "%.2i:%.2i:%.2i" % (hours, minutes, seconds)
+
+def uniqify(seq): #by Dave Kirby
+ """ removes duplicates from list, preserve order """
+ seen = set()
+ return [x for x in seq if x not in seen and not seen.add(x)]
+
+def bits_set(bits, compare):
+ """ checks if all bits are set in compare, or bits is 0 """
+ return bits == (bits & compare)
+
+def parseFileSize(string, unit=None): #returns bytes
+ if not unit:
+ m = re.match(r"([\d.,]+) *([a-zA-Z]*)", string.strip().lower())
+ if m:
+ traffic = float(m.group(1).replace(",", "."))
+ unit = m.group(2)
+ else:
+ return 0
+ else:
+ if isinstance(string, basestring):
+ traffic = float(string.replace(",", "."))
+ else:
+ traffic = string
+
+ #ignore case
+ unit = unit.lower().strip()
+
+ if unit in ("gb", "gig", "gbyte", "gigabyte", "gib", "g"):
+ traffic *= 1 << 30
+ elif unit in ("mb", "mbyte", "megabyte", "mib", "m"):
+ traffic *= 1 << 20
+ elif unit in ("kb", "kib", "kilobyte", "kbyte", "k"):
+ traffic *= 1 << 10
+
+ return traffic
+
+
+def lock(func):
+ def new(*args, **kwargs):
+ #print "Handler: %s args: %s" % (func,args[1:])
+ args[0].lock.acquire()
+ try:
+ return func(*args, **kwargs)
+ finally:
+ args[0].lock.release()
+
+ return new
+
+def read_lock(func):
+ def new(*args, **kwargs):
+ args[0].lock.acquire(shared=True)
+ try:
+ return func(*args, **kwargs)
+ finally:
+ args[0].lock.release()
+
+ return new
+
+def chunks(iterable, size):
+ it = iter(iterable)
+ item = list(islice(it, size))
+ while item:
+ yield item
+ item = list(islice(it, size))
+
+
+def fixup(m):
+ text = m.group(0)
+ if text[:2] == "&#":
+ # character reference
+ try:
+ if text[:3] == "&#x":
+ return unichr(int(text[3:-1], 16))
+ else:
+ return unichr(int(text[2:-1]))
+ except ValueError:
+ pass
+ else:
+ # named entity
+ try:
+ name = text[1:-1]
+ text = unichr(name2codepoint[name])
+ except KeyError:
+ pass
+
+ return text # leave as is
+
+
+def has_method(obj, name):
+ """ checks if 'name' was defined in obj, (false if it was inhereted) """
+ return name in obj.__dict__
+
+def accumulate(it, inv_map=None):
+ """ accumulate (key, value) data to {value : [keylist]} dictionary """
+ if not inv_map:
+ inv_map = {}
+
+ for key, value in it:
+ if value in inv_map:
+ inv_map[value].append(key)
+ else:
+ inv_map[value] = [key]
+
+ return inv_map
+
+def to_string(value):
+ return str(value) if not isinstance(value, basestring) else value
+
+def to_int(string, default=0):
+ """ return int from string or default """
+ try:
+ return int(string)
+ except ValueError:
+ return default
+
+def from_string(value, typ=None):
+ """ cast value to given type, unicode for strings """
+
+ # value is no string
+ if not isinstance(value, basestring):
+ return value
+
+ value = decode(value)
+
+ if typ == "int":
+ return int(value)
+ elif typ == "bool":
+ return value.lower() in ("1", "true", "on", "an", "yes")
+ elif typ == "time":
+ if not value: value = "0:00"
+ if not ":" in value: value += ":00"
+ return value
+ else:
+ return value
+
+def get_index(l, value):
+ """ .index method that also works on tuple and python 2.5 """
+ for pos, t in enumerate(l):
+ if t == value:
+ return pos
+
+ # Matches behavior of list.index
+ raise ValueError("list.index(x): x not in list")
+
+def primary_uid(user):
+ """ Gets primary user id for user instances or ints """
+ if type(user) == int: return user
+ return user.primary if user else None
+
+def html_unescape(text):
+ """Removes HTML or XML character references and entities from a text string"""
+ return re.sub("&#?\w+;", fixup, text)
+
+if __name__ == "__main__":
+ print remove_chars("ab'cdgdsf''ds'", "'ghd")
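
A few illustrative calls (Python 2) for the helpers above; the commented values are what the code as written returns:

    from pyload.utils import format_size, parseFileSize, chunks, accumulate, uniqify

    print format_size(1536)                           # "1.50 KiB"
    print parseFileSize("1,5 GB")                     # 1610612736.0  (1.5 * 2**30)
    print list(chunks(range(7), 3))                   # [[0, 1, 2], [3, 4, 5], [6]]
    print accumulate([("a", 1), ("b", 1), ("c", 2)])  # {1: ['a', 'b'], 2: ['c']}
    print uniqify([1, 2, 1, 3, 2])                    # [1, 2, 3]
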
diff --git a/pyload/utils/fs.py b/pyload/utils/fs.py
new file mode 100644
index 000000000..92cc605e7
--- /dev/null
+++ b/pyload/utils/fs.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+from os.path import join
+from . import decode, remove_chars
+
+# File System Encoding functions:
+# Use fs_encode before accessing files on disk; it encodes the string properly
+
+if sys.getfilesystemencoding().startswith('ANSI'):
+ def fs_encode(string):
+ if type(string) == unicode:
+ return string.encode('utf8')
+ return string
+
+ fs_decode = decode #decode utf8
+
+else:
+ fs_encode = fs_decode = lambda x: x # do nothing
+
+# FS utilities
+def chmod(path, mode):
+ try:
+ return os.chmod(fs_encode(path), mode)
+ except: # ignore errors, e.g. insufficient permissions
+ pass
+
+def dirname(path):
+ return fs_decode(os.path.dirname(fs_encode(path)))
+
+def abspath(path):
+ return fs_decode(os.path.abspath(fs_encode(path)))
+
+def chown(path, uid, gid):
+ return os.chown(fs_encode(path), uid, gid)
+
+def remove(path):
+ return os.remove(fs_encode(path))
+
+def exists(path):
+ return os.path.exists(fs_encode(path))
+
+def makedirs(path, mode=0755):
+ return os.makedirs(fs_encode(path), mode)
+
+# decode directory entries back to unicode
+def listdir(path):
+ return [fs_decode(x) for x in os.listdir(fs_encode(path))]
+
+def save_filename(name):
+ #remove some chars
+ if os.name == 'nt':
+ return remove_chars(name, '/\\?%*:|"<>,')
+ else:
+ return remove_chars(name, '/\\"')
+
+def stat(name):
+ return os.stat(fs_encode(name))
+
+def save_join(*args):
+ """ joins a path, encoding aware """
+ return fs_encode(join(*[x if type(x) == unicode else decode(x) for x in args]))
+
+def free_space(folder):
+ folder = fs_encode(folder)
+
+ if os.name == "nt":
+ import ctypes
+
+ free_bytes = ctypes.c_ulonglong(0)
+ ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
+ return free_bytes.value
+ else:
+ from os import statvfs
+
+ s = statvfs(folder)
+ return s.f_frsize * s.f_bavail
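
A sketch of the intended call pattern (Python 2): keep paths as unicode internally, let the wrappers run them through fs_encode when they touch the file system, and sanitize user-supplied names with save_filename. The file name below is made up for the example:

    from pyload.utils.fs import save_join, save_filename, exists, free_space

    path = save_join(u"downloads", save_filename(u"bad:file?name.txt"))  # ':' and '?' are stripped on Windows only
    if not exists(path):
        print "missing:", path
    print "free bytes:", free_space(u".")
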
diff --git a/pyload/utils/json_layer.py b/pyload/utils/json_layer.py
new file mode 100644
index 000000000..cf9743603
--- /dev/null
+++ b/pyload/utils/json_layer.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# abstraction layer for json operations
+
+print ".json_layer is deprecated, use .json instead"
+
+try: # since python 2.6
+ import json
+ from json import loads as json_loads
+ from json import dumps as json_dumps
+except ImportError: #use system simplejson if available
+ import simplejson as json
+ from simplejson import loads as json_loads
+ from simplejson import dumps as json_dumps
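
Since the module only re-exports the stdlib/simplejson functions, a minimal before/after sketch of the migration the deprecation warning asks for (the .json module it mentions is not part of this diff; the same names are also defined in pyload/utils/__init__.py above):

    # deprecated spelling, prints the warning on import
    from pyload.utils.json_layer import json_loads, json_dumps

    # equivalent helpers re-exported by the package itself
    from pyload.utils import json_loads, json_dumps

    print json_dumps({"a": 1})    # '{"a": 1}'
    print json_loads('{"a": 1}')  # {u'a': 1}
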
diff --git a/pyload/utils/packagetools.py b/pyload/utils/packagetools.py
new file mode 100644
index 000000000..791a46d51
--- /dev/null
+++ b/pyload/utils/packagetools.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+# JDownloader/src/jd/controlling/LinkGrabberPackager.java
+
+import re
+from urlparse import urlparse
+
+def matchFirst(string, *args):
+ """ matches against list of regexp and returns first match"""
+ for patternlist in args:
+ for pattern in patternlist:
+ r = pattern.search(string)
+ if r is not None:
+ name = r.group(1)
+ return name
+
+ return string
+
+
+def parseNames(files):
+ """ Generates packages names from name, data lists
+
+ :param files: list of (name, data)
+ :return: packagenames mapped to data lists (eg. urls)
+ """
+ packs = {}
+
+ endings = "\\.(3gp|7zip|7z|abr|ac3|aiff|aifc|aif|ai|au|avi|bin|bz2|cbr|cbz|ccf|cue|cvd|chm|dta|deb|divx|djvu|dlc|dmg|doc|docx|dot|eps|exe|ff|flv|f4v|gsd|gif|gz|iwd|iso|ipsw|java|jar|jpg|jpeg|jdeatme|load|mws|mw|m4v|m4a|mkv|mp2|mp3|mp4|mov|movie|mpeg|mpe|mpg|msi|msu|msp|nfo|npk|oga|ogg|ogv|otrkey|pkg|png|pdf|pptx|ppt|pps|ppz|pot|psd|qt|rmvb|rm|rar|ram|ra|rev|rnd|r\\d+|rpm|run|rsdf|rtf|sh(!?tml)|srt|snd|sfv|swf|tar|tif|tiff|ts|txt|viv|vivo|vob|wav|wmv|xla|xls|xpi|zeno|zip|z\\d+|_[_a-z]{2}|\\d+$)"
+
+ rarPats = [re.compile("(.*)(\\.|_|-)pa?r?t?\\.?[0-9]+.(rar|exe)$", re.I),
+ re.compile("(.*)(\\.|_|-)part\\.?[0]*[1].(rar|exe)$", re.I),
+ re.compile("(.*)\\.rar$", re.I),
+ re.compile("(.*)\\.r\\d+$", re.I),
+ re.compile("(.*)(\\.|_|-)\\d+$", re.I)]
+
+ zipPats = [re.compile("(.*)\\.zip$", re.I),
+ re.compile("(.*)\\.z\\d+$", re.I),
+ re.compile("(?is).*\\.7z\\.[\\d]+$", re.I),
+ re.compile("(.*)\\.a.$", re.I)]
+
+ ffsjPats = [re.compile("(.*)\\._((_[a-z])|([a-z]{2}))(\\.|$)"),
+ re.compile("(.*)(\\.|_|-)[\\d]+(" + endings + "$)", re.I)]
+
+ iszPats = [re.compile("(.*)\\.isz$", re.I),
+ re.compile("(.*)\\.i\\d{2}$", re.I)]
+
+ pat1 = re.compile("(\\.?CD\\d+)", re.I)
+ pat2 = re.compile("(\\.?part\\d+)", re.I)
+
+ pat3 = re.compile("(.+)[\\.\\-_]+$")
+ pat4 = re.compile("(.+)\\.\\d+\\.xtm$")
+
+ for file, url in files:
+ patternMatch = False
+
+ if file is None:
+ continue
+
+ # remove trailing /
+ name = file.rstrip('/')
+
+ # extract last path part .. if there is a path
+ split = name.rsplit("/", 1)
+ if len(split) > 1:
+ name = split.pop(1)
+
+ #check if an already existing package may be ok for this file
+ # found = False
+ # for pack in packs:
+ # if pack in file:
+ # packs[pack].append(url)
+ # found = True
+ # break
+ #
+ # if found: continue
+
+ # unrar pattern, 7zip/zip and hjmerge pattern, isz pattern, FFSJ pattern
+ before = name
+ name = matchFirst(name, rarPats, zipPats, iszPats, ffsjPats)
+ if before != name:
+ patternMatch = True
+
+ # xtremsplit pattern
+ r = pat4.search(name)
+ if r is not None:
+ name = r.group(1)
+
+ # remove part and cd pattern
+ r = pat1.search(name)
+ if r is not None:
+ name = name.replace(r.group(0), "")
+ patternMatch = True
+
+ r = pat2.search(name)
+ if r is not None:
+ name = name.replace(r.group(0), "")
+ patternMatch = True
+
+ # additional checks if extension pattern matched
+ if patternMatch:
+ # remove extension
+ index = name.rfind(".")
+ if index <= 0:
+ index = name.rfind("_")
+ if index > 0:
+ length = len(name) - index
+ if length <= 4:
+ name = name[:-length]
+
+ # remove endings like . _ -
+ r = pat3.search(name)
+ if r is not None:
+ name = r.group(1)
+
+ # replace . and _ with space
+ name = name.replace(".", " ")
+ name = name.replace("_", " ")
+
+ name = name.strip()
+ else:
+ name = ""
+
+ # fallback: package by hoster
+ if not name:
+ name = urlparse(file).hostname
+ if name: name = name.replace("www.", "")
+
+ # fallback : default name
+ if not name:
+ name = "unknown"
+
+ # build mapping
+ if name in packs:
+ packs[name].append(url)
+ else:
+ packs[name] = [url]
+
+ return packs
+
+
+if __name__ == "__main__":
+ from os.path import join
+ from pprint import pprint
+
+ f = open(join("..", "..", "testlinks2.txt"), "rb")
+ urls = [(x.strip(), x.strip()) for x in f.readlines() if x.strip()]
+ f.close()
+
+ print "Having %d urls." % len(urls)
+
+ packs = parseNames(urls)
+
+ pprint(packs)
+
+ print "Got %d urls." % sum([len(x) for x in packs.itervalues()])
diff --git a/pyload/utils/pylgettext.py b/pyload/utils/pylgettext.py
new file mode 100644
index 000000000..fb36fecee
--- /dev/null
+++ b/pyload/utils/pylgettext.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from gettext import *
+
+_searchdirs = None
+
+origfind = find
+
+def setpaths(pathlist):
+ global _searchdirs
+ if isinstance(pathlist, list):
+ _searchdirs = pathlist
+ else:
+ _searchdirs = list(pathlist)
+
+
+def addpath(path):
+ global _searchdirs
+ if _searchdirs is None:
+ _searchdirs = [path] # list(path) would split a string into single characters
+ else:
+ if path not in _searchdirs:
+ _searchdirs.append(path)
+
+
+def delpath(path):
+ global _searchdirs
+ if _searchdirs is not None:
+ if path in _searchdirs:
+ _searchdirs.remove(path)
+
+
+def clearpath():
+ global _searchdirs
+ if _searchdirs is not None:
+ _searchdirs = None
+
+
+def find(domain, localedir=None, languages=None, all=False):
+ if _searchdirs is None:
+ return origfind(domain, localedir, languages, all)
+ searches = [localedir] + _searchdirs
+ results = list()
+ for dir in searches:
+ res = origfind(domain, dir, languages, all)
+ if all is False:
+ results.append(res)
+ else:
+ results.extend(res)
+ if all is False:
+ results = filter(lambda x: x is not None, results)
+ if len(results) == 0:
+ return None
+ else:
+ return results[0]
+ else:
+ return results
+
+#Is there a smarter/cleaner pythonic way for this?
+translation.func_globals['find'] = find
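
A usage sketch (Python 2): register additional locale directories, then call translation() exactly as with the stock gettext module; the patched find() consults the registered paths as well. The domain and directory names below are made up for the example:

    import pyload.utils.pylgettext as gettext

    gettext.setpaths(["locale", "/usr/share/locale"])   # hypothetical search directories
    trans = gettext.translation("pyload", "locale", languages=["de"], fallback=True)
    trans.install(unicode=True)
    print _("Queue")                                     # translated if a catalog was found
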