Diffstat (limited to 'module/utils')
-rw-r--r--  module/utils/ImportDebugger.py  |  19
-rw-r--r--  module/utils/JsEngine.py        | 195
-rw-r--r--  module/utils/json_layer.py      |  15
-rw-r--r--  module/utils/packagetools.py    | 155
-rw-r--r--  module/utils/pylgettext.py      |  61
5 files changed, 445 insertions, 0 deletions
diff --git a/module/utils/ImportDebugger.py b/module/utils/ImportDebugger.py
new file mode 100644
index 000000000..a997f7b0c
--- /dev/null
+++ b/module/utils/ImportDebugger.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+import sys
+
+class ImportDebugger(object):
+
+ def __init__(self):
+ self.imported = {}
+
+ def find_module(self, name, path=None):
+
+ if name not in self.imported:
+ self.imported[name] = 0
+
+ self.imported[name] += 1
+
+ print name, path
+
+sys.meta_path.append(ImportDebugger()) \ No newline at end of file
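
A minimal usage sketch (hypothetical, not part of the commit), assuming module/utils is importable as the package "module.utils": importing the file is enough to install the hook, because the last line appends an ImportDebugger instance to sys.meta_path; find_module() only counts and prints, and by returning None it lets the normal import machinery do the actual loading.

    # hypothetical sketch (Python 2); the imported module names are for illustration only
    import module.utils.ImportDebugger   # installs the hook on sys.meta_path

    import cgi                            # hook prints: cgi None
    import cgi                            # already in sys.modules, hook not consulted again
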
diff --git a/module/utils/JsEngine.py b/module/utils/JsEngine.py
new file mode 100644
index 000000000..ef7494d16
--- /dev/null
+++ b/module/utils/JsEngine.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from imp import find_module
+from os.path import join, exists
+from urllib import quote
+
+
+ENGINE = ""
+
+DEBUG = False
+JS = False
+PYV8 = False
+NODE = False
+RHINO = False
+
+# TODO: Refactor + clean up this class
+
+if not ENGINE:
+ try:
+ import subprocess
+
+ subprocess.Popen(["js", "-v"], bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ p = subprocess.Popen(["js", "-e", "print(23+19)"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ #integrity check
+ if out.strip() == "42":
+ ENGINE = "js"
+ JS = True
+ except:
+ pass
+
+if not ENGINE or DEBUG:
+ try:
+ find_module("PyV8")
+ ENGINE = "pyv8"
+ PYV8 = True
+ except:
+ pass
+
+if not ENGINE or DEBUG:
+ try:
+ import subprocess
+ subprocess.Popen(["node", "-v"], bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ p = subprocess.Popen(["node", "-e", "console.log(23+19)"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ #integrity check
+ if out.strip() == "42":
+ ENGINE = "node"
+ NODE = True
+ except:
+ pass
+
+if not ENGINE or DEBUG:
+ try:
+ path = "" #path where to find rhino
+
+ if exists("/usr/share/java/js.jar"):
+ path = "/usr/share/java/js.jar"
+ elif exists("js.jar"):
+ path = "js.jar"
+        elif exists(join(pypath, "js.jar")): # may raise an exception if pypath is not defined, but js.jar wasn't found anyway
+ path = join(pypath, "js.jar")
+
+ if not path:
+ raise Exception
+
+ import subprocess
+
+ p = subprocess.Popen(["java", "-cp", path, "org.mozilla.javascript.tools.shell.Main", "-e", "print(23+19)"],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ #integrity check
+ if out.strip() == "42":
+ ENGINE = "rhino"
+ RHINO = True
+ except:
+ pass
+
+class JsEngine(object):
+    def __init__(self):
+        self.engine = ENGINE
+        self.init = False
+
+    def __nonzero__(self):
+        return bool(ENGINE)
+
+ def set_debug(self, value):
+ global DEBUG
+ DEBUG = value
+
+ def eval(self, script):
+ if not self.init:
+            if ENGINE == "pyv8" or (DEBUG and PYV8):
+                global PyV8  # declare before the import so PyV8 is bound at module level
+                import PyV8
+
+ self.init = True
+
+        if isinstance(script, unicode):
+ script = script.encode("utf8")
+
+ if not ENGINE:
+ raise Exception("No JS Engine")
+
+ if not DEBUG:
+ if ENGINE == "pyv8":
+ return self.eval_pyv8(script)
+ elif ENGINE == "js":
+ return self.eval_js(script)
+ elif ENGINE == "node":
+ return self.eval_node(script)
+ elif ENGINE == "rhino":
+ return self.eval_rhino(script)
+ else:
+ results = []
+ if PYV8:
+ res = self.eval_pyv8(script)
+ print "PyV8:", res
+ results.append(res)
+ if JS:
+ res = self.eval_js(script)
+ print "JS:", res
+ results.append(res)
+ if NODE:
+ res = self.eval_node(script)
+ print "NODE:", res
+ results.append(res)
+ if RHINO:
+ res = self.eval_rhino(script)
+ print "Rhino:", res
+ results.append(res)
+
+ warning = False
+ for x in results:
+ for y in results:
+ if x != y:
+ warning = True
+
+ if warning: print "### WARNING ###: Different results"
+
+ return results[0]
+
+ def eval_pyv8(self, script):
+ rt = PyV8.JSContext()
+ rt.enter()
+ return rt.eval(script)
+
+ def eval_js(self, script):
+ script = "print(eval(unescape('%s')))" % quote(script)
+ p = subprocess.Popen(["js", "-e", script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
+ out, err = p.communicate()
+ res = out.strip()
+ return res
+
+ def eval_node(self, script):
+ script = "console.log(eval(unescape('%s')))" % quote(script)
+ p = subprocess.Popen(["node", "-e", script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
+ out, err = p.communicate()
+ res = out.strip()
+ return res
+
+ def eval_rhino(self, script):
+ script = "print(eval(unescape('%s')))" % quote(script)
+ p = subprocess.Popen(["java", "-cp", path, "org.mozilla.javascript.tools.shell.Main", "-e", script],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
+ out, err = p.communicate()
+ res = out.strip()
+ return res.decode("utf8").encode("ISO-8859-1")
+
+ def error(self):
+        return _("No JS engine detected, please install Spidermonkey, ossp-js, PyV8, Node.js or Rhino")
+
+if __name__ == "__main__":
+ js = JsEngine()
+ js.set_debug(True)
+
+ test = u'"ü"+"ä"'
+ js.eval(test) \ No newline at end of file
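
A rough usage sketch (assumed, not part of the commit): the module-level detection above picks the first working engine at import time, and eval() either runs PyV8 in-process or shells out to the chosen command-line engine. The import path is an assumption based on the file location.

    # hypothetical sketch (Python 2), assuming "module.utils" is an importable package
    # and at least one supported engine is installed
    from module.utils.JsEngine import JsEngine

    js = JsEngine()
    if js:                          # __nonzero__() is False when no engine was detected
        print js.eval("23 + 19")    # "42" from the CLI engines; PyV8 returns a native value
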
diff --git a/module/utils/json_layer.py b/module/utils/json_layer.py
new file mode 100644
index 000000000..cf9743603
--- /dev/null
+++ b/module/utils/json_layer.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# abstraction layer for json operations
+
+print ".json_layer is deprecated, use .json instead"
+
+try: # since python 2.6
+ import json
+ from json import loads as json_loads
+ from json import dumps as json_dumps
+except ImportError:  # fall back to the system simplejson package on Python < 2.6
+ import simplejson as json
+ from simplejson import loads as json_loads
+ from simplejson import dumps as json_dumps
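
For context, a brief usage sketch (assumed, not shown in the commit): callers see the same names whichever backend was imported. The import path is an assumption based on the file location.

    # hypothetical sketch (Python 2); identical with stdlib json or simplejson
    from module.utils.json_layer import json_loads, json_dumps

    text = json_dumps({"name": "pyLoad"})   # '{"name": "pyLoad"}'
    data = json_loads(text)                 # {u'name': u'pyLoad'}
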
diff --git a/module/utils/packagetools.py b/module/utils/packagetools.py
new file mode 100644
index 000000000..791a46d51
--- /dev/null
+++ b/module/utils/packagetools.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+# JDownloader/src/jd/controlling/LinkGrabberPackager.java
+
+import re
+from urlparse import urlparse
+
+def matchFirst(string, *args):
+    """ Matches against lists of compiled regexes and returns group(1) of the first match; falls back to the input string if nothing matches """
+ for patternlist in args:
+ for pattern in patternlist:
+ r = pattern.search(string)
+ if r is not None:
+ name = r.group(1)
+ return name
+
+ return string
+
+
+def parseNames(files):
+    """ Generates package names from (name, data) pairs
+
+    :param files: list of (name, data) tuples
+    :return: package names mapped to data lists (e.g. urls)
+ """
+ packs = {}
+
+ endings = "\\.(3gp|7zip|7z|abr|ac3|aiff|aifc|aif|ai|au|avi|bin|bz2|cbr|cbz|ccf|cue|cvd|chm|dta|deb|divx|djvu|dlc|dmg|doc|docx|dot|eps|exe|ff|flv|f4v|gsd|gif|gz|iwd|iso|ipsw|java|jar|jpg|jpeg|jdeatme|load|mws|mw|m4v|m4a|mkv|mp2|mp3|mp4|mov|movie|mpeg|mpe|mpg|msi|msu|msp|nfo|npk|oga|ogg|ogv|otrkey|pkg|png|pdf|pptx|ppt|pps|ppz|pot|psd|qt|rmvb|rm|rar|ram|ra|rev|rnd|r\\d+|rpm|run|rsdf|rtf|sh(!?tml)|srt|snd|sfv|swf|tar|tif|tiff|ts|txt|viv|vivo|vob|wav|wmv|xla|xls|xpi|zeno|zip|z\\d+|_[_a-z]{2}|\\d+$)"
+
+ rarPats = [re.compile("(.*)(\\.|_|-)pa?r?t?\\.?[0-9]+.(rar|exe)$", re.I),
+ re.compile("(.*)(\\.|_|-)part\\.?[0]*[1].(rar|exe)$", re.I),
+ re.compile("(.*)\\.rar$", re.I),
+ re.compile("(.*)\\.r\\d+$", re.I),
+ re.compile("(.*)(\\.|_|-)\\d+$", re.I)]
+
+ zipPats = [re.compile("(.*)\\.zip$", re.I),
+ re.compile("(.*)\\.z\\d+$", re.I),
+               re.compile("(.*)\\.7z\\.[\\d]+$", re.I),  # capture group needed so matchFirst can return group(1)
+ re.compile("(.*)\\.a.$", re.I)]
+
+ ffsjPats = [re.compile("(.*)\\._((_[a-z])|([a-z]{2}))(\\.|$)"),
+ re.compile("(.*)(\\.|_|-)[\\d]+(" + endings + "$)", re.I)]
+
+ iszPats = [re.compile("(.*)\\.isz$", re.I),
+ re.compile("(.*)\\.i\\d{2}$", re.I)]
+
+ pat1 = re.compile("(\\.?CD\\d+)", re.I)
+ pat2 = re.compile("(\\.?part\\d+)", re.I)
+
+ pat3 = re.compile("(.+)[\\.\\-_]+$")
+ pat4 = re.compile("(.+)\\.\\d+\\.xtm$")
+
+ for file, url in files:
+ patternMatch = False
+
+ if file is None:
+ continue
+
+ # remove trailing /
+ name = file.rstrip('/')
+
+ # extract last path part .. if there is a path
+ split = name.rsplit("/", 1)
+ if len(split) > 1:
+ name = split.pop(1)
+
+ #check if an already existing package may be ok for this file
+ # found = False
+ # for pack in packs:
+ # if pack in file:
+ # packs[pack].append(url)
+ # found = True
+ # break
+ #
+ # if found: continue
+
+ # unrar pattern, 7zip/zip and hjmerge pattern, isz pattern, FFSJ pattern
+ before = name
+ name = matchFirst(name, rarPats, zipPats, iszPats, ffsjPats)
+ if before != name:
+ patternMatch = True
+
+ # xtremsplit pattern
+ r = pat4.search(name)
+ if r is not None:
+ name = r.group(1)
+
+ # remove part and cd pattern
+ r = pat1.search(name)
+ if r is not None:
+ name = name.replace(r.group(0), "")
+ patternMatch = True
+
+ r = pat2.search(name)
+ if r is not None:
+ name = name.replace(r.group(0), "")
+ patternMatch = True
+
+ # additional checks if extension pattern matched
+ if patternMatch:
+ # remove extension
+ index = name.rfind(".")
+ if index <= 0:
+ index = name.rfind("_")
+ if index > 0:
+ length = len(name) - index
+ if length <= 4:
+ name = name[:-length]
+
+ # remove endings like . _ -
+ r = pat3.search(name)
+ if r is not None:
+ name = r.group(1)
+
+ # replace . and _ with space
+ name = name.replace(".", " ")
+ name = name.replace("_", " ")
+
+ name = name.strip()
+ else:
+ name = ""
+
+ # fallback: package by hoster
+ if not name:
+ name = urlparse(file).hostname
+ if name: name = name.replace("www.", "")
+
+ # fallback : default name
+ if not name:
+ name = "unknown"
+
+ # build mapping
+ if name in packs:
+ packs[name].append(url)
+ else:
+ packs[name] = [url]
+
+ return packs
+
+
+if __name__ == "__main__":
+ from os.path import join
+ from pprint import pprint
+
+ f = open(join("..", "..", "testlinks2.txt"), "rb")
+ urls = [(x.strip(), x.strip()) for x in f.readlines() if x.strip()]
+ f.close()
+
+    print "Read %d urls." % len(urls)
+
+ packs = parseNames(urls)
+
+ pprint(packs)
+
+ print "Got %d urls." % sum([len(x) for x in packs.itervalues()])
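
A small illustrative example of what parseNames() produces (file names, URLs and the resulting package names below are invented, and the import path is assumed from the file location): multi-part archives collapse to a single package, and names with no recognizable pattern fall back to the hostname or "unknown".

    # hypothetical example (Python 2); input is a list of (name, url) tuples
    from module.utils.packagetools import parseNames

    links = [("Some.Movie.part1.rar", "http://host/dl/1"),
             ("Some.Movie.part2.rar", "http://host/dl/2"),
             ("http://example.com/readme.txt", "http://example.com/readme.txt")]

    print parseNames(links)
    # expected shape (illustrative):
    # {'Some Movie':  ['http://host/dl/1', 'http://host/dl/2'],
    #  'example.com': ['http://example.com/readme.txt']}
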
diff --git a/module/utils/pylgettext.py b/module/utils/pylgettext.py
new file mode 100644
index 000000000..fb36fecee
--- /dev/null
+++ b/module/utils/pylgettext.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from gettext import *
+
+_searchdirs = None
+
+origfind = find
+
+def setpaths(pathlist):
+ global _searchdirs
+ if isinstance(pathlist, list):
+ _searchdirs = pathlist
+ else:
+ _searchdirs = list(pathlist)
+
+
+def addpath(path):
+ global _searchdirs
+ if _searchdirs is None:
+        _searchdirs = [path]  # list(path) would split the string into single characters
+ else:
+ if path not in _searchdirs:
+ _searchdirs.append(path)
+
+
+def delpath(path):
+ global _searchdirs
+ if _searchdirs is not None:
+ if path in _searchdirs:
+ _searchdirs.remove(path)
+
+
+def clearpath():
+ global _searchdirs
+ if _searchdirs is not None:
+ _searchdirs = None
+
+
+def find(domain, localedir=None, languages=None, all=False):
+ if _searchdirs is None:
+ return origfind(domain, localedir, languages, all)
+ searches = [localedir] + _searchdirs
+ results = list()
+ for dir in searches:
+ res = origfind(domain, dir, languages, all)
+ if all is False:
+ results.append(res)
+ else:
+ results.extend(res)
+ if all is False:
+ results = filter(lambda x: x is not None, results)
+ if len(results) == 0:
+ return None
+ else:
+ return results[0]
+ else:
+ return results
+
+#Is there a smarter/cleaner pythonic way for this?
+translation.func_globals['find'] = find
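
A hedged sketch of the intended call pattern (locale directories and the translation domain below are placeholders, and the import path is assumed from the file location): setpaths()/addpath() register extra locale directories, and translation() reaches the patched find() through the func_globals swap above, so the explicit localedir is searched first and the registered paths afterwards.

    # hypothetical sketch (Python 2); paths and domain are placeholders
    import module.utils.pylgettext as gettext

    gettext.setpaths(["/usr/share/pyload/locale"])   # extra dirs consulted by find()
    gettext.addpath("locale")                        # appended if not already registered

    trans = gettext.translation("pyLoad", "locale", languages=["en"], fallback=True)
    _ = trans.gettext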