Diffstat (limited to 'pyload/utils')
-rw-r--r--  pyload/utils/JsEngine.py       155
-rw-r--r--  pyload/utils/__init__.py       227
-rw-r--r--  pyload/utils/packagetools.py   136
-rw-r--r--  pyload/utils/pylgettext.py      60
4 files changed, 578 insertions, 0 deletions
diff --git a/pyload/utils/JsEngine.py b/pyload/utils/JsEngine.py
new file mode 100644
index 000000000..46789f64d
--- /dev/null
+++ b/pyload/utils/JsEngine.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from imp import find_module
+from os.path import join, exists
+from urllib import quote
+
+
+ENGINE = ""
+
+DEBUG = False
+JS = False
+PYV8 = False
+RHINO = False
+
+
+if not ENGINE:
+ try:
+ import subprocess
+
+        # probe the js binary first (raises OSError and skips this engine if it is missing)
+        subprocess.Popen(["js", "-v"], bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
+ p = subprocess.Popen(["js", "-e", "print(23+19)"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ #integrity check
+ if out.strip() == "42":
+ ENGINE = "js"
+ JS = True
+ except:
+ pass
+
+if not ENGINE or DEBUG:
+ try:
+ find_module("PyV8")
+ ENGINE = "pyv8"
+ PYV8 = True
+ except:
+ pass
+
+if not ENGINE or DEBUG:
+ try:
+ path = "" #path where to find rhino
+
+ if exists("/usr/share/java/js.jar"):
+ path = "/usr/share/java/js.jar"
+ elif exists("js.jar"):
+ path = "js.jar"
+        elif exists(join(pypath, "js.jar")):  # may raise a NameError if pypath is unset, but then js.jar wasn't found anyway
+ path = join(pypath, "js.jar")
+
+ if not path:
+ raise Exception
+
+ import subprocess
+
+ p = subprocess.Popen(["java", "-cp", path, "org.mozilla.javascript.tools.shell.Main", "-e", "print(23+19)"],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ #integrity check
+ if out.strip() == "42":
+ ENGINE = "rhino"
+ RHINO = True
+ except:
+ pass
+
+class JsEngine:
+ def __init__(self):
+ self.engine = ENGINE
+ self.init = False
+
+ def __nonzero__(self):
+        return bool(ENGINE)
+
+ def eval(self, script):
+ if not self.init:
+ if ENGINE == "pyv8" or (DEBUG and PYV8):
+                global PyV8
+                import PyV8
+
+ self.init = True
+
+ if type(script) == unicode:
+ script = script.encode("utf8")
+
+ if not ENGINE:
+ raise Exception("No JS Engine")
+
+ if not DEBUG:
+ if ENGINE == "pyv8":
+ return self.eval_pyv8(script)
+ elif ENGINE == "js":
+ return self.eval_js(script)
+ elif ENGINE == "rhino":
+ return self.eval_rhino(script)
+ else:
+ results = []
+ if PYV8:
+ res = self.eval_pyv8(script)
+ print "PyV8:", res
+ results.append(res)
+ if JS:
+ res = self.eval_js(script)
+ print "JS:", res
+ results.append(res)
+ if RHINO:
+ res = self.eval_rhino(script)
+ print "Rhino:", res
+ results.append(res)
+
+ warning = False
+ for x in results:
+ for y in results:
+ if x != y:
+ warning = True
+
+ if warning: print "### WARNING ###: Different results"
+
+ return results[0]
+
+ def eval_pyv8(self, script):
+ rt = PyV8.JSContext()
+ rt.enter()
+ return rt.eval(script)
+
+ def eval_js(self, script):
+ script = "print(eval(unescape('%s')))" % quote(script)
+ p = subprocess.Popen(["js", "-e", script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
+ out, err = p.communicate()
+ res = out.strip()
+ return res
+
+ def eval_rhino(self, script):
+ script = "print(eval(unescape('%s')))" % quote(script)
+ p = subprocess.Popen(["java", "-cp", path, "org.mozilla.javascript.tools.shell.Main", "-e", script],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
+ out, err = p.communicate()
+ res = out.strip()
+ return res.decode("utf8").encode("ISO-8859-1")
+
+ def error(self):
+ return _("No js engine detected, please install either Spidermonkey, ossp-js, pyv8 or rhino")
diff --git a/pyload/utils/__init__.py b/pyload/utils/__init__.py
new file mode 100644
index 000000000..b9eb8c88e
--- /dev/null
+++ b/pyload/utils/__init__.py
@@ -0,0 +1,227 @@
+# -*- coding: utf-8 -*-
+
+""" Store all useful functions here """
+
+import os
+import sys
+import time
+import re
+from os.path import join
+from string import maketrans
+from htmlentitydefs import name2codepoint
+
+
+def chmod(*args):
+ try:
+ os.chmod(*args)
+ except:
+ pass
+
+
+def decode(string):
+ """ decode string with utf if possible """
+ try:
+ return string.decode("utf8", "replace")
+ except:
+ return string
+
+
+def remove_chars(string, repl):
+ """ removes all chars in repl from string"""
+ if type(repl) == unicode:
+ for badc in list(repl):
+ string = string.replace(badc, "")
+ return string
+ else:
+ if type(string) == str:
+ return string.translate(maketrans("", ""), repl)
+ elif type(string) == unicode:
+ return string.translate(dict([(ord(s), None) for s in repl]))
+
+
+def safe_filename(name):
+ """ remove bad chars """
+    name = name.encode('ascii', 'replace')  # Non-ASCII chars usually break file saving, so replace them
+ if os.name == 'nt':
+ return remove_chars(name, u'\00\01\02\03\04\05\06\07\10\11\12\13\14\15\16\17\20\21\22\23\24\25\26\27\30\31\32'
+ u'\33\34\35\36\37/\\?%*:|"<>')
+ else:
+ return remove_chars(name, u'\0/\\"')
+
+#: Deprecated method
+def save_path(name):
+ return safe_filename(name)
+
+
+def safe_join(*args):
+ """ joins a path, encoding aware """
+ return fs_encode(join(*[x if type(x) == unicode else decode(x) for x in args]))
+
+#: Deprecated method
+def save_join(*args):
+ return safe_join(*args)
+
+
+# File System Encoding functions:
+# Use fs_encode before accessing files on disk; it will encode the string properly
+
+if sys.getfilesystemencoding().startswith('ANSI'):
+ def fs_encode(string):
+ try:
+ string = string.encode('utf-8')
+ finally:
+ return string
+
+ fs_decode = decode #decode utf8
+
+else:
+ fs_encode = fs_decode = lambda x: x # do nothing
+
+
+def get_console_encoding(enc):
+ if os.name == "nt":
+ if enc == "cp65001": # aka UTF-8
+ print "WARNING: Windows codepage 65001 is not supported."
+ enc = "cp850"
+ else:
+ enc = "utf8"
+
+ return enc
+
+
+def compare_time(start, end):
+ start = map(int, start)
+ end = map(int, end)
+
+ if start == end: return True
+
+ now = list(time.localtime()[3:5])
+ if start < now < end: return True
+ elif start > end and (now > start or now < end): return True
+ elif start < now > end < start: return True
+ else: return False
+
+
+def formatSize(size):
+ """formats size of bytes"""
+ size = int(size)
+ steps = 0
+ sizes = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB")
+ while size > 1000:
+ size /= 1024.0
+ steps += 1
+ return "%.2f %s" % (size, sizes[steps])
+
+
+def formatSpeed(speed):
+ return formatSize(speed) + "/s"
+
+
+def freeSpace(folder):
+ if os.name == "nt":
+ import ctypes
+
+ free_bytes = ctypes.c_ulonglong(0)
+ ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
+ return free_bytes.value
+ else:
+ s = os.statvfs(folder)
+ return s.f_bsize * s.f_bavail
+
+
+def fs_bsize(path):
+ """ get optimal file system buffer size (in bytes) for I/O calls """
+ path = fs_encode(path)
+
+ if os.name == "nt":
+ import ctypes
+
+ drive = "%s\\" % os.path.splitdrive(path)[0]
+        cluster_sectors = ctypes.c_longlong(0)
+        sector_size = ctypes.c_longlong(0)
+        ctypes.windll.kernel32.GetDiskFreeSpaceW(ctypes.c_wchar_p(drive), ctypes.pointer(cluster_sectors),
+                                                 ctypes.pointer(sector_size), None, None)
+        return cluster_sectors.value * sector_size.value
+ else:
+ return os.statvfs(path).f_bsize
+
+
+def uniqify(seq): #: Originally by Dave Kirby
+ """ removes duplicates from list, preserve order """
+ seen = set()
+ seen_add = seen.add
+ return [x for x in seq if x not in seen and not seen_add(x)]
+
+
+def parseFileSize(string, unit=None): #returns bytes
+ if not unit:
+ m = re.match(r"([\d.,]+) *([a-zA-Z]*)", string.strip().lower())
+ if m:
+ traffic = float(m.group(1).replace(",", "."))
+ unit = m.group(2)
+ else:
+ return 0
+ else:
+ if isinstance(string, basestring):
+ traffic = float(string.replace(",", "."))
+ else:
+ traffic = string
+
+ #ignore case
+ unit = unit.lower().strip()
+
+ if unit in ("eb", "ebyte", "exabyte", "eib", "e"):
+ traffic *= 1 << 60
+ elif unit in ("pb", "pbyte", "petabyte", "pib", "p"):
+ traffic *= 1 << 50
+ elif unit in ("tb", "tbyte", "terabyte", "tib", "t"):
+ traffic *= 1 << 40
+ elif unit in ("gb", "gbyte", "gigabyte", "gib", "g", "gig"):
+ traffic *= 1 << 30
+ elif unit in ("mb", "mbyte", "megabyte", "mib", "m"):
+ traffic *= 1 << 20
+ elif unit in ("kb", "kbyte", "kilobyte", "kib", "k"):
+ traffic *= 1 << 10
+
+ return traffic
+
+
+def lock(func):
+ def new(*args):
+ #print "Handler: %s args: %s" % (func, args[1:])
+ args[0].lock.acquire()
+ try:
+ return func(*args)
+ finally:
+ args[0].lock.release()
+
+ return new
+
+
+def fixup(m):
+ text = m.group(0)
+ if text[:2] == "&#":
+ # character reference
+ try:
+ if text[:3] == "&#x":
+ return unichr(int(text[3:-1], 16))
+ else:
+ return unichr(int(text[2:-1]))
+ except ValueError:
+ pass
+ else:
+ # named entity
+ try:
+ name = text[1:-1]
+ text = unichr(name2codepoint[name])
+ except KeyError:
+ pass
+
+ return text # leave as is
+
+
+def html_unescape(text):
+ """Removes HTML or XML character references and entities from a text string"""
+ return re.sub("&#?\w+;", fixup, text)
+
+
+def versiontuple(v): #: By kindall (http://stackoverflow.com/a/11887825)
+ return tuple(map(int, (v.split("."))))
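
A short, hedged sketch of how a few of these helpers behave (Python 2; the expected values follow from the code above and are not a test suite):

# -*- coding: utf-8 -*-
# Illustrative calls only; the inputs are made up.
from pyload.utils import formatSize, parseFileSize, safe_filename, html_unescape, versiontuple

print formatSize(1024 ** 2)           # "1.00 MiB"
print parseFileSize("1,5 GB")         # 1610612736.0 (comma is read as a decimal point)
print safe_filename(u'foo:bar*.txt')  # "foobar.txt" on Windows, unchanged elsewhere
print html_unescape("&amp; &#64;")    # u"& @"
print versiontuple("0.4.9")           # (0, 4, 9)
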
diff --git a/pyload/utils/packagetools.py b/pyload/utils/packagetools.py
new file mode 100644
index 000000000..d5ab4d182
--- /dev/null
+++ b/pyload/utils/packagetools.py
@@ -0,0 +1,136 @@
+# JDownloader/src/jd/controlling/LinkGrabberPackager.java
+
+import re
+from urlparse import urlparse
+
+def matchFirst(string, *args):
+ """ matches against list of regexp and returns first match"""
+ for patternlist in args:
+ for pattern in patternlist:
+ r = pattern.search(string)
+ if r is not None:
+ name = r.group(1)
+ return name
+
+ return string
+
+
+def parseNames(files):
+ """ Generates packages names from name, data lists
+
+ :param files: list of (name, data)
+ :return: packagenames mapt to data lists (eg. urls)
+ """
+ packs = {}
+
+ endings = "\\.(3gp|7zip|7z|abr|ac3|aiff|aifc|aif|ai|au|avi|bin|bz2|cbr|cbz|ccf|cue|cvd|chm|dta|deb|divx|djvu|dlc|dmg|doc|docx|dot|eps|exe|ff|flv|f4v|gsd|gif|gz|iwd|iso|ipsw|java|jar|jpg|jpeg|jdeatme|load|mws|mw|m4v|m4a|mkv|mp2|mp3|mp4|mov|movie|mpeg|mpe|mpg|msi|msu|msp|nfo|npk|oga|ogg|ogv|otrkey|pkg|png|pdf|pptx|ppt|pps|ppz|pot|psd|qt|rmvb|rm|rar|ram|ra|rev|rnd|r\\d+|rpm|run|rsdf|rtf|sh(!?tml)|srt|snd|sfv|swf|tar|tif|tiff|ts|txt|viv|vivo|vob|wav|wmv|xla|xls|xpi|zeno|zip|z\\d+|_[_a-z]{2}|\\d+$)"
+
+ rarPats = [re.compile("(.*)(\\.|_|-)pa?r?t?\\.?[0-9]+.(rar|exe)$", re.I),
+ re.compile("(.*)(\\.|_|-)part\\.?[0]*[1].(rar|exe)$", re.I),
+ re.compile("(.*)\\.rar$", re.I),
+ re.compile("(.*)\\.r\\d+$", re.I),
+ re.compile("(.*)(\\.|_|-)\\d+$", re.I)]
+
+ zipPats = [re.compile("(.*)\\.zip$", re.I),
+ re.compile("(.*)\\.z\\d+$", re.I),
+ re.compile("(?is).*\\.7z\\.[\\d]+$", re.I),
+ re.compile("(.*)\\.a.$", re.I)]
+
+ ffsjPats = [re.compile("(.*)\\._((_[a-z])|([a-z]{2}))(\\.|$)"),
+ re.compile("(.*)(\\.|_|-)[\\d]+(" + endings + "$)", re.I)]
+
+ iszPats = [re.compile("(.*)\\.isz$", re.I),
+ re.compile("(.*)\\.i\\d{2}$", re.I)]
+
+    pat1 = re.compile("(\\.?CD\\d+)", re.I)    # CD volume markers
+    pat2 = re.compile("(\\.?part\\d+)", re.I)  # part number markers
+
+    pat3 = re.compile("(.+)[\\.\\-_]+$")       # trailing separators (. - _)
+    pat4 = re.compile("(.+)\\.\\d+\\.xtm$")    # xtremsplit volumes
+
+ for file, url in files:
+ patternMatch = False
+
+ if file is None:
+ continue
+
+ # remove trailing /
+ name = file.rstrip('/')
+
+ # extract last path part .. if there is a path
+ split = name.rsplit("/", 1)
+ if len(split) > 1:
+ name = split.pop(1)
+
+        # check if an already existing package may be ok for this file
+ # found = False
+ # for pack in packs:
+ # if pack in file:
+ # packs[pack].append(url)
+ # found = True
+ # break
+ #
+ # if found: continue
+
+ # unrar pattern, 7zip/zip and hjmerge pattern, isz pattern, FFSJ pattern
+ before = name
+ name = matchFirst(name, rarPats, zipPats, iszPats, ffsjPats)
+ if before != name:
+ patternMatch = True
+
+ # xtremsplit pattern
+ r = pat4.search(name)
+ if r is not None:
+ name = r.group(1)
+
+ # remove part and cd pattern
+ r = pat1.search(name)
+ if r is not None:
+ name = name.replace(r.group(0), "")
+ patternMatch = True
+
+ r = pat2.search(name)
+ if r is not None:
+ name = name.replace(r.group(0), "")
+ patternMatch = True
+
+ # additional checks if extension pattern matched
+ if patternMatch:
+ # remove extension
+ index = name.rfind(".")
+ if index <= 0:
+ index = name.rfind("_")
+ if index > 0:
+ length = len(name) - index
+ if length <= 4:
+ name = name[:-length]
+
+ # remove endings like . _ -
+ r = pat3.search(name)
+ if r is not None:
+ name = r.group(1)
+
+ # replace . and _ with space
+ name = name.replace(".", " ")
+ name = name.replace("_", " ")
+
+ name = name.strip()
+ else:
+ name = ""
+
+ # fallback: package by hoster
+ if not name:
+ name = urlparse(file).hostname
+ if name: name = name.replace("www.", "")
+
+ # fallback : default name
+ if not name:
+ name = "unknown"
+
+ # build mapping
+ if name in packs:
+ packs[name].append(url)
+ else:
+ packs[name] = [url]
+
+ return packs
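
A hedged example of the grouping parseNames produces; the (name, url) pairs are made up and only the name handling matters:

# -*- coding: utf-8 -*-
# Hypothetical input, outside the commit above.
from pyload.utils.packagetools import parseNames

files = [("My.Movie.part1.rar", "http://example.com/1"),
         ("My.Movie.part2.rar", "http://example.com/2"),
         ("other.iso",          "http://example.com/3")]

print parseNames(files)
# roughly: {"My Movie": ["http://example.com/1", "http://example.com/2"],
#           "unknown":  ["http://example.com/3"]}
# the .iso matches no archive pattern and carries no hostname, so it falls back to "unknown"
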
diff --git a/pyload/utils/pylgettext.py b/pyload/utils/pylgettext.py
new file mode 100644
index 000000000..cab631cf4
--- /dev/null
+++ b/pyload/utils/pylgettext.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+from gettext import *
+
+_searchdirs = None
+
+origfind = find
+
+def setpaths(pathlist):
+ global _searchdirs
+ if isinstance(pathlist, list):
+ _searchdirs = pathlist
+ else:
+ _searchdirs = list(pathlist)
+
+
+def addpath(path):
+ global _searchdirs
+ if _searchdirs is None:
+        _searchdirs = [path]  # a single path; list(path) would split a string into characters
+ else:
+ if path not in _searchdirs:
+ _searchdirs.append(path)
+
+
+def delpath(path):
+ global _searchdirs
+ if _searchdirs is not None:
+ if path in _searchdirs:
+ _searchdirs.remove(path)
+
+
+def clearpath():
+ global _searchdirs
+ if _searchdirs is not None:
+ _searchdirs = None
+
+
+def find(domain, localedir=None, languages=None, all=False):
+ if _searchdirs is None:
+ return origfind(domain, localedir, languages, all)
+ searches = [localedir] + _searchdirs
+ results = list()
+ for dir in searches:
+ res = origfind(domain, dir, languages, all)
+ if all is False:
+ results.append(res)
+ else:
+ results.extend(res)
+ if all is False:
+ results = filter(lambda x: x is not None, results)
+ if len(results) == 0:
+ return None
+ else:
+ return results[0]
+ else:
+ return results
+
+#Is there a smarter/cleaner pythonic way for this?
+translation.func_globals['find'] = find
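
A brief sketch of using pylgettext as a drop-in gettext replacement; the domain name and locale directories are assumptions for illustration:

# -*- coding: utf-8 -*-
# Hypothetical setup; pylgettext re-exports everything from gettext.
from pyload.utils import pylgettext as gettext

gettext.setpaths(["locale", "/usr/share/locale"])   # directories searched in order
gettext.addpath("/usr/local/share/locale")          # appended if not already present

# translation() goes through the patched find(), which walks the search dirs
trans = gettext.translation("pyLoad", "locale", languages=["de"], fallback=True)
trans.install(unicode=True)   # installs _() into builtins, as plain gettext does
print _("Queue")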