summaryrefslogtreecommitdiffstats
path: root/pyload/utils
diff options
context:
space:
mode:
Diffstat (limited to 'pyload/utils')
-rw-r--r--pyload/utils/ImportDebugger.py19
-rw-r--r--pyload/utils/JsEngine.py195
-rw-r--r--pyload/utils/PluginLoader.py333
-rw-r--r--pyload/utils/__init__.py244
-rw-r--r--pyload/utils/filetypes.py24
-rw-r--r--pyload/utils/fs.py90
-rw-r--r--pyload/utils/json_layer.py15
-rw-r--r--pyload/utils/packagetools.py155
-rw-r--r--pyload/utils/pylgettext.py61
9 files changed, 1136 insertions, 0 deletions
diff --git a/pyload/utils/ImportDebugger.py b/pyload/utils/ImportDebugger.py
new file mode 100644
index 000000000..a997f7b0c
--- /dev/null
+++ b/pyload/utils/ImportDebugger.py
@@ -0,0 +1,19 @@
+# -*- coding: utf-8 -*-
+
+import sys
+
class ImportDebugger(object):
    """ PEP 302 meta-path finder that never resolves anything: it only counts and
    prints every import attempt, then falls through to the regular import machinery. """

    def __init__(self):
        # name -> number of times an import of that name was attempted
        self.imported = {}

    def find_module(self, name, path=None):

        if name not in self.imported:
            self.imported[name] = 0

        self.imported[name] += 1

        print name, path
        # implicitly returns None so the normal importers handle the module

# registering at import time turns on the tracing globally (Python 2 only)
sys.meta_path.append(ImportDebugger())
diff --git a/pyload/utils/JsEngine.py b/pyload/utils/JsEngine.py
new file mode 100644
index 000000000..3318ffb2a
--- /dev/null
+++ b/pyload/utils/JsEngine.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: RaNaN
+"""
+
+from imp import find_module
+from os.path import join, exists
+from urllib import quote
+
+
# Name of the first working JavaScript engine found below ("js", "pyv8",
# "node" or "rhino"); empty string means none is available.
ENGINE = ""

DEBUG = False
# Availability flags per engine; in DEBUG mode all engines are probed so
# results can be cross-checked in JsEngine.eval().
JS = False
PYV8 = False
NODE = False
RHINO = False

# TODO: Refactor + clean up this class

# Each probe runs "23+19" and accepts the engine only when it prints 42.
# Failures of any kind (missing binary, import error) simply leave the flag unset.
if not ENGINE:
    try:
        import subprocess

        subprocess.Popen(["js", "-v"], bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        p = subprocess.Popen(["js", "-e", "print(23+19)"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        #integrity check
        if out.strip() == "42":
            ENGINE = "js"
            JS = True
    except:
        pass

if not ENGINE or DEBUG:
    try:
        import PyV8
        ENGINE = "pyv8"
        PYV8 = True
    except:
        pass

if not ENGINE or DEBUG:
    try:
        import subprocess
        subprocess.Popen(["node", "-v"], bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
        p = subprocess.Popen(["node", "-e", "console.log(23+19)"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        #integrity check
        if out.strip() == "42":
            ENGINE = "node"
            NODE = True
    except:
        pass

if not ENGINE or DEBUG:
    try:
        path = "" #path where to find rhino

        if exists("/usr/share/java/js.jar"):
            path = "/usr/share/java/js.jar"
        elif exists("js.jar"):
            path = "js.jar"
        # NOTE(review): pypath looks like a builtin injected by the pyload
        # bootstrap - it is not defined in this module; a NameError here is
        # swallowed by the bare except below
        elif exists(join(pypath, "js.jar")): #may raise an exception, but js.jar wasn't found anyway
            path = join(pypath, "js.jar")

        if not path:
            raise Exception

        import subprocess

        p = subprocess.Popen(["java", "-cp", path, "org.mozilla.javascript.tools.shell.Main", "-e", "print(23+19)"],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        #integrity check
        if out.strip() == "42":
            ENGINE = "rhino"
            RHINO = True
    except:
        pass
+
class JsEngine():
    """ Thin facade over whichever JavaScript engine was detected at module
    import time. In DEBUG mode eval() runs the script on every available
    engine and compares the results. """

    def __init__(self):
        self.engine = ENGINE
        # becomes True after the lazy PyV8 import in eval()
        self.init = False

    def __nonzero__(self):
        # truthiness mirrors "an engine is available" (Python 2 protocol)
        return False if not ENGINE else True

    def set_debug(self, value):
        """ Toggle module wide debug/cross-check mode. """
        global DEBUG
        DEBUG = value

    def eval(self, script):
        """ Evaluate a JS expression and return the engine's printed result
        as a string (PyV8 returns the native value). Raises when no engine
        was detected. """
        if not self.init:
            if ENGINE == "pyv8" or (DEBUG and PYV8):
                # lazy import; the global statement rebinds the module level
                # PyV8 name for eval_pyv8() (py2 tolerates the ordering)
                import PyV8
                global PyV8

            self.init = True

        if type(script) == unicode:
            script = script.encode("utf8")

        if not ENGINE:
            raise Exception("No JS Engine")

        if not DEBUG:
            if ENGINE == "pyv8":
                return self.eval_pyv8(script)
            elif ENGINE == "js":
                return self.eval_js(script)
            elif ENGINE == "node":
                return self.eval_node(script)
            elif ENGINE == "rhino":
                return self.eval_rhino(script)
        else:
            # debug mode: run on every engine and warn on diverging results
            results = []
            if PYV8:
                res = self.eval_pyv8(script)
                print "PyV8:", res
                results.append(res)
            if JS:
                res = self.eval_js(script)
                print "JS:", res
                results.append(res)
            if NODE:
                res = self.eval_node(script)
                print "NODE:", res
                results.append(res)
            if RHINO:
                res = self.eval_rhino(script)
                print "Rhino:", res
                results.append(res)

            warning = False
            for x in results:
                for y in results:
                    if x != y:
                        warning = True

            if warning: print "### WARNING ###: Different results"

            return results[0]

    def eval_pyv8(self, script):
        rt = PyV8.JSContext()
        rt.enter()
        return rt.eval(script)

    def eval_js(self, script):
        # urllib.quote + unescape shields the script from shell/argument quoting
        script = "print(eval(unescape('%s')))" % quote(script)
        p = subprocess.Popen(["js", "-e", script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
        out, err = p.communicate()
        res = out.strip()
        return res

    def eval_node(self, script):
        script = "console.log(eval(unescape('%s')))" % quote(script)
        p = subprocess.Popen(["node", "-e", script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
        out, err = p.communicate()
        res = out.strip()
        return res

    def eval_rhino(self, script):
        # 'path' is the module level rhino jar location found during detection
        script = "print(eval(unescape('%s')))" % quote(script)
        p = subprocess.Popen(["java", "-cp", path, "org.mozilla.javascript.tools.shell.Main", "-e", script],
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT, bufsize=-1)
        out, err = p.communicate()
        res = out.strip()
        # rhino prints latin-1; recode so callers get consistent bytes
        return res.decode("utf8").encode("ISO-8859-1")

    def error(self):
        """ Localized message for the missing-engine case. """
        return _("No js engine detected, please install either Spidermonkey, ossp-js, pyv8, nodejs or rhino")
+
if __name__ == "__main__":
    # manual smoke test: evaluate a unicode concatenation on all engines
    js = JsEngine()
    js.set_debug(True)

    test = u'"ü"+"ä"'
    js.eval(test)
diff --git a/pyload/utils/PluginLoader.py b/pyload/utils/PluginLoader.py
new file mode 100644
index 000000000..038ac9b23
--- /dev/null
+++ b/pyload/utils/PluginLoader.py
@@ -0,0 +1,333 @@
+###############################################################################
+# Copyright(c) 2008-2013 pyLoad Team
+# http://www.pyload.org
+#
+# This file is part of pyLoad.
+# pyLoad is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# Subjected to the terms and conditions in LICENSE
+#
+# @author: RaNaN
+###############################################################################
+
+import re
+
+from os import listdir, makedirs
+from os.path import isfile, join, exists, basename
+from sys import version_info
+from time import time
+from collections import defaultdict
+from logging import getLogger
+
+from pyload.lib.SafeEval import const_eval as literal_eval
+from pyload.plugins.Base import Base
+
from new_collections import namedtuple

# One indexed plugin: version (float), re (compiled URL pattern), deps,
# category (addons only), user (usable in user context), path (source file)
PluginTuple = namedtuple("PluginTuple", "version re deps category user path")
+
+
class BaseAttributes(defaultdict):
    """ Dictionary whose missing keys fall back to the matching dunder
    attribute (``__key__``) declared on the Base plugin class. """

    def __missing__(self, key):
        dunder = "__%s__" % key
        if hasattr(Base, dunder):
            return getattr(Base, dunder)
        # no default on Base either -> behave like a plain defaultdict
        return defaultdict.__missing__(self, key)
+
class LoaderFactory:
    """ Container for multiple plugin loaders """

    def __init__(self, *loader):
        # loaders are consulted in the order given
        self.loader = list(loader)

    def __iter__(self):
        return self.loader.__iter__()


    def checkVersions(self):
        """ Reduces every plugin loader to the globally newest version.
        Afterwards every plugin is unique across all available loader """
        # plugin types are assumed identical across loaders, so the first
        # loader's types are authoritative
        for plugin_type in self.loader[0].iterTypes():
            for loader in self.loader:
                # iterate all plugins
                for plugin, info in loader.getPlugins(plugin_type).iteritems():
                    # now iterate all other loaders
                    for l2 in self.loader:
                        if l2 is not loader:
                            # drop the other loader's copy when it is not newer
                            l2.removePlugin(plugin_type, plugin, info.version)

    def getPlugin(self, plugin, name):
        """ retrieve a plugin from an available loader """
        for loader in self.loader:
            if loader.hasPlugin(plugin, name):
                return loader.getPlugin(plugin, name)
        # implicitly returns None when no loader knows the plugin
+
+
class PluginLoader:
    """
    Class to provide and load plugins from the file-system
    """
    TYPES = ("crypter", "hoster", "accounts", "addons", "network", "internal")

    # __name__ = <builtin literal> (True/False/None/numbers)
    BUILTIN = re.compile(r'__(?P<attr>[a-z0-9_]+)__\s*=\s*(True|False|None|[0-9x.]+)', re.I)
    # __name__ = <single line string>, optionally r/u/_ prefixed
    SINGLE = re.compile(r'__(?P<attr>[a-z0-9_]+)__\s*=\s*(?:r|u|_)?((?:(?<!")"(?!")|\').*(?:(?<!")"(?!")|\'))',
                        re.I)
    # finds the beginning of a expression that could span multiple lines
    MULTI = re.compile(r'__(?P<attr>[a-z0-9_]+)__\s*=\s*(\(|\{|\[|"{3})', re.I)

    # closing symbols
    MULTI_MATCH = {
        "{": "}",
        "(": ")",
        "[": "]",
        '"""': '"""'
    }

    # pattern that can never match; used for plugins without a (valid) pattern
    NO_MATCH = re.compile(r'^no_match$')

    def __init__(self, path, package, config):
        self.path = path        # root folder holding one sub-folder per plugin type
        self.package = package  # python package name used for imports in loadModule()
        self.config = config    # config manager that receives plugin config sections
        self.log = getLogger("log")
        self.plugins = {}       # {type: {name: PluginTuple}}

        self.createIndex()

    def logDebug(self, plugin, name, msg):
        """ Uniform debug message for one plugin. """
        self.log.debug("Plugin %s | %s: %s" % (plugin, name, msg))

    def createIndex(self):
        """create information for all plugins available"""
        # make sure the plugin root exists and is an importable package
        if not exists(self.path):
            makedirs(self.path)
        if not exists(join(self.path, "__init__.py")):
            f = open(join(self.path, "__init__.py"), "wb")
            f.close()

        a = time()
        for plugin in self.TYPES:
            self.plugins[plugin] = self.parse(plugin)

        self.log.debug("Created index of plugins for %s in %.2f ms", self.path, (time() - a) * 1000)

    def parse(self, folder):
        """ Analyze and parses all plugins in folder """
        plugins = {}
        pfolder = join(self.path, folder)
        # the type folder must be an importable package as well
        if not exists(pfolder):
            makedirs(pfolder)
        if not exists(join(pfolder, "__init__.py")):
            f = open(join(pfolder, "__init__.py"), "wb")
            f.close()

        for f in listdir(pfolder):
            # accept .py sources and interpreter-version tagged byte code
            if (isfile(join(pfolder, f)) and f.endswith(".py") or f.endswith("_25.pyc") or f.endswith(
                    "_26.pyc") or f.endswith("_27.pyc")) and not f.startswith("_"):
                # skip byte code not matching the running interpreter
                if f.endswith("_25.pyc") and version_info[0:2] != (2, 5):
                    continue
                elif f.endswith("_26.pyc") and version_info[0:2] != (2, 6):
                    continue
                elif f.endswith("_27.pyc") and version_info[0:2] != (2, 7):
                    continue

                # replace suffix and version tag
                name = f[:-3]
                if name[-1] == ".": name = name[:-4]

                plugin = self.parsePlugin(join(pfolder, f), folder, name)
                if plugin:
                    plugins[name] = plugin

        return plugins

    def parseAttributes(self, filename, name, folder=""):
        """ Parse attribute dict from plugin"""
        data = open(filename, "rb")
        content = data.read()
        data.close()

        attrs = BaseAttributes()
        for m in self.BUILTIN.findall(content) + self.SINGLE.findall(content) + self.parseMultiLine(content):
            #replace gettext function and eval result
            try:
                attrs[m[0]] = literal_eval(m[-1].replace("_(", "("))
            except Exception as e:  # 'as' form: python 2.6+, matching the supported byte code tags
                self.logDebug(folder, name, "Error when parsing: %s" % m[-1])
                self.log.debug(str(e))

            if not hasattr(Base, "__%s__" % m[0]):
                if m[0] != "type": #TODO remove type from all plugins, its not needed
                    self.logDebug(folder, name, "Unknown attribute '%s'" % m[0])

        return attrs

    def parseMultiLine(self, content):
        # regexp is not enough to parse multi line statements
        attrs = []
        for m in self.MULTI.finditer(content):
            attr = m.group(1)
            char = m.group(2)
            # the end char to search for
            endchar = self.MULTI_MATCH[char]
            size = len(endchar)
            # number of nested openers seen so far
            stack = 0
            # initialized so the slice below is empty when no closer is found
            endpos = m.start(2) - size
            for i in xrange(m.end(2), len(content) - size + 1):
                if content[i:i+size] == endchar:
                    # closing char seen and match now complete
                    if stack == 0:
                        endpos = i
                        break
                    else:
                        stack -= 1
                elif content[i:i+size] == char:
                    stack += 1

            # in case the end was not found match will be empty
            attrs.append((attr, content[m.start(2): endpos + size]))

        return attrs


    def parsePlugin(self, filename, folder, name):
        """ Parses a plugin from disk, folder means plugin type in this context. Also sets config.

        :arg home: dict with plugins, of which the found one will be matched against (according version)
        :returns PluginTuple"""

        attrs = self.parseAttributes(filename, name, folder)
        if not attrs: return

        version = 0
        if "version" in attrs:
            try:
                version = float(attrs["version"])
            except ValueError:
                self.logDebug(folder, name, "Invalid version %s" % attrs["version"])
                version = 9 #TODO remove when plugins are fixed, causing update loops
        else:
            self.logDebug(folder, name, "No version attribute")

        if "pattern" in attrs and attrs["pattern"]:
            try:
                plugin_re = re.compile(attrs["pattern"], re.I)
            except:
                self.logDebug(folder, name, "Invalid regexp pattern '%s'" % attrs["pattern"])
                plugin_re = self.NO_MATCH
        else:
            plugin_re = self.NO_MATCH

        deps = attrs["dependencies"]
        category = attrs["category"] if folder == "addons" else ""

        # create plugin tuple
        # user_context=True is the default for non addons plugins
        plugin = PluginTuple(version, plugin_re, deps, category,
                             bool(folder != "addons" or attrs["user_context"]), filename)

        # These have none or their own config
        if folder in ("internal", "accounts", "network"):
            return plugin

        if folder == "addons" and "config" not in attrs and not attrs["internal"]:
            attrs["config"] = (["activated", "bool", "Activated", False],)

        if "config" in attrs and attrs["config"] is not None:
            config = attrs["config"]
            desc = attrs["description"]
            expl = attrs["explanation"]

            # Convert tuples to list
            config = [list(x) for x in config]

            if folder == "addons" and not attrs["internal"]:
                for item in config:
                    if item[0] == "activated": break
                else: # activated flag missing
                    config.insert(0, ("activated", "bool", "Activated", False))

            try:
                self.config.addConfigSection(name, name, desc, expl, config)
            except:
                self.logDebug(folder, name, "Invalid config %s" % config)

        return plugin

    def iterPlugins(self):
        """ Iterates over all plugins returning (type, name, info) with info as PluginTuple """

        for plugin, data in self.plugins.iteritems():
            for name, info in data.iteritems():
                yield plugin, name, info

    def iterTypes(self):
        """ Iterate over the available plugin types """

        for plugin in self.plugins.iterkeys():
            yield plugin

    def hasPlugin(self, plugin, name):
        """ Check if certain plugin is available """
        return plugin in self.plugins and name in self.plugins[plugin]

    def getPlugin(self, plugin, name):
        """ Return plugin info for a single entity """
        try:
            return self.plugins[plugin][name]
        except KeyError:
            return None

    def getPlugins(self, plugin):
        """ Return all plugins of given plugin type """
        return self.plugins[plugin]

    def removePlugin(self, plugin, name, available_version=None):
        """ Removes a plugin from the index.
        Optionally only when its version is below or equal the available one
        """
        try:
            if available_version is not None:
                # BUGFIX: compare the version attribute - the old code compared
                # the whole PluginTuple to a float, which in py2 never removed anything
                if self.plugins[plugin][name].version <= available_version:
                    del self.plugins[plugin][name]
            else:
                del self.plugins[plugin][name]

        # no errors are thrown if the plugin didn't exist
        except KeyError:
            return

    def isUserPlugin(self, name):
        """ Determine if given plugin name is enabled for user_context in any plugin type """
        # BUGFIX: iterate the per-type dicts; the old code iterated type names
        # and indexed the plugin name with them, so it could never return True
        for plugins in self.plugins.values():
            if name in plugins and plugins[name].user:
                return True

        return False

    def savePlugin(self, content):
        """ Saves a plugin to disk. NOTE(review): not implemented yet. """

    def loadModule(self, plugin, name):
        """ Returns loaded module for plugin

        :param plugin: plugin type, subfolder of module.plugins
        :raises Exception: Everything could go wrong, failures needs to be catched
        """
        plugins = self.plugins[plugin]
        # convert path to python recognizable import
        path = basename(plugins[name].path).replace(".pyc", "").replace(".py", "")
        module = __import__(self.package + ".%s.%s" % (plugin, path), globals(), locals(), path)
        return module

    def loadAttributes(self, plugin, name):
        """ Same as `parseAttributes` for already indexed plugins """
        return self.parseAttributes(self.plugins[plugin][name].path, name, plugin)
diff --git a/pyload/utils/__init__.py b/pyload/utils/__init__.py
new file mode 100644
index 000000000..577213dd1
--- /dev/null
+++ b/pyload/utils/__init__.py
@@ -0,0 +1,244 @@
+# -*- coding: utf-8 -*-
+
+""" Store all usefull functions here """
+
+import os
+import time
+import re
+from string import maketrans
+from itertools import islice
+from htmlentitydefs import name2codepoint
+
+# abstraction layer for json operations
try: # since python 2.6
    import json
except ImportError: #use system simplejson if available
    import simplejson as json

# short aliases used throughout the code base
json_loads = json.loads
json_dumps = json.dumps
+
def decode(string):
    """ decode string to unicode with utf8 """
    if type(string) == str:
        return string.decode("utf8", "replace")
    else:
        # already unicode (or not a string) - passed through unchanged
        return string

def encode(string):
    """ encode unicode string to utf8 """
    if type(string) == unicode:
        return string.encode("utf8", "replace")
    else:
        return string


def remove_chars(string, repl):
    """ removes all chars in repl from string"""
    # NOTE(review): implicitly returns None for inputs that are neither str nor unicode
    if type(string) == str:
        return string.translate(maketrans("", ""), repl)
    elif type(string) == unicode:
        # unicode.translate takes a mapping of ordinals; None deletes the char
        return string.translate(dict([(ord(s), None) for s in repl]))


def get_console_encoding(enc):
    """ Map a reported console encoding to one this program can safely use. """
    if os.name == "nt":
        if enc == "cp65001": # aka UTF-8
            print "WARNING: Windows codepage 65001 is not supported."
            enc = "cp850"
    else:
        enc = "utf8"

    return enc
+
def compare_time(start, end):
    """ Checks whether the current local (hour, minute) lies within the span
    from *start* to *end*, each an iterable of two ints (or int-like strings).
    Spans crossing midnight (start > end) are supported; identical endpoints
    mean "always".
    """
    # list() keeps the comparisons correct when callers pass iterators
    # (py2 map returned a list, so behavior is unchanged there)
    start = list(map(int, start))
    end = list(map(int, end))

    if start == end: return True

    now = list(time.localtime()[3:5])
    if start < now < end: return True
    elif start > end and (now > start or now < end): return True
    # NOTE(review): this branch looks subsumed by the wrap-around case above; kept as-is
    elif start < now > end < start: return True
    else: return False
+
def to_list(value):
    """ Normalize *value* to a list: lists pass through unchanged, None
    becomes the empty list, anything else is wrapped in a one-item list. """
    if type(value) == list:
        return value
    if value is None:
        return []
    return [value]
+
def formatSize(size):
    """ Deprecated camelCase alias for format_size(); warns on every call. """
    print "Deprecated formatSize, use format_size"
    return format_size(size)
+
def format_size(bytes):
    """ Render a byte count as human readable text in binary units,
    e.g. 2048 -> "2.00 KiB". """
    value = int(bytes)
    units = ("B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB")
    idx = 0
    # scale down by 1024 while more than ~1000 remains in the current unit
    while value > 1000:
        value /= 1024.0
        idx += 1
    return "%.2f %s" % (value, units[idx])
+
def formatSpeed(speed):
    """ Deprecated camelCase alias for format_speed(); warns on every call. """
    print "Deprecated formatSpeed, use format_speed"
    return format_speed(speed)

def format_speed(speed):
    """ Human readable transfer speed, e.g. "1.23 MiB/s". """
    return format_size(speed) + "/s"
+
def format_time(seconds):
    """ Format a duration in seconds as "HH:MM:SS"; negative values clamp
    to "00:00:00". """
    if seconds < 0:
        return "00:00:00"
    hours, remainder = divmod(seconds, 3600)
    minutes, secs = divmod(remainder, 60)
    return "%.2i:%.2i:%.2i" % (hours, minutes, secs)
+
def parse_time(timestamp, pattern):
    """ Parse a string representing a time according to a pattern and
    return a time in seconds suitable for an account plugin. """
    parsed = time.strptime(timestamp, pattern)
    return int(time.mktime(parsed))
+
def parseFileSize(string, unit=None):
    """ Deprecated camelCase alias for parse_size(); warns on every call. """
    print "Deprecated parseFileSize, use parse_size"
    return parse_size(string, unit)
+
def parse_size(string, unit=None):
    """ Parses file size from a string. Tries to parse unit if not given.

    :return: size in bytes (0 when the string cannot be parsed)
    """
    if not unit:
        # number first, optional unit word second, e.g. "1,5 GB"
        m = re.match(r"([\d.,]+) *([a-zA-Z]*)", string.strip().lower())
        if m is None:
            return 0
        traffic = float(m.group(1).replace(",", "."))
        unit = m.group(2)
    else:
        # unit supplied by the caller; string may already be numeric
        if isinstance(string, basestring):
            traffic = float(string.replace(",", "."))
        else:
            traffic = string

    #ignore case
    unit = unit.lower().strip()

    if unit in ("gb", "gig", "gbyte", "gigabyte", "gib", "g"):
        return traffic * (1 << 30)
    if unit in ("mb", "mbyte", "megabyte", "mib", "m"):
        return traffic * (1 << 20)
    if unit in ("kb", "kib", "kilobyte", "kbyte", "k"):
        return traffic * (1 << 10)
    return traffic
+
def uniqify(seq): #by Dave Kirby
    """ removes duplicates from list, preserve order """
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
+
def bits_set(bits, compare):
    """ True when every bit of *bits* is also set in *compare*
    (vacuously true when bits is 0). """
    return (bits & compare) == bits
+
def lock(func):
    """ Method decorator: holds self.lock (args[0].lock) for the duration
    of the call, releasing it even when the call raises. """
    def wrapper(*args, **kwargs):
        owner = args[0]
        owner.lock.acquire()
        try:
            return func(*args, **kwargs)
        finally:
            owner.lock.release()

    return wrapper
+
def read_lock(func):
    """ Like #lock, but acquires self.lock in shared (read) mode;
    the lock object must support acquire(shared=True). """
    def wrapper(*args, **kwargs):
        owner = args[0]
        owner.lock.acquire(shared=True)
        try:
            return func(*args, **kwargs)
        finally:
            owner.lock.release()

    return wrapper
+
def chunks(iterable, size):
    """ Yield successive lists of at most *size* items from *iterable*. """
    it = iter(iterable)
    while True:
        piece = list(islice(it, size))
        if not piece:
            return
        yield piece
+
+
def fixup(m):
    """ re.sub callback for html_unescape(): translate one matched HTML
    entity or character reference into its unicode character (py2 unichr).
    Unknown or malformed references are returned unchanged. """
    text = m.group(0)
    if text[:2] == "&#":
        # numeric character reference, decimal (&#123;) or hex (&#x7b;)
        try:
            if text[:3] == "&#x":
                return unichr(int(text[3:-1], 16))
            else:
                return unichr(int(text[2:-1]))
        except ValueError:
            pass
    else:
        # named entity, e.g. &amp;
        try:
            name = text[1:-1]
            text = unichr(name2codepoint[name])
        except KeyError:
            pass

    return text # leave as is
+
+
def has_method(obj, name):
    """ True when *name* is defined directly on obj itself
    (False when it is only inherited). """
    return hasattr(obj, "__dict__") and name in vars(obj)
+
def accumulate(it, inv_map=None):
    """ accumulate (key, value) data to {value : [keylist]} dictionary """
    if inv_map is None:
        inv_map = {}

    for key, value in it:
        inv_map.setdefault(value, []).append(key)

    return inv_map
+
def to_string(value):
    """ Convert to str unless the value already is a (py2 str/unicode) string. """
    return str(value) if not isinstance(value, basestring) else value

def to_bool(value):
    """ Loose boolean conversion: plain truthiness for non-strings, a keyword
    list for strings ("an" presumably covers German for "on" - TODO confirm). """
    if not isinstance(value, basestring): return True if value else False
    return True if value.lower() in ("1", "true", "on", "an", "yes") else False
+
def to_int(string, default=0):
    """ Return int(string), or *default* when the value cannot be converted.

    Also catches TypeError so non-string, non-number inputs (e.g. None)
    fall back to the default instead of raising.
    """
    try:
        return int(string)
    except (ValueError, TypeError):
        return default
+
def get_index(l, value):
    """ .index method that also works on tuple and python 2.5 """
    for idx, item in enumerate(l):
        if item == value:
            return idx

    # Matches behavior of list.index
    raise ValueError("list.index(x): x not in list")
+
def primary_uid(user):
    """ Gets primary user id for user instances or ints """
    if type(user) == int:
        return user
    if user:
        return user.primary
    return None
+
def html_unescape(text):
    """Removes HTML or XML character references and entities from a text string"""
    # fixup() above translates each matched entity / character reference
    return re.sub("&#?\w+;", fixup, text)

if __name__ == "__main__":
    # manual smoke test for remove_chars (py2 print statement)
    print remove_chars("ab'cdgdsf''ds'", "'ghd")
diff --git a/pyload/utils/filetypes.py b/pyload/utils/filetypes.py
new file mode 100644
index 000000000..ce5c8a0c5
--- /dev/null
+++ b/pyload/utils/filetypes.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+import re
+from pyload.Api import MediaType
+
# Extension -> MediaType classification table. All patterns are case
# insensitive and anchored on the file name suffix.
filetypes = {
    MediaType.Audio: re.compile(r"\.(m3u|m4a|mp3|wav|wma|aac?|flac|midi|m4b)$", re.I),
    MediaType.Image: re.compile(r"\.(jpe?g|bmp|png|gif|ico|tiff?|svg|psd)$", re.I),
    MediaType.Video: re.compile(r"\.(3gp|flv|m4v|avi|mp4|mov|swf|vob|wmv|divx|mpe?g|rm|mkv)$", re.I),
    # BUGFIX: removed the empty alternative "rtf||log" which made ANY name
    # ending in a bare "." classify as Document
    MediaType.Document: re.compile(r"\.(epub|mobi|acsm|azw[0-9]|pdf|txt|md|abw|docx?|tex|odt|rtf|log)$", re.I),
    # BUGFIX: escaped the dot in the split-archive pattern "7z\.[0-9]+"
    MediaType.Archive: re.compile(r"\.(rar|r[0-9]+|7z|7z\.[0-9]+|zip|gz|bzip2?|tar|lzma)$", re.I),
    MediaType.Executable: re.compile(r"\.(jar|exe|dmg|sh|apk)$", re.I),
}


def guess_type(name):
    """ Map a file name to a MediaType via its extension;
    returns MediaType.Other when nothing matches. """
    for mt, regex in filetypes.iteritems():
        if regex.search(name) is not None:
            return mt

    return MediaType.Other
+
+
+
diff --git a/pyload/utils/fs.py b/pyload/utils/fs.py
new file mode 100644
index 000000000..05e098e2a
--- /dev/null
+++ b/pyload/utils/fs.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+from os.path import join
+from . import decode, remove_chars
+
+# File System Encoding functions:
+# Use fs_encode before accessing files on disk, it will encode the string properly
+
# When the filesystem encoding is an ANSI codepage/locale (e.g. the POSIX C
# locale reports 'ANSI_X3.4-1968'), encode unicode paths to utf8 before
# handing them to the OS; otherwise paths pass through untouched.
if sys.getfilesystemencoding().startswith('ANSI'):
    def fs_encode(string):
        if type(string) == unicode:
            return string.encode('utf8')
        else:
            return string

    fs_decode = decode #decode utf8

else:
    fs_encode = fs_decode = lambda x: x # do nothing
+
+# FS utilities
+def chmod(path, mode):
+ try:
+ return os.chmod(fs_encode(path), mode)
+ except :
+ pass
+
+def dirname(path):
+ return fs_decode(os.path.dirname(fs_encode(path)))
+
+def abspath(path):
+ return fs_decode(os.path.abspath(fs_encode(path)))
+
+def chown(path, uid, gid):
+ return os.chown(fs_encode(path), uid, gid)
+
+def remove(path):
+ return os.remove(fs_encode(path))
+
+def exists(path):
+ return os.path.exists(fs_encode(path))
+
+def makedirs(path, mode=0755):
+ return os.makedirs(fs_encode(path), mode)
+
+def listdir(path):
+ return [fs_decode(x) for x in os.listdir(fs_encode(path))]
+
+def save_filename(name):
+ #remove some chars
+ if os.name == 'nt':
+ return remove_chars(name, '/\\?%*:|"<>,')
+ else:
+ return remove_chars(name, '/\\"')
+
+def stat(name):
+ return os.stat(fs_encode(name))
+
+def save_join(*args):
+ """ joins a path, encoding aware """
+ return fs_encode(join(*[x if type(x) == unicode else decode(x) for x in args]))
+
+def free_space(folder):
+ folder = fs_encode(folder)
+
+ if os.name == "nt":
+ import ctypes
+
+ free_bytes = ctypes.c_ulonglong(0)
+ ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
+ return free_bytes.value
+ else:
+ s = os.statvfs(folder)
+ return s.f_frsize * s.f_bavail
+
def get_bsize(path):
    """ get optimal file system buffer size (in bytes) for i/o calls """
    path = fs_encode(path)

    if os.name == "nt":
        import ctypes

        drive = "%s\\" % os.path.splitdrive(path)[0]
        # BUGFIX: the old code tried to tuple-unpack a single c_longlong
        # (a TypeError) and multiplied the ctypes objects instead of their values
        cluster_sectors = ctypes.c_longlong(0)
        sector_size = ctypes.c_longlong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceW(ctypes.c_wchar_p(drive), ctypes.pointer(cluster_sectors),
                                                 ctypes.pointer(sector_size), None, None)
        return cluster_sectors.value * sector_size.value
    else:
        return os.statvfs(path).f_bsize
diff --git a/pyload/utils/json_layer.py b/pyload/utils/json_layer.py
new file mode 100644
index 000000000..cf9743603
--- /dev/null
+++ b/pyload/utils/json_layer.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# abstraction layer for json operations
+
# Deprecation notice printed once at import time; module kept for backwards
# compatibility with older plugins.
print ".json_layer is deprecated, use .json instead"

try: # since python 2.6
    import json
    from json import loads as json_loads
    from json import dumps as json_dumps
except ImportError: #use system simplejson if available
    import simplejson as json
    from simplejson import loads as json_loads
    from simplejson import dumps as json_dumps
diff --git a/pyload/utils/packagetools.py b/pyload/utils/packagetools.py
new file mode 100644
index 000000000..02dfa4739
--- /dev/null
+++ b/pyload/utils/packagetools.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+# JDownloader/src/jd/controlling/LinkGrabberPackager.java
+
+import re
+from urlparse import urlparse
+
# known file name extensions, used by the FFSJ split pattern below
endings = "\\.(3gp|7zip|7z|abr|ac3|aiff|aifc|aif|ai|au|avi|bin|bz2|cbr|cbz|ccf|cue|cvd|chm|dta|deb|divx|djvu|dlc|dmg|doc|docx|dot|eps|exe|ff|flv|f4v|gsd|gif|gz|iwd|iso|ipsw|java|jar|jpg|jpeg|jdeatme|load|mws|mw|m4v|m4a|mkv|mp2|mp3|mp4|mov|movie|mpeg|mpe|mpg|msi|msu|msp|nfo|npk|oga|ogg|ogv|otrkey|pkg|png|pdf|pptx|ppt|pps|ppz|pot|psd|qt|rmvb|rm|rar|ram|ra|rev|rnd|r\\d+|rpm|run|rsdf|rtf|sh(!?tml)|srt|snd|sfv|swf|tar|tif|tiff|ts|txt|viv|vivo|vob|wav|wmv|xla|xls|xpi|zeno|zip|z\\d+|_[_a-z]{2}|\\d+$)"

# multi-part rar archives (.part01.rar, .r00, numbered parts, ...)
rarPats = [re.compile("(.*)(\\.|_|-)pa?r?t?\\.?[0-9]+.(rar|exe)$", re.I),
           re.compile("(.*)(\\.|_|-)part\\.?[0]*[1].(rar|exe)$", re.I),
           re.compile("(.*)\\.rar$", re.I),
           re.compile("(.*)\\.r\\d+$", re.I),
           re.compile("(.*)(\\.|_|-)\\d+$", re.I)]

# zip / split 7z archives
zipPats = [re.compile("(.*)\\.zip$", re.I),
           re.compile("(.*)\\.z\\d+$", re.I),
           re.compile("(?is).*\\.7z\\.[\\d]+$", re.I),
           re.compile("(.*)\\.a.$", re.I)]

# FFSJ / HJSplit style numbered fragments
ffsjPats = [re.compile("(.*)\\._((_[a-z])|([a-z]{2}))(\\.|$)"),
            re.compile("(.*)(\\.|_|-)[\\d]+(" + endings + "$)", re.I)]

# compressed ISO images (.isz and numbered .iXX parts)
iszPats = [re.compile("(.*)\\.isz$", re.I),
           re.compile("(.*)\\.i\\d{2}$", re.I)]

# CD / part markers stripped from package names
pat1 = re.compile("(\\.?CD\\d+)", re.I)
pat2 = re.compile("(\\.?part\\d+)", re.I)

# trailing separators and xtremsplit suffixes
pat3 = re.compile("(.+)[\\.\\-_]+$")
pat4 = re.compile("(.+)\\.\\d+\\.xtm$")
+
def matchFirst(string, *args):
    """ Try every pattern list in order; return group(1) of the first
    matching regexp, or the unmodified string when nothing matches. """
    for patternlist in args:
        for pattern in patternlist:
            found = pattern.search(string)
            if found is not None:
                return found.group(1)

    return string
+
+
def parseNames(files):
    """ Generates packages names from name, data lists

    :param files: list of (name, data)
    :return: packagenames mapped to data lists (eg. urls)
    """
    packs = {}

    for file, url in files:
        # set when one of the archive/split heuristics below matched
        patternMatch = False

        if file is None:
            continue

        # remove trailing /
        name = file.rstrip('/')

        # extract last path part .. if there is a path
        split = name.rsplit("/", 1)
        if len(split) > 1:
            name = split.pop(1)

        #check if an already existing package may be ok for this file
        # found = False
        # for pack in packs:
        #     if pack in file:
        #         packs[pack].append(url)
        #         found = True
        #         break
        #
        # if found: continue

        # unrar pattern, 7zip/zip and hjmerge pattern, isz pattern, FFSJ pattern
        before = name
        name = matchFirst(name, rarPats, zipPats, iszPats, ffsjPats)
        if before != name:
            patternMatch = True

        # xtremsplit pattern
        r = pat4.search(name)
        if r is not None:
            name = r.group(1)

        # remove part and cd pattern
        r = pat1.search(name)
        if r is not None:
            name = name.replace(r.group(0), "")
            patternMatch = True

        r = pat2.search(name)
        if r is not None:
            name = name.replace(r.group(0), "")
            patternMatch = True

        # additional checks if extension pattern matched
        if patternMatch:
            # remove extension (or trailing _NN tag) up to 4 chars long
            index = name.rfind(".")
            if index <= 0:
                index = name.rfind("_")
            if index > 0:
                length = len(name) - index
                if length <= 4:
                    name = name[:-length]

            # remove endings like . _ -
            r = pat3.search(name)
            if r is not None:
                name = r.group(1)

            # replace . and _ with space
            name = name.replace(".", " ")
            name = name.replace("_", " ")

            name = name.strip()
        else:
            name = ""

        # fallback: package by hoster
        if not name:
            name = urlparse(file).hostname
            if name: name = name.replace("www.", "")

        # fallback : default name
        if not name:
            name = _("Unnamed package")

        # build mapping
        if name in packs:
            packs[name].append(url)
        else:
            packs[name] = [url]

    return packs
+
+
if __name__ == "__main__":
    # manual smoke test: group the urls from a local test file into packages
    from os.path import join
    from pprint import pprint

    f = open(join("..", "..", "testlinks2.txt"), "rb")
    urls = [(x.strip(), x.strip()) for x in f.readlines() if x.strip()]
    f.close()

    print "Having %d urls." % len(urls)

    packs = parseNames(urls)

    pprint(packs)

    print "Got %d urls." % sum([len(x) for x in packs.itervalues()])
diff --git a/pyload/utils/pylgettext.py b/pyload/utils/pylgettext.py
new file mode 100644
index 000000000..fb36fecee
--- /dev/null
+++ b/pyload/utils/pylgettext.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from gettext import *
+
# extra locale directories consulted by our find() below; None = stock behaviour
_searchdirs = None

# keep a reference to gettext's original find() before we shadow it
origfind = find
+
def setpaths(pathlist):
    """ Replace the whole list of extra locale search directories.
    Accepts any iterable; a real list is stored as-is. """
    global _searchdirs
    _searchdirs = pathlist if isinstance(pathlist, list) else list(pathlist)
+
+
def addpath(path):
    """ Append a single locale directory to the search list (no duplicates).

    BUGFIX: the first added path used to be stored via list(path), which
    split the path string into a list of characters.
    """
    global _searchdirs
    if _searchdirs is None:
        _searchdirs = [path]
    elif path not in _searchdirs:
        _searchdirs.append(path)
+
+
def delpath(path):
    """ Remove a directory from the search list; silently ignores paths
    that are not present (or an unset list). """
    global _searchdirs
    if _searchdirs is not None and path in _searchdirs:
        _searchdirs.remove(path)
+
+
def clearpath():
    """ Drop all extra search directories, restoring stock gettext lookup. """
    global _searchdirs
    _searchdirs = None
+
+
def find(domain, localedir=None, languages=None, all=False):
    """ Drop-in replacement for gettext.find() that additionally searches
    every directory registered via setpaths()/addpath(). Falls back to the
    original find() when no extra directories are configured. """
    if _searchdirs is None:
        return origfind(domain, localedir, languages, all)
    # the caller supplied localedir keeps highest priority
    searches = [localedir] + _searchdirs
    results = list()
    for dir in searches:
        res = origfind(domain, dir, languages, all)
        if all is False:
            results.append(res)
        else:
            results.extend(res)
    if all is False:
        # first hit wins; filter() drops the Nones of missed directories (py2: returns a list)
        results = filter(lambda x: x is not None, results)
        if len(results) == 0:
            return None
        else:
            return results[0]
    else:
        return results
+
#Is there a smarter/cleaner pythonic way for this?
# Patch our find() into gettext.translation()'s globals so it uses the
# extended search path (func_globals is the Python 2 spelling of __globals__).
translation.func_globals['find'] = find