summaryrefslogtreecommitdiffstats
path: root/pyload/utils
diff options
context:
space:
mode:
Diffstat (limited to 'pyload/utils')
-rw-r--r--pyload/utils/__init__.py263
-rw-r--r--pyload/utils/filters.py68
-rw-r--r--pyload/utils/middlewares.py144
-rw-r--r--pyload/utils/packagetools.py154
-rw-r--r--pyload/utils/printer.py17
-rw-r--r--pyload/utils/pylgettext.py57
6 files changed, 703 insertions, 0 deletions
diff --git a/pyload/utils/__init__.py b/pyload/utils/__init__.py
new file mode 100644
index 000000000..3c525caeb
--- /dev/null
+++ b/pyload/utils/__init__.py
@@ -0,0 +1,263 @@
+# -*- coding: utf-8 -*-
+# @author: vuolter
+
+""" Store all useful functions here """
+
+import bitmath
+import htmlentitydefs
+import os
+import re
+import string
+import sys
+import time
+import urllib
+
+import pyload.utils.pylgettext as gettext
+
+# abstraction layer for json operations
+from bottle import json_loads
+
+
def chmod(*args):
    """Best-effort wrapper around os.chmod that never raises.

    Fix: the definition was garbled as ``def os.chmod`` (invalid syntax);
    the intended name is ``chmod``, delegating to ``os.chmod``.
    """
    try:
        os.chmod(*args)
    except Exception:
        # deliberately best-effort: permission changes are non-critical here
        pass
+
+
def decode(string):
    """ Decode a byte string to unicode using utf8; pass other types through """
    if type(string) != str:
        return string
    return string.decode("utf8", "replace")
+
+
def encode(string):
    """ Encode a unicode string to utf8 bytes; pass other types through """
    if type(string) == unicode:  # Python 2 unicode type
        return string.encode("utf8", "replace")
    else:
        return string
+
+
def remove_chars(string, repl):
    """Return *string* with every character contained in *repl* removed."""
    if type(repl) == unicode:
        # unicode repl: strip each bad character one by one
        result = string
        for ch in repl:
            result = result.replace(ch, "")
        return result
    if type(string) == str:
        # byte string: use the two-argument translate deletion form (Py2)
        return string.translate(string.maketrans("", ""), repl)
    if type(string) == unicode:
        # unicode string: translate via a codepoint -> None mapping
        return string.translate(dict((ord(ch), None) for ch in repl))
    # NOTE(review): any other input type falls through and returns None
+
+
def safe_filename(name):
    """Strip characters that are unsafe in file names on this platform."""
    #: Non-ASCII chars usually breaks file saving. Replacing.
    name = urllib.unquote(name).encode('ascii', 'replace')
    if os.name == 'nt':
        # control characters plus Windows-reserved punctuation
        bad = (u'\00\01\02\03\04\05\06\07\10\11\12\13\14\15\16\17\20\21\22\23\24\25\26\27\30\31\32'
               u'\33\34\35\36\37/?%*|"<>')
        return remove_chars(name, bad)
    return remove_chars(name, u'\0\\"')
+
+
#: Deprecated method
def save_path(name):
    """Deprecated alias kept for backward compatibility; use safe_filename()."""
    return safe_filename(name)
+
+
def fs_join(*args):
    """ joins a path, encoding aware """
    parts = [arg if type(arg) == unicode else decode(arg) for arg in args]
    return fs_encode(os.path.join(*parts))
+
+
#: Deprecated method
def save_join(*args):
    """Deprecated alias kept for backward compatibility; use fs_join()."""
    return fs_join(*args)
+
+
+# File System Encoding functions:
+# Use fs_encode before accesing files on disk, it will encode the string properly
+
# When the file system encoding is an ANSI codepage (Windows), names must be
# sanitized/encoded before touching the disk; otherwise strings pass through.
if sys.getfilesystemencoding().startswith('ANSI'):

    def fs_encode(string):
        # encode to utf8 and strip characters unsafe for file names
        return safe_filename(encode(string))

    fs_decode = decode #: decode utf8

else:
    fs_encode = fs_decode = lambda x: x #: do nothing
+
+
+def get_console_encoding(enc):
+ if os.name == "nt":
+ if enc == "cp65001": #: aka UTF-8
+ print "WARNING: Windows codepage 65001 is not supported."
+ enc = "cp850"
+ else:
+ enc = "utf8"
+
+ return enc
+
+
def compare_time(start, end):
    """Return True if the current local (hour, minute) lies inside the
    interval given by *start* and *end*, including intervals that wrap
    past midnight.

    :param start: (hour, minute) sequence; items convertible to int
    :param end:   (hour, minute) sequence; items convertible to int
    """
    start = map(int, start)
    end = map(int, end)

    # identical bounds: treated as always matching
    if start == end:
        return True

    now = list(time.localtime()[3:5])  # current [hour, minute]
    if start < now < end:
        return True
    elif start > end and (now > start or now < end):
        # interval wraps around midnight (e.g. 23:00 - 06:00)
        return True
    elif start < now > end < start:
        # NOTE(review): chained comparison == (start < now and now > end and
        # end < start); appears to overlap the branch above — verify intent
        return True
    return False
+
+
def formatSize(size):
    """formats size of bytes"""
    byte_count = bitmath.Byte(int(size))
    return byte_count.best_prefix()
+
+
def formatSpeed(speed):
    """Format a transfer rate (bytes per second) for display."""
    size_repr = formatSize(speed)
    return size_repr + "/s"
+
+
def freeSpace(folder):
    """Return the free disk space of *folder* in bytes."""
    if os.name == "nt":
        import ctypes

        # Windows: ask the kernel for the bytes available to the caller
        free_bytes = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(folder), None, None, ctypes.pointer(free_bytes))
        return free_bytes.value
    else:
        s = os.statvfs(folder)
        # fragment size * blocks available to unprivileged users
        return s.f_frsize * s.f_bavail
+
+
def fs_bsize(path):
    """ get optimal file system buffer size (in bytes) for I/O calls """
    path = fs_encode(path)

    if os.name == "nt":
        import ctypes

        drive = "%s\\" % os.path.splitdrive(path)[0]
        # fix: the original unpacked ONE c_longlong into two names, which
        # raises TypeError; each out-parameter needs its own ctypes object
        cluster_sectors = ctypes.c_longlong(0)
        sector_size = ctypes.c_longlong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceW(ctypes.c_wchar_p(drive), ctypes.pointer(cluster_sectors), ctypes.pointer(sector_size), None, None)
        # fix: multiply the contained values, not the ctypes wrappers
        return cluster_sectors.value * sector_size.value
    else:
        return os.statvfs(path).f_frsize
+
+
def uniqify(seq): #: Originally by Dave Kirby
    """ Remove duplicates from list preserving order """
    seen = set()
    result = []
    for item in seq:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
+
+
def parseFileSize(string, unit=None): #: returns bytes
    """Parse a human-readable size like "1,5 GB" into a byte count.

    :param string: size text (or a bare number when *unit* is given)
    :param unit: optional explicit unit name; parsed from *string* otherwise
    :return: size in bytes (float), or 0 when nothing could be parsed
    """
    if not unit:
        m = re.match(r"([\d.,]+) *([a-zA-Z]*)", string.strip().lower())
        if m is None:
            return 0
        traffic = float(m.group(1).replace(",", "."))
        unit = m.group(2)
    else:
        if isinstance(string, basestring):
            traffic = float(string.replace(",", "."))
        else:
            traffic = string

    # ignore case
    unit = unit.lower().strip()

    # binary prefixes: unit aliases mapped to their power-of-two shift
    prefixes = (
        (("eb", "ebyte", "exabyte", "eib", "e"), 60),
        (("pb", "pbyte", "petabyte", "pib", "p"), 50),
        (("tb", "tbyte", "terabyte", "tib", "t"), 40),
        (("gb", "gbyte", "gigabyte", "gib", "g", "gig"), 30),
        (("mb", "mbyte", "megabyte", "mib", "m"), 20),
        (("kb", "kbyte", "kilobyte", "kib", "k"), 10),
    )
    for aliases, shift in prefixes:
        if unit in aliases:
            traffic *= 1 << shift
            break

    return traffic
+
+
def lock(func):
    """Decorator: run *func* while holding ``args[0].lock``.

    Intended for methods of objects exposing a ``lock`` attribute.
    """
    def wrapper(*args):
        owner = args[0]
        owner.lock.acquire()
        try:
            return func(*args)
        finally:
            # always released, even when func raises
            owner.lock.release()

    return wrapper
+
+
def fixup(m):
    """Translate one HTML entity / character-reference regex match into its
    unicode character; unknown references are returned unchanged."""
    text = m.group(0)
    if text.startswith("&#"):
        # numeric character reference (decimal or &#x... hexadecimal)
        try:
            if text.startswith("&#x"):
                return unichr(int(text[3:-1], 16))
            return unichr(int(text[2:-1]))
        except ValueError:
            pass
    else:
        # named entity, e.g. &amp;
        try:
            return unichr(htmlentitydefs.name2codepoint[text[1:-1]])
        except KeyError:
            pass
    return text #: leave as is
+
+
def has_method(obj, name):
    """ Check if "name" was defined in obj, (false if it was inherited) """
    if not hasattr(obj, '__dict__'):
        return False
    return name in obj.__dict__
+
+
def html_unescape(text):
    """Removes HTML or XML character references and entities from a text string

    Fix: regex is now a raw string; the previous non-raw "\\w" relied on an
    invalid escape sequence (a SyntaxWarning/error on modern interpreters).
    """
    return re.sub(r"&#?\w+;", fixup, text)
+
+
def versiontuple(v): #: By kindall (http://stackoverflow.com/a/11887825)
    """Convert a dotted version string like "1.2.3" to a tuple of ints."""
    return tuple(int(part) for part in v.split("."))
+
+
def load_translation(name, locale, default="en"):
    """ Load language and return its translation object or None """

    import traceback

    try:
        # fix: `join` was called unqualified (NameError); it lives in os.path
        gettext.setpaths([os.path.join(os.sep, "usr", "share", "pyload", "locale"), None])
        translation = gettext.translation(name, os.path.join(pypath, "locale"),
                                          languages=[locale, default], fallback=True)
    except Exception:
        traceback.print_exc()
        return None
    else:
        # install _() into builtins; True also installs unicode-aware _()
        translation.install(True)
        return translation
diff --git a/pyload/utils/filters.py b/pyload/utils/filters.py
new file mode 100644
index 000000000..9d4d47c04
--- /dev/null
+++ b/pyload/utils/filters.py
@@ -0,0 +1,68 @@
+# -*- coding: utf-8 -*-
+
+import os
+
+quotechar = "::/"
+
try:
    from os.path import relpath
except ImportError:
    # Python < 2.6 has no os.path.relpath: provide a pure-Python fallback.
    # Fixes: the definition was garbled as ``def os.relpath`` (invalid
    # syntax), and the bare ``except Exception`` is narrowed to ImportError.
    from posixpath import curdir, sep, pardir


    def relpath(path, start=curdir):
        """Return a relative version of a path"""
        if not path:
            raise ValueError("no path specified")
        start_list = os.path.abspath(start).split(sep)
        path_list = os.path.abspath(path).split(sep)
        # Work out how much of the filepath is shared by start and path.
        i = len(os.path.commonprefix([start_list, path_list]))
        rel_list = [pardir] * (len(start_list) - i) + path_list[i:]
        if not rel_list:
            return curdir
        return os.path.join(*rel_list)
+
+
def quotepath(path):
    """Escape "../" sequences in *path* with the quotechar marker.

    Non-string input is returned untouched; any other failure yields "".
    """
    try:
        # split/join is equivalent to path.replace("../", quotechar)
        return quotechar.join(path.split("../"))
    except AttributeError:
        return path
    except Exception:
        return ""
+
+
def unquotepath(path):
    """Undo quotepath(): restore "../" sequences from the quotechar marker.

    Non-string input is returned untouched; any other failure yields "".
    """
    try:
        # split/join is equivalent to path.replace(quotechar, "../")
        return "../".join(path.split(quotechar))
    except AttributeError:
        return path
    except Exception:
        return ""
+
+
def path_make_absolute(path):
    """Absolutize *path* and guarantee a trailing path separator."""
    absolute = os.path.abspath(path)
    return absolute if absolute.endswith(os.path.sep) else absolute + os.path.sep
+
+
def path_make_relative(path):
    """Make *path* relative to the cwd and guarantee a trailing separator.

    Fix: called ``os.relpath``, which does not exist; the module imports
    (or defines a fallback for) ``relpath`` — use that.
    """
    p = relpath(path)
    if p.endswith(os.path.sep):
        return p
    return p + os.path.sep
+
+
def truncate(value, n):
    """Shorten *value* to its first n characters plus "..." when it is
    longer than n - 3; otherwise return it unchanged.

    NOTE(review): the truncated result is n + 3 chars long, i.e. it can
    exceed n — preserved as-is from the original template filter.
    """
    too_long = len(value) > n - 3
    return value[:n] + "..." if too_long else value
+
+
def date(date, format):
    """Template filter stub: returns the date unchanged; *format* is ignored."""
    return date
diff --git a/pyload/utils/middlewares.py b/pyload/utils/middlewares.py
new file mode 100644
index 000000000..c3f4952db
--- /dev/null
+++ b/pyload/utils/middlewares.py
@@ -0,0 +1,144 @@
+# -*- coding: utf-8 -*-
+
+import gzip
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+
class StripPathMiddleware(object):
    """WSGI middleware that strips trailing slashes from PATH_INFO."""

    def __init__(self, app):
        self.app = app


    def __call__(self, environ, start_response):
        environ['PATH_INFO'] = environ['PATH_INFO'].rstrip('/')
        return self.app(environ, start_response)
+
+
class PrefixMiddleware(object):
    """WSGI middleware that removes a leading URL prefix from PATH_INFO."""

    def __init__(self, app, prefix="/pyload"):
        self.app = app
        self.prefix = prefix


    def __call__(self, environ, start_response):
        path = environ['PATH_INFO']
        if path.startswith(self.prefix):
            # remove only the first occurrence, i.e. the leading prefix
            environ['PATH_INFO'] = path.replace(self.prefix, "", 1)
        return self.app(environ, start_response)
+
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
+# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
+
+# WSGI middleware
+# Gzip-encodes the response.
+
+
class GZipMiddleWare(object):
    """WSGI middleware that gzip-encodes responses for capable clients."""

    def __init__(self, application, compress_level=6):
        self.application = application
        self.compress_level = int(compress_level)


    def __call__(self, environ, start_response):
        accepts = environ.get('HTTP_ACCEPT_ENCODING', '')
        if 'gzip' not in accepts:
            # client cannot handle gzip: act as a transparent no-op
            return self.application(environ, start_response)
        response = GzipResponse(start_response, self.compress_level)
        app_iter = self.application(environ, response.gzip_start_response)
        if app_iter is not None:
            response.finish_response(app_iter)
        return response.write()
+
+
def header_value(headers, key):
    """Return the value of the first header matching *key*
    (case-insensitive), or None when absent."""
    wanted = key.lower()
    for name, value in headers:
        if name.lower() == wanted:
            return value
    return None
+
+
def update_header(headers, key, value):
    """Set header *key* to *value* in the (name, value) list, replacing any
    existing entry (case-insensitive), in place."""
    remove_header(headers, key)
    headers.append((key, value))
+
+
def remove_header(headers, key):
    """Remove the first header matching *key* (case-insensitive), in place."""
    wanted = key.lower()
    for item in headers:
        if item[0].lower() == wanted:
            headers.remove(item)
            break
+
+
class GzipResponse(object):
    """Buffers a WSGI response body and gzip-compresses it when the
    content type / length make compression worthwhile."""

    def __init__(self, start_response, compress_level):
        # the wrapped start_response; called once from finish_response()
        self.start_response = start_response
        self.compress_level = compress_level
        self.buffer = StringIO()          # collects the (possibly gzipped) body
        self.compressible = False
        self.content_length = None        # NOTE(review): never read afterwards
        self.headers = ()


    def gzip_start_response(self, status, headers, exc_info=None):
        """Replacement start_response: decides compressibility from the
        response headers and returns a write() callable into the buffer."""
        self.headers = headers
        ct = header_value(headers, 'content-type')
        ce = header_value(headers, 'content-encoding')
        cl = header_value(headers, 'content-length')
        if cl:
            cl = int(cl)
        else:
            # unknown length: assume just above the 200-byte threshold
            cl = 201
        self.compressible = False
        # compress only text-like payloads above ~200 bytes that are not
        # already compressed ('zip' in the content type)
        if ct and (ct.startswith('text/') or ct.startswith('application/')) and 'zip' not in ct and cl > 200:
            self.compressible = True
        if ce:
            # an existing content-encoding wins: never double-encode
            self.compressible = False
        if self.compressible:
            headers.append(('content-encoding', 'gzip'))
        # length changes after compression; recomputed in finish_response()
        remove_header(headers, 'content-length')
        self.headers = headers
        self.status = status
        return self.buffer.write


    def write(self):
        """Drain the buffer and return the response as a one-item list."""
        out = self.buffer
        out.seek(0)
        s = out.getvalue()
        out.close()
        return [s]


    def finish_response(self, app_iter):
        """Consume *app_iter* into the buffer (gzipping when enabled),
        fix up Content-Length and invoke the real start_response."""
        if self.compressible:
            output = gzip.GzipFile(mode='wb', compresslevel=self.compress_level, fileobj=self.buffer)
        else:
            output = self.buffer
        try:
            for s in app_iter:
                output.write(s)
            if self.compressible:
                # flush gzip trailer into the buffer
                output.close()
        finally:
            if hasattr(app_iter, 'close'):
                try:
                    app_iter.close()
                except Exception:
                    pass

        content_length = self.buffer.tell()
        update_header(self.headers, "Content-Length", str(content_length))
        self.start_response(self.status, self.headers)
diff --git a/pyload/utils/packagetools.py b/pyload/utils/packagetools.py
new file mode 100644
index 000000000..fed46123c
--- /dev/null
+++ b/pyload/utils/packagetools.py
@@ -0,0 +1,154 @@
+# -*- coding: utf-8 -*-
+
+import re
+import urlparse
+
+
# Regex fragments for known file extensions; joined into ffsjPats below to
# detect numbered split archives by their final extension.
endings = ("jdeatme", "3gp", "7zip", "7z", "abr", "ac3", "aiff", "aifc", "aif", "ai",
           "au", "avi", "apk", "bin", "bmp", "bat", "bz2", "cbr", "cbz", "ccf", "chm",
           "cr2", "cso", "cue", "cvd", "dta", "deb", "divx", "djvu", "dlc", "dmg", "doc",
           "docx", "dot", "eps", "epub", "exe", "ff", "flv", "flac", "f4v", "gsd", "gif",
           "gpg", "gz", "iwd", "idx", "iso", "ipa", "ipsw", "java", "jar", "jpe?g", "load",
           "m2ts", "m4v", "m4a", "md5", "mkv", "mp2", "mp3", "mp4", "mobi", "mov", "movie",
           "mpeg", "mpe", "mpg", "mpq", "msi", "msu", "msp", "mv", "mws", "nfo", "npk", "oga",
           "ogg", "ogv", "otrkey", "par2", "pkg", "png", "pdf", "pptx?", "ppsx?", "ppz", "pot",
           "psd", "qt", "rmvb", "rm", "rar", "ram", "ra", "rev", "rnd", "rpm", "run", "rsdf",
           "reg", "rtf", "shnf", "sh(?!tml)", "ssa", "smi", "sub", "srt", "snd", "sfv", "sfx",
           "swf", "swc", "tar\.(gz|bz2|xz)", "tar", "tgz", "tiff?", "ts", "txt", "viv", "vivo",
           "vob", "vtt", "webm", "wav", "wmv", "wma", "xla", "xls", "xpi", "zeno", "zip",
           "[r-z]\d{2}", "_[_a-z]{2}", "\d{3,4}(?=\?|$|\"|\r|\n)")

# multipart RAR archives (".part01.rar", ".r01", ...); group(1) = base name
rarPats = [re.compile(r'(.*)(\.|_|-)pa?r?t?\.?\d+.(rar|exe)$', re.I),
           re.compile(r'(.*)(\.|_|-)part\.?[0]*[1].(rar|exe)$', re.I),
           re.compile(r'(.*)\.rar$', re.I),
           re.compile(r'(.*)\.r\d+$', re.I),
           re.compile(r'(.*)(\.|_|-)\d+$', re.I)]

# zip / 7z split volumes (".zip", ".z01", ".7z.001", ...)
zipPats = [re.compile(r'(.*)\.zip$', re.I),
           re.compile(r'(.*)\.z\d+$', re.I),
           re.compile(r'(?is).*\.7z\.[\d]+$', re.I),
           re.compile(r'(.*)\.a.$', re.I)]

# FFSJ / HJSplit style splits ("name._01", "name.avi.001", ...)
ffsjPats = [re.compile(r'(.*)\._((_[a-z])|([a-z]{2}))(\.|$)'),
            re.compile(r'(.*)(\.|_|-)[\d]+(\.(' + '|'.join(endings) + ')$)', re.I)]

# ISZ split images (".isz", ".i01", ...)
iszPats = [re.compile(r'(.*)\.isz$', re.I),
           re.compile(r'(.*)\.i\d{2}$', re.I)]

# leading "www." (optionally numbered) of a hostname
pat0 = re.compile(r'www\d*\.', re.I)

# ".CD1" / ".part2" style markers removed from candidate names
pat1 = re.compile(r'(\.?CD\d+)', re.I)
pat2 = re.compile(r'(\.?part\d+)', re.I)

# trailing punctuation runs, and xtremsplit ".NNN.xtm" names
pat3 = re.compile(r'(.+)[\.\-_]+$')
pat4 = re.compile(r'(.+)\.\d+\.xtm$')
+
+
def matchFirst(string, *args):
    """ matches against list of regexp and returns first match """
    for patternlist in args:
        for pattern in patternlist:
            found = pattern.search(string)
            if found is not None:
                return found.group(1)
    # nothing matched: hand the input back unchanged
    return string
+
+
def parseNames(files):
    """ Generates packages names from name, data lists

    :param files: list of (name, data)
    :return: packagenames mapped to data lists (eg. urls)
    """
    packs = {}

    # NOTE: `file` shadows the builtin; kept as in the original API
    for file, url in files:
        patternMatch = False

        if file is None:
            continue

        # remove trailing /
        name = file.rstrip('/')

        # extract last path part .. if there is a path
        split = name.rsplit("/", 1)
        if len(split) > 1:
            name = split.pop(1)

        # check if an already existing package may be ok for this file
        # found = False
        # for pack in packs:
        #     if pack in file:
        #         packs[pack].append(url)
        #         found = True
        #         break
        #
        # if found:
        #     continue

        # unrar pattern, 7zip/zip and hjmerge pattern, isz pattern, FFSJ pattern
        before = name
        name = matchFirst(name, rarPats, zipPats, iszPats, ffsjPats)
        if before != name:
            patternMatch = True

        # xtremsplit pattern
        m = pat4.search(name)
        if m is not None:
            name = m.group(1)

        # remove part and cd pattern
        m = pat1.search(name)
        if m is not None:
            name = name.replace(m.group(0), "")
            patternMatch = True

        m = pat2.search(name)
        if m is not None:
            name = name.replace(m.group(0), "")
            patternMatch = True

        # additional checks if extension pattern matched
        if patternMatch:
            # remove extension (up to 4 chars after the last "." or "_")
            index = name.rfind(".")
            if index <= 0:
                index = name.rfind("_")
            if index > 0:
                length = len(name) - index
                if length <= 4:
                    name = name[:-length]

            # remove endings like . _ -
            m = pat3.search(name)
            if m is not None:
                name = m.group(1)

            # replace . and _ with space
            name = name.replace(".", " ")
            name = name.replace("_", " ")

            name = name.strip()
        else:
            name = ""

        #@NOTE: fallback: package by hoster
        if not name:
            name = urlparse.urlparse(file).netloc
            if name:
                # drop a leading "www." / "www2." prefix
                name = pat0.sub("", name)

        # fallback : default name
        if not name:
            name = _("Unnamed package")

        # build mapping
        if name in packs:
            packs[name].append(url)
        else:
            packs[name] = [url]

    return packs
diff --git a/pyload/utils/printer.py b/pyload/utils/printer.py
new file mode 100644
index 000000000..e4f6a360a
--- /dev/null
+++ b/pyload/utils/printer.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+# @author: vuolter
+
+import colorama
+
+colorama.init(autoreset=True)
+
+
def color(color, text):
    """Return *text* prefixed with the named colorama foreground color.

    Fixes: the body referenced the loop variable ``c`` instead of the
    ``color`` parameter, and subscripted/called ``colorama.Fore``; the
    Fore constants are plain attribute strings to be concatenated.
    """
    return getattr(colorama.Fore, color.upper()) + text
+
# Bind one module-level helper per color name, e.g. red("msg").
# Fixes: `colorama.Fore` is not iterable, and `eval` cannot execute an
# assignment statement; bind via globals() instead.
for c in ("black", "red", "green", "yellow", "blue", "magenta", "cyan", "white"):
    globals()[c] = (lambda name: lambda msg: color(name, msg))(c)
+
+
+def overline(line, msg):
+ print "\033[%(line)s;0H\033[2K%(msg)s" % {'line': str(line), 'msg': msg}
diff --git a/pyload/utils/pylgettext.py b/pyload/utils/pylgettext.py
new file mode 100644
index 000000000..76bb268ec
--- /dev/null
+++ b/pyload/utils/pylgettext.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+
+from gettext import *
+
# extra directories consulted by find(); None = stock gettext behavior
_searchdirs = None

# keep a reference to the stock gettext.find before shadowing it below
origfind = find
+
+
def setpaths(pathlist):
    """Replace the extra search directories with *pathlist* (coerced to list)."""
    global _searchdirs
    if isinstance(pathlist, list):
        _searchdirs = pathlist
    else:
        _searchdirs = list(pathlist)
+
+
def addpath(path):
    """Append a single directory to the search list, avoiding duplicates.

    Fix: the empty-list initialization used ``list(path)``, which splits a
    string path into individual characters; a one-element list is intended.
    """
    global _searchdirs
    if _searchdirs is None:
        _searchdirs = [path]
    elif path not in _searchdirs:
        _searchdirs.append(path)
+
+
def delpath(path):
    """Remove *path* from the search list if it is present."""
    global _searchdirs
    if _searchdirs is not None and path in _searchdirs:
        _searchdirs.remove(path)
+
+
def clearpath():
    """Reset the search list, restoring stock gettext lookup behavior."""
    global _searchdirs
    _searchdirs = None
+
+
def find(domain, localedir=None, languages=None, all=False):
    """Drop-in replacement for gettext.find() that also consults the
    directories registered via setpaths()/addpath().

    Mirrors the stock signature: with all=False returns the first catalog
    path found (or None); with all=True returns every match.
    """
    if _searchdirs is None:
        # no extra paths configured: defer entirely to the stock finder
        return origfind(domain, localedir, languages, all)
    # the explicit localedir (possibly None) is tried first
    searches = [localedir] + _searchdirs
    results = []
    for dir in searches:
        res = origfind(domain, dir, languages, all)
        if all is False:
            results.append(res)
        else:
            results.extend(res)
    if all is False:
        # drop the misses; NOTE: relies on Python 2 filter() returning a list
        results = filter(lambda x: x is not None, results)
        if len(results) == 0:
            return None
        else:
            return results[0]
    else:
        return results
+
# Is there a smarter/cleaner pythonic way for this?
# Rebind `find` inside gettext.translation's globals so translation() uses
# the search-path-aware version above (func_globals is Python 2 spelling).
translation.func_globals['find'] = find