author    | GammaC0de <GammaC0de@users.noreply.github.com>                    | 2015-05-29 23:33:10 +0200
----------|--------------------------------------------------------------------|--------------------------
committer | GammaC0de <GammaC0de@users.noreply.github.com>                    | 2015-05-29 23:33:10 +0200
commit    | 844dfd92f590e531ca2f7fd86305fcbc13a03721 (patch)                   |
tree      | 5303bd07749b362dab071ada6197fe37dda85b27 /module/plugins/internal |
parent    | [BitshareCom] Code cosmetics (diff)                                |
parent    | [SimpleHoster] Fix DB error (diff)                                 |
download  | pyload-844dfd92f590e531ca2f7fd86305fcbc13a03721.tar.xz             |
Merge pull request #1 from pyload/stable
sync stable
Diffstat (limited to 'module/plugins/internal')
-rw-r--r-- | module/plugins/internal/AbstractExtractor.py | 141
-rw-r--r-- | module/plugins/internal/CaptchaService.py    | 545
-rw-r--r-- | module/plugins/internal/DeadCrypter.py       |  11
-rw-r--r-- | module/plugins/internal/DeadHoster.py        |  11
-rw-r--r-- | module/plugins/internal/Extractor.py         | 150
-rw-r--r-- | module/plugins/internal/MultiHook.py         | 309
-rw-r--r-- | module/plugins/internal/MultiHoster.py       | 106
-rw-r--r-- | module/plugins/internal/SevenZip.py          | 153
-rw-r--r-- | module/plugins/internal/SimpleCrypter.py     |  94
-rw-r--r-- | module/plugins/internal/SimpleDereferer.py   | 101
-rw-r--r-- | module/plugins/internal/SimpleHoster.py      | 649
-rw-r--r-- | module/plugins/internal/UnRar.py             | 262
-rw-r--r-- | module/plugins/internal/UnZip.py             |  80
-rw-r--r-- | module/plugins/internal/XFSAccount.py        |  65
-rw-r--r-- | module/plugins/internal/XFSCrypter.py        |  28
-rw-r--r-- | module/plugins/internal/XFSHoster.py         | 206
16 files changed, 1826 insertions, 1085 deletions
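
The largest part of this sync is the CaptchaService rewrite visible in the diff below: ReCaptcha v2 support, new `retrieve_key`/`retrieve_html` helpers, and `challenge()` now returning a `(response, challenge)` tuple. As a rough orientation, here is a minimal, hypothetical usage sketch of the reworked API from a hoster plugin; `MyHoster`, its URL pattern, and the POST field names are assumptions for illustration, not part of this commit.

    # -*- coding: utf-8 -*-
    # Hypothetical sketch only -- not part of this commit. It shows how a hoster
    # plugin could drive the reworked ReCaptcha helper after this sync.

    from module.plugins.internal.CaptchaService import ReCaptcha
    from module.plugins.internal.SimpleHoster import SimpleHoster


    class MyHoster(SimpleHoster):                             #: hypothetical plugin
        __name__    = "MyHoster"
        __type__    = "hoster"
        __version__ = "0.01"
        __pattern__ = r'https?://(?:www\.)?example\.com/\w+'  #: assumed URL scheme

        def handleFree(self, pyfile):
            recaptcha = ReCaptcha(self)

            # detect_key() now falls back to self.html via retrieve_html(),
            # so the plugin no longer has to pass the page source explicitly
            key = recaptcha.detect_key()
            if key is None:
                self.fail(_("ReCaptcha key not found"))

            # challenge() picks the v1 or v2 flow from the matched key pattern
            # and returns a (response, challenge) tuple (challenge is None for v2)
            response, challenge = recaptcha.challenge(key)

            # form field names depend on the target site; these are assumptions
            self.download(pyfile.url,
                          post={'g-recaptcha-response'     : response,
                                'recaptcha_challenge_field': challenge})
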
diff --git a/module/plugins/internal/AbstractExtractor.py b/module/plugins/internal/AbstractExtractor.py deleted file mode 100644 index 310897d4e..000000000 --- a/module/plugins/internal/AbstractExtractor.py +++ /dev/null @@ -1,141 +0,0 @@ -# -*- coding: utf-8 -*- - -class ArchiveError(Exception): - pass - - -class CRCError(Exception): - pass - - -class PasswordError(Exception): - pass - - -class AbtractExtractor: - __name__ = "AbtractExtractor" - __version__ = "0.12" - - __description__ = """Abtract extractor plugin""" - __license__ = "GPLv3" - __authors__ = [("RaNaN", "ranan@pyload.org"), - ("Walter Purcaro", "vuolter@gmail.com")] - - - EXTENSIONS = [] - - - @classmethod - def checkDeps(cls): - """ Check if system statisfy dependencies - :return: boolean - """ - return True - - - @classmethod - def isArchive(cls, file): - raise NotImplementedError - - - @classmethod - def getTargets(cls, files_ids): - """ Filter suited targets from list of filename id tuple list - :param files_ids: List of filepathes - :return: List of targets, id tuple list - """ - targets = [] - - for file, id in files_ids: - if cls.isArchive(file): - targets.append((file, id)) - - return targets - - - def __init__(self, m, file, out, password, fullpath, overwrite, excludefiles, renice, delete, keepbroken): - """Initialize extractor for specific file - - :param m: ExtractArchive Hook plugin - :param file: Absolute filepath - :param out: Absolute path to destination directory - :param fullpath: extract to fullpath - :param overwrite: Overwrite existing archives - :param renice: Renice value - """ - self.m = m - self.file = file - self.out = out - self.password = password - self.fullpath = fullpath - self.overwrite = overwrite - self.excludefiles = excludefiles - self.renice = renice - self.delete = delete - self.keepbroken = keepbroken - self.files = [] #: Store extracted files here - - - def init(self): - """ Initialize additional data structures """ - pass - - - def verify(self): - """Check if password if needed. Raise ArchiveError if integrity is - questionable. - - :raises ArchiveError - """ - pass - - - def isPassword(self, password): - """ Check if the given password is/might be correct. - If it can not be decided at this point return true. - - :param password: - :return: boolean - """ - if isinstance(password, basestring): - return True - else: - return False - - - def setPassword(self, password): - if self.isPassword(password): - self.password = password - return True - else: - return False - - - def repair(self): - return False - - - def extract(self, progress=lambda x: None): - """Extract the archive. Raise specific errors in case of failure. - - :param progress: Progress function, call this to update status - :raises PasswordError - :raises CRCError - :raises ArchiveError - :return: - """ - self.setPassword(password) - raise NotImplementedError - - - def getDeleteFiles(self): - """Return list of files to delete, do *not* delete them here. 
- - :return: List with paths of files to delete - """ - raise NotImplementedError - - - def getExtractedFiles(self): - """Populate self.files at some point while extracting""" - return self.files diff --git a/module/plugins/internal/CaptchaService.py b/module/plugins/internal/CaptchaService.py index 965799e8e..e51844965 100644 --- a/module/plugins/internal/CaptchaService.py +++ b/module/plugins/internal/CaptchaService.py @@ -1,50 +1,62 @@ # -*- coding: utf-8 -*- +import random import re +import time +import urlparse -from random import random +from base64 import b64encode from module.common.json_layer import json_loads +from module.plugins.Plugin import Base, Fail -class CaptchaService: +#@TODO: Extend (new) Plugin class; remove all `html` args +class CaptchaService(Base): __name__ = "CaptchaService" - __version__ = "0.16" + __type__ = "captcha" + __version__ = "0.29" __description__ = """Base captcha service plugin""" __license__ = "GPLv3" __authors__ = [("pyLoad Team", "admin@pyload.org")] - KEY_PATTERN = None - key = None #: last key detected def __init__(self, plugin): self.plugin = plugin + super(CaptchaService, self).__init__(plugin.core) - def detect_key(self, html=None): - if not html: - if hasattr(self.plugin, "html") and self.plugin.html: - html = self.plugin.html - else: - errmsg = _("%s html not found") % self.__name__ - self.plugin.fail(errmsg) #@TODO: replace all plugin.fail(errmsg) with plugin.error(errmsg) in 0.4.10 - raise TypeError(errmsg) + #@TODO: Recheck in 0.4.10 + def fail(self, reason): + self.plugin.fail(reason) + raise AttributeError(reason) - m = re.search(self.KEY_PATTERN, html) - if m: - self.key = m.group(1).strip() - self.plugin.logDebug("%s key: %s" % (self.__name__, self.key)) + + #@TODO: Recheck in 0.4.10 + def retrieve_key(self, html): + if self.detect_key(html): return self.key else: - self.plugin.logDebug("%s key not found" % self.__name__) - return None + self.fail(_("%s key not found") % self.__name__) + + + #@TODO: Recheck in 0.4.10 + def retrieve_html(self): + if hasattr(self.plugin, "html") and self.plugin.html: + return self.plugin.html + else: + self.fail(_("%s html not found") % self.__name__) - def challenge(self, key=None): + def detect_key(self, html=None): + raise NotImplementedError + + + def challenge(self, key=None, html=None): raise NotImplementedError @@ -52,281 +64,426 @@ class CaptchaService: raise NotImplementedError -class ReCaptcha(CaptchaService): - __name__ = "ReCaptcha" - __version__ = "0.08" +class AdYouLike(CaptchaService): + __name__ = "AdYouLike" + __type__ = "captcha" + __version__ = "0.06" - __description__ = """ReCaptcha captcha service plugin""" + __description__ = """AdYouLike captcha service plugin""" __license__ = "GPLv3" - __authors__ = [("pyLoad Team", "admin@pyload.org")] + __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] - KEY_PATTERN = r'recaptcha(?:/api|\.net)/(?:challenge|noscript)\?k=([\w-]+)' - KEY_AJAX_PATTERN = r'Recaptcha\.create\s*\(\s*["\']([\w-]+)' + AYL_PATTERN = r'Adyoulike\.create\s*\((.+?)\)' + CALLBACK_PATTERN = r'(Adyoulike\.g\._jsonp_\d+)' def detect_key(self, html=None): - if not html: - if hasattr(self.plugin, "html") and self.plugin.html: - html = self.plugin.html - else: - errmsg = _("ReCaptcha html not found") - self.plugin.fail(errmsg) - raise TypeError(errmsg) + html = html or self.retrieve_html() - m = re.search(self.KEY_PATTERN, html) or re.search(self.KEY_AJAX_PATTERN, html) - if m: - self.key = m.group(1).strip() - self.plugin.logDebug("ReCaptcha key: %s" % self.key) - return 
self.key + m = re.search(self.AYL_PATTERN, html) + n = re.search(self.CALLBACK_PATTERN, html) + if m and n: + self.key = (m.group(1).strip(), n.group(1).strip()) + self.logDebug("Ayl: %s | Callback: %s" % self.key) + return self.key #: key is the tuple(ayl, callback) else: - self.plugin.logDebug("ReCaptcha key not found") + self.logWarning("Ayl or callback pattern not found") return None - def challenge(self, key=None): - if not key: - if self.detect_key(): - key = self.key - else: - errmsg = _("ReCaptcha key not found") - self.plugin.fail(errmsg) - raise TypeError(errmsg) + def challenge(self, key=None, html=None): + ayl, callback = key or self.retrieve_key(html) + + # {"adyoulike":{"key":"P~zQ~O0zV0WTiAzC-iw0navWQpCLoYEP"}, + # "all":{"element_id":"ayl_private_cap_92300","lang":"fr","env":"prod"}} + ayl = json_loads(ayl) - html = self.plugin.req.load("http://www.google.com/recaptcha/api/challenge", get={'k': key}) + html = self.plugin.req.load("http://api-ayl.appspot.com/challenge", + get={'key' : ayl['adyoulike']['key'], + 'env' : ayl['all']['env'], + 'callback': callback}) try: - challenge = re.search("challenge : '(.+?)',", html).group(1) - server = re.search("server : '(.+?)',", html).group(1) - except: - errmsg = _("ReCaptcha challenge pattern not found") - self.plugin.fail(errmsg) - raise ValueError(errmsg) + challenge = json_loads(re.search(callback + r'\s*\((.+?)\)', html).group(1)) - self.plugin.logDebug("ReCaptcha challenge: %s" % challenge) + except AttributeError: + self.fail(_("AdYouLike challenge pattern not found")) - return challenge, self.result(server, challenge) + self.logDebug("Challenge: %s" % challenge) + + return self.result(ayl, challenge), challenge def result(self, server, challenge): - result = self.plugin.decryptCaptcha("%simage" % server, - get={'c': challenge}, - cookies=True, - forceUser=True, - imgtype="jpg") + # Adyoulike.g._jsonp_5579316662423138 + # ({"translations":{"fr":{"instructions_visual":"Recopiez « Soonnight » ci-dessous :"}}, + # "site_under":true,"clickable":true,"pixels":{"VIDEO_050":[],"DISPLAY":[],"VIDEO_000":[],"VIDEO_100":[], + # "VIDEO_025":[],"VIDEO_075":[]},"medium_type":"image/adyoulike", + # "iframes":{"big":"<iframe src=\"http://www.soonnight.com/campagn.html\" scrolling=\"no\" + # height=\"250\" width=\"300\" frameborder=\"0\"></iframe>"},"shares":{},"id":256, + # "token":"e6QuI4aRSnbIZJg02IsV6cp4JQ9~MjA1","formats":{"small":{"y":300,"x":0,"w":300,"h":60}, + # "big":{"y":0,"x":0,"w":300,"h":250},"hover":{"y":440,"x":0,"w":300,"h":60}}, + # "tid":"SqwuAdxT1EZoi4B5q0T63LN2AkiCJBg5"}) - self.plugin.logDebug("ReCaptcha result: %s" % result) + if isinstance(server, basestring): + server = json_loads(server) + + if isinstance(challenge, basestring): + challenge = json_loads(challenge) + + try: + instructions_visual = challenge['translations'][server['all']['lang']]['instructions_visual'] + result = re.search(u'«(.+?)»', instructions_visual).group(1).strip() + + except AttributeError: + self.fail(_("AdYouLike result not found")) + + result = {'_ayl_captcha_engine' : "adyoulike", + '_ayl_env' : server['all']['env'], + '_ayl_tid' : challenge['tid'], + '_ayl_token_challenge': challenge['token'], + '_ayl_response' : response} + + self.logDebug("Result: %s" % result) return result class AdsCaptcha(CaptchaService): __name__ = "AdsCaptcha" - __version__ = "0.06" + __type__ = "captcha" + __version__ = "0.09" __description__ = """AdsCaptcha captcha service plugin""" __license__ = "GPLv3" __authors__ = [("pyLoad Team", "admin@pyload.org")] - 
CAPTCHAID_PATTERN = r'api\.adscaptcha\.com/Get\.aspx\?[^"\']*CaptchaId=(\d+)' - PUBLICKEY_PATTERN = r'api\.adscaptcha\.com/Get\.aspx\?[^"\']*PublicKey=([\w-]+)' + CAPTCHAID_PATTERN = r'api\.adscaptcha\.com/Get\.aspx\?.*?CaptchaId=(\d+)' + PUBLICKEY_PATTERN = r'api\.adscaptcha\.com/Get\.aspx\?.*?PublicKey=([\w-]+)' def detect_key(self, html=None): - if not html: - if hasattr(self.plugin, "html") and self.plugin.html: - html = self.plugin.html - else: - errmsg = _("AdsCaptcha html not found") - self.plugin.fail(errmsg) - raise TypeError(errmsg) + html = html or self.retrieve_html() m = re.search(self.PUBLICKEY_PATTERN, html) n = re.search(self.CAPTCHAID_PATTERN, html) if m and n: self.key = (m.group(1).strip(), n.group(1).strip()) #: key is the tuple(PublicKey, CaptchaId) - self.plugin.logDebug("AdsCaptcha key|id: %s | %s" % self.key) + self.logDebug("Key: %s | ID: %s" % self.key) return self.key else: - self.plugin.logDebug("AdsCaptcha key or id not found") + self.logWarning("Key or id pattern not found") return None - def challenge(self, key=None): - if not key: - if self.detect_key(): - key = self.key - else: - errmsg = _("AdsCaptcha key not found") - self.plugin.fail(errmsg) - raise TypeError(errmsg) - - PublicKey, CaptchaId = key + def challenge(self, key=None, html=None): + PublicKey, CaptchaId = key or self.retrieve_key(html) - html = self.plugin.req.load("http://api.adscaptcha.com/Get.aspx", get={'CaptchaId': CaptchaId, 'PublicKey': PublicKey}) + html = self.plugin.req.load("http://api.adscaptcha.com/Get.aspx", + get={'CaptchaId': CaptchaId, + 'PublicKey': PublicKey}) try: challenge = re.search("challenge: '(.+?)',", html).group(1) server = re.search("server: '(.+?)',", html).group(1) - except: - errmsg = _("AdsCaptcha challenge pattern not found") - self.plugin.fail(errmsg) - raise ValueError(errmsg) - self.plugin.logDebug("AdsCaptcha challenge: %s" % challenge) + except AttributeError: + self.fail(_("AdsCaptcha challenge pattern not found")) + + self.logDebug("Challenge: %s" % challenge) - return challenge, self.result(server, challenge) + return self.result(server, challenge), challenge def result(self, server, challenge): result = self.plugin.decryptCaptcha("%sChallenge.aspx" % server, - get={'cid': challenge, 'dummy': random()}, + get={'cid': challenge, 'dummy': random.random()}, cookies=True, imgtype="jpg") - self.plugin.logDebug("AdsCaptcha result: %s" % result) + self.logDebug("Result: %s" % result) return result -class SolveMedia(CaptchaService): - __name__ = "SolveMedia" - __version__ = "0.06" +class ReCaptcha(CaptchaService): + __name__ = "ReCaptcha" + __type__ = "captcha" + __version__ = "0.17" - __description__ = """SolveMedia captcha service plugin""" + __description__ = """ReCaptcha captcha service plugin""" __license__ = "GPLv3" - __authors__ = [("pyLoad Team", "admin@pyload.org")] + __authors__ = [("pyLoad Team", "admin@pyload.org"), + ("Walter Purcaro", "vuolter@gmail.com"), + ("zapp-brannigan", "fuerst.reinje@web.de")] - KEY_PATTERN = r'api\.solvemedia\.com/papi/challenge\.(?:no)?script\?k=(.+?)["\']' + KEY_V2_PATTERN = r'(?:data-sitekey=["\']|["\']sitekey["\']:\s*["\'])([\w-]+)' + KEY_V1_PATTERN = r'(?:recaptcha(?:/api|\.net)/(?:challenge|noscript)\?k=|Recaptcha\.create\s*\(\s*["\'])([\w-]+)' - def challenge(self, key=None): - if not key: - if self.detect_key(): - key = self.key - else: - errmsg = _("SolveMedia key not found") - self.plugin.fail(errmsg) - raise TypeError(errmsg) + def detect_key(self, html=None): + html = html or self.retrieve_html() + + m = 
re.search(self.KEY_V2_PATTERN, html) or re.search(self.KEY_V1_PATTERN, html) + if m: + self.key = m.group(1).strip() + self.logDebug("Key: %s" % self.key) + return self.key + else: + self.logWarning("Key pattern not found") + return None + + + def challenge(self, key=None, html=None, version=None): + key = key or self.retrieve_key(html) + + if version in (1, 2): + return getattr(self, "_challenge_v%s" % version)(key) + + else: + return self.challenge(key, + version=2 if re.search(self.KEY_V2_PATTERN, html or self.retrieve_html()) else 1) - html = self.plugin.req.load("http://api.solvemedia.com/papi/challenge.noscript", get={'k': key}) + + def _challenge_v1(self, key): + html = self.plugin.req.load("http://www.google.com/recaptcha/api/challenge", + get={'k': key}) try: - challenge = re.search(r'<input type=hidden name="adcopy_challenge" id="adcopy_challenge" value="([^"]+)">', - html).group(1) - server = "http://api.solvemedia.com/papi/media" - except: - errmsg = _("SolveMedia challenge pattern not found") - self.plugin.fail(errmsg) - raise ValueError(errmsg) + challenge = re.search("challenge : '(.+?)',", html).group(1) + server = re.search("server : '(.+?)',", html).group(1) - self.plugin.logDebug("SolveMedia challenge: %s" % challenge) + except AttributeError: + self.fail(_("ReCaptcha challenge pattern not found")) - return challenge, self.result(server, challenge) + self.logDebug("Challenge: %s" % challenge) + return self.result(server, challenge, key) - def result(self, server, challenge): - result = self.plugin.decryptCaptcha(server, get={'c': challenge}, imgtype="gif") - self.plugin.logDebug("SolveMedia result: %s" % result) + def result(self, server, challenge, key): + self.plugin.req.load("http://www.google.com/recaptcha/api/js/recaptcha.js") + html = self.plugin.req.load("http://www.google.com/recaptcha/api/reload", + get={'c' : challenge, + 'k' : key, + 'reason': "i", + 'type' : "image"}) - return result + try: + challenge = re.search('\(\'(.+?)\',',html).group(1) + except AttributeError: + self.fail(_("ReCaptcha second challenge pattern not found")) -class AdYouLike(CaptchaService): - __name__ = "AdYouLike" - __version__ = "0.02" + self.logDebug("Second challenge: %s" % challenge) + result = self.plugin.decryptCaptcha("%simage" % server, + get={'c': challenge}, + cookies=True, + forceUser=True, + imgtype="jpg") - __description__ = """AdYouLike captcha service plugin""" + self.logDebug("Result: %s" % result) + + return result, challenge + + + def _collectApiInfo(self): + html = self.plugin.req.load("http://www.google.com/recaptcha/api.js") + a = re.search(r'po.src = \'(.*?)\';', html).group(1) + vers = a.split("/")[5] + + self.logDebug("API version: %s" % vers) + + language = a.split("__")[1].split(".")[0] + + self.logDebug("API language: %s" % language) + + html = self.plugin.req.load("https://apis.google.com/js/api.js") + b = re.search(r'"h":"(.*?)","', html).group(1) + jsh = b.decode('unicode-escape') + + self.logDebug("API jsh-string: %s" % jsh) + + return vers, language, jsh + + + def _prepareTimeAndRpc(self): + self.plugin.req.load("http://www.google.com/recaptcha/api2/demo") + + millis = int(round(time.time() * 1000)) + + self.logDebug("Time: %s" % millis) + + rand = random.randint(1, 99999999) + a = "0.%s" % str(rand * 2147483647) + rpc = int(100000000 * float(a)) + + self.logDebug("Rpc-token: %s" % rpc) + + return millis, rpc + + + def _challenge_v2(self, key, parent=None): + if parent is None: + try: + parent = urlparse.urljoin("http://", 
urlparse.urlparse(self.plugin.pyfile.url).netloc) + + except Exception: + parent = "" + + botguardstring = "!A" + vers, language, jsh = self._collectApiInfo() + millis, rpc = self._prepareTimeAndRpc() + + html = self.plugin.req.load("https://www.google.com/recaptcha/api2/anchor", + get={'k' : key, + 'hl' : language, + 'v' : vers, + 'usegapi' : "1", + 'jsh' : "%s#id=IO_%s" % (jsh, millis), + 'parent' : parent, + 'pfname' : "", + 'rpctoken': rpc}) + + token1 = re.search(r'id="recaptcha-token" value="(.*?)">', html) + self.logDebug("Token #1: %s" % token1.group(1)) + + html = self.plugin.req.load("https://www.google.com/recaptcha/api2/frame", + get={'c' : token1.group(1), + 'hl' : language, + 'v' : vers, + 'bg' : botguardstring, + 'k' : key, + 'usegapi': "1", + 'jsh' : jsh}).decode('unicode-escape') + + token2 = re.search(r'"finput","(.*?)",', html) + self.logDebug("Token #2: %s" % token2.group(1)) + + token3 = re.search(r'"rresp","(.*?)",', html) + self.logDebug("Token #3: %s" % token3.group(1)) + + millis_captcha_loading = int(round(time.time() * 1000)) + captcha_response = self.plugin.decryptCaptcha("https://www.google.com/recaptcha/api2/payload", + get={'c':token3.group(1), 'k':key}, + cookies=True, + forceUser=True) + response = b64encode('{"response":"%s"}' % captcha_response) + + self.logDebug("Result: %s" % response) + + timeToSolve = int(round(time.time() * 1000)) - millis_captcha_loading + timeToSolveMore = timeToSolve + int(float("0." + str(random.randint(1, 99999999))) * 500) + + html = self.plugin.req.load("https://www.google.com/recaptcha/api2/userverify", + post={'k' : key, + 'c' : token3.group(1), + 'response': response, + 't' : timeToSolve, + 'ct' : timeToSolveMore, + 'bg' : botguardstring}) + + token4 = re.search(r'"uvresp","(.*?)",', html) + self.logDebug("Token #4: %s" % token4.group(1)) + + result = token4.group(1) + + return result, None + + +class SolveMedia(CaptchaService): + __name__ = "SolveMedia" + __type__ = "captcha" + __version__ = "0.13" + + __description__ = """SolveMedia captcha service plugin""" __license__ = "GPLv3" - __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] + __authors__ = [("pyLoad Team", "admin@pyload.org")] - AYL_PATTERN = r'Adyoulike\.create\s*\((.+?)\)' - CALLBACK_PATTERN = r'(Adyoulike\.g\._jsonp_\d+)' + KEY_PATTERN = r'api\.solvemedia\.com/papi/challenge\.(?:no)?script\?k=(.+?)["\']' def detect_key(self, html=None): - if not html: - if hasattr(self.plugin, "html") and self.plugin.html: - html = self.plugin.html - else: - errmsg = _("AdYouLike html not found") - self.plugin.fail(errmsg) - raise TypeError(errmsg) + html = html or self.retrieve_html() - m = re.search(self.AYL_PATTERN, html) - n = re.search(self.CALLBACK_PATTERN, html) - if m and n: - self.key = (m.group(1).strip(), n.group(1).strip()) - self.plugin.logDebug("AdYouLike ayl|callback: %s | %s" % self.key) - return self.key #: key is the tuple(ayl, callback) + m = re.search(self.KEY_PATTERN, html) + if m: + self.key = m.group(1).strip() + self.logDebug("Key: %s" % self.key) + return self.key else: - self.plugin.logDebug("AdYouLike ayl or callback not found") + self.logWarning("Key pattern not found") return None - def challenge(self, key=None): - if not key: - if self.detect_key(): - key = self.key - else: - errmsg = _("AdYouLike key not found") - self.plugin.fail(errmsg) - raise TypeError(errmsg) + def challenge(self, key=None, html=None): + key = key or self.retrieve_key(html) - ayl, callback = key + html = 
self.plugin.req.load("http://api.solvemedia.com/papi/challenge.noscript", + get={'k': key}) - # {"adyoulike":{"key":"P~zQ~O0zV0WTiAzC-iw0navWQpCLoYEP"}, - # "all":{"element_id":"ayl_private_cap_92300","lang":"fr","env":"prod"}} - ayl = json_loads(ayl) + for i in xrange(1, 11): + try: + magic = re.search(r'name="magic" value="(.+?)"', html).group(1) - html = self.plugin.req.load("http://api-ayl.appspot.com/challenge", - get={'key' : ayl['adyoulike']['key'], - 'env' : ayl['all']['env'], - 'callback': callback}) - try: - challenge = json_loads(re.search(callback + r'\s*\((.+?)\)', html).group(1)) - except: - errmsg = _("AdYouLike challenge pattern not found") - self.plugin.fail(errmsg) - raise ValueError(errmsg) + except AttributeError: + self.logWarning("Magic pattern not found") + magic = None - self.plugin.logDebug("AdYouLike challenge: %s" % challenge) + try: + challenge = re.search(r'<input type=hidden name="adcopy_challenge" id="adcopy_challenge" value="(.+?)">', + html).group(1) - return self.result(ayl, challenge) + except AttributeError: + self.fail(_("SolveMedia challenge pattern not found")) + else: + self.logDebug("Challenge: %s" % challenge) + + try: + result = self.result("http://api.solvemedia.com/papi/media", challenge) + + except Fail, e: + self.logWarning(e) + self.plugin.invalidCaptcha() + result = None + + html = self.plugin.req.load("http://api.solvemedia.com/papi/verify.noscript", + post={'adcopy_response' : result, + 'k' : key, + 'l' : "en", + 't' : "img", + 's' : "standard", + 'magic' : magic, + 'adcopy_challenge': challenge, + 'ref' : self.plugin.pyfile.url}) + try: + redirect = re.search(r'URL=(.+?)">', html).group(1) + + except AttributeError: + self.fail(_("SolveMedia verify pattern not found")) - def result(self, server, challenge): - # Adyoulike.g._jsonp_5579316662423138 - # ({"translations":{"fr":{"instructions_visual":"Recopiez « Soonnight » ci-dessous :"}}, - # "site_under":true,"clickable":true,"pixels":{"VIDEO_050":[],"DISPLAY":[],"VIDEO_000":[],"VIDEO_100":[], - # "VIDEO_025":[],"VIDEO_075":[]},"medium_type":"image/adyoulike", - # "iframes":{"big":"<iframe src=\"http://www.soonnight.com/campagn.html\" scrolling=\"no\" - # height=\"250\" width=\"300\" frameborder=\"0\"></iframe>"},"shares":{},"id":256, - # "token":"e6QuI4aRSnbIZJg02IsV6cp4JQ9~MjA1","formats":{"small":{"y":300,"x":0,"w":300,"h":60}, - # "big":{"y":0,"x":0,"w":300,"h":250},"hover":{"y":440,"x":0,"w":300,"h":60}}, - # "tid":"SqwuAdxT1EZoi4B5q0T63LN2AkiCJBg5"}) + else: + if "error" in html: + self.logWarning("Captcha code was invalid") + self.logDebug("Retry #%d" % i) + html = self.plugin.req.load(redirect) + else: + break - if isinstance(server, basestring): - server = json_loads(server) + else: + self.fail(_("SolveMedia max retries exceeded")) - if isinstance(challenge, basestring): - challenge = json_loads(challenge) + return result, challenge - try: - instructions_visual = challenge['translations'][server['all']['lang']]['instructions_visual'] - result = re.search(u'«(.+?)»', instructions_visual).group(1).strip() - except: - errmsg = _("AdYouLike result not found") - self.plugin.fail(errmsg) - raise ValueError(errmsg) - result = {'_ayl_captcha_engine' : "adyoulike", - '_ayl_env' : server['all']['env'], - '_ayl_tid' : challenge['tid'], - '_ayl_token_challenge': challenge['token'], - '_ayl_response' : response} + def result(self, server, challenge): + result = self.plugin.decryptCaptcha(server, + get={'c': challenge}, + cookies=True, + imgtype="gif") - self.plugin.logDebug("AdYouLike result: 
%s" % result) + self.logDebug("Result: %s" % result) return result diff --git a/module/plugins/internal/DeadCrypter.py b/module/plugins/internal/DeadCrypter.py index 07c5c3881..c93447164 100644 --- a/module/plugins/internal/DeadCrypter.py +++ b/module/plugins/internal/DeadCrypter.py @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- -from urllib import unquote -from urlparse import urlparse - from module.plugins.internal.SimpleCrypter import create_getInfo from module.plugins.Crypter import Crypter as _Crypter @@ -10,7 +7,7 @@ from module.plugins.Crypter import Crypter as _Crypter class DeadCrypter(_Crypter): __name__ = "DeadCrypter" __type__ = "crypter" - __version__ = "0.04" + __version__ = "0.05" __pattern__ = r'^unmatchable$' @@ -20,8 +17,10 @@ class DeadCrypter(_Crypter): @classmethod - def getInfo(cls, url="", html=""): - return {'name': urlparse(unquote(url)).path.split('/')[-1] or _("Unknown"), 'size': 0, 'status': 1, 'url': url} + def apiInfo(cls, url): + api = super(DeadCrypter, cls).apiInfo(url) + api['status'] = 1 + return api def setup(self): diff --git a/module/plugins/internal/DeadHoster.py b/module/plugins/internal/DeadHoster.py index 6f3252f70..f159ae5fa 100644 --- a/module/plugins/internal/DeadHoster.py +++ b/module/plugins/internal/DeadHoster.py @@ -1,8 +1,5 @@ # -*- coding: utf-8 -*- -from urllib import unquote -from urlparse import urlparse - from module.plugins.internal.SimpleHoster import create_getInfo from module.plugins.Hoster import Hoster as _Hoster @@ -10,7 +7,7 @@ from module.plugins.Hoster import Hoster as _Hoster class DeadHoster(_Hoster): __name__ = "DeadHoster" __type__ = "hoster" - __version__ = "0.14" + __version__ = "0.15" __pattern__ = r'^unmatchable$' @@ -20,8 +17,10 @@ class DeadHoster(_Hoster): @classmethod - def getInfo(cls, url="", html=""): - return {'name': urlparse(unquote(url)).path.split('/')[-1] or _("Unknown"), 'size': 0, 'status': 1, 'url': url} + def apiInfo(cls, url): + api = super(DeadHoster, cls).apiInfo(url) + api['status'] = 1 + return api def setup(self): diff --git a/module/plugins/internal/Extractor.py b/module/plugins/internal/Extractor.py new file mode 100644 index 000000000..159b65ffe --- /dev/null +++ b/module/plugins/internal/Extractor.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- + +import os +import re + +from module.PyFile import PyFile + + +class ArchiveError(Exception): + pass + + +class CRCError(Exception): + pass + + +class PasswordError(Exception): + pass + + +class Extractor: + __name__ = "Extractor" + __version__ = "0.24" + + __description__ = """Base extractor plugin""" + __license__ = "GPLv3" + __authors__ = [("Walter Purcaro", "vuolter@gmail.com"), + ("Immenz" , "immenz@gmx.net" )] + + + EXTENSIONS = [] + VERSION = "" + REPAIR = False + + + @classmethod + def isArchive(cls, filename): + name = os.path.basename(filename).lower() + return any(name.endswith(ext) for ext in cls.EXTENSIONS) + + + @classmethod + def isMultipart(cls, filename): + return False + + + @classmethod + def isUsable(cls): + """ Check if system statisfy dependencies + :return: boolean + """ + return None + + + @classmethod + def getTargets(cls, files_ids): + """ Filter suited targets from list of filename id tuple list + :param files_ids: List of filepathes + :return: List of targets, id tuple list + """ + targets = [] + processed = [] + + for fname, id, fout in files_ids: + if cls.isArchive(fname): + pname = re.sub(cls.re_multipart, '', fname) if cls.isMultipart(fname) else os.path.splitext(fname)[0] + if pname not in processed: + processed.append(pname) 
+ targets.append((fname, id, fout)) + return targets + + + def __init__(self, manager, filename, out, + fullpath=True, + overwrite=False, + excludefiles=[], + renice=0, + delete='No', + keepbroken=False, + fid=None): + """ Initialize extractor for specific file """ + self.manager = manager + self.filename = filename + self.out = out + self.fullpath = fullpath + self.overwrite = overwrite + self.excludefiles = excludefiles + self.renice = renice + self.delete = delete + self.keepbroken = keepbroken + self.files = [] #: Store extracted files here + + pyfile = self.manager.core.files.getFile(fid) if fid else None + self.notifyProgress = lambda x: pyfile.setProgress(x) if pyfile else lambda x: None + + + def init(self): + """ Initialize additional data structures """ + pass + + + def check(self): + """Quick Check by listing content of archive. + Raises error if password is needed, integrity is questionable or else. + + :raises PasswordError + :raises CRCError + :raises ArchiveError + """ + raise NotImplementedError + + def verify(self): + """Testing with Extractors buildt-in method + Raises error if password is needed, integrity is questionable or else. + + :raises PasswordError + :raises CRCError + :raises ArchiveError + """ + raise NotImplementedError + + + def repair(self): + return None + + + def extract(self, password=None): + """Extract the archive. Raise specific errors in case of failure. + + :param progress: Progress function, call this to update status + :param password password to use + :raises PasswordError + :raises CRCError + :raises ArchiveError + :return: + """ + raise NotImplementedError + + + def getDeleteFiles(self): + """Return list of files to delete, do *not* delete them here. + + :return: List with paths of files to delete + """ + return [self.filename] + + + def list(self, password=None): + """Populate self.files at some point while extracting""" + return self.files diff --git a/module/plugins/internal/MultiHook.py b/module/plugins/internal/MultiHook.py index dcf1c3383..01ff4b07d 100644 --- a/module/plugins/internal/MultiHook.py +++ b/module/plugins/internal/MultiHook.py @@ -1,95 +1,169 @@ # -*- coding: utf-8 -*- import re +import time +import traceback from module.plugins.Hook import Hook -from module.utils import remove_chars +from module.utils import decode, remove_chars class MultiHook(Hook): __name__ = "MultiHook" __type__ = "hook" - __version__ = "0.22" + __version__ = "0.45" - __description__ = """Hook plugin for MultiHoster""" + __config__ = [("pluginmode" , "all;listed;unlisted", "Use for plugins" , "all"), + ("pluginlist" , "str" , "Plugin list (comma separated)", "" ), + ("reload" , "bool" , "Reload plugin list" , True ), + ("reloadinterval", "int" , "Reload interval in hours" , 12 )] + + __description__ = """Hook plugin for multi hoster/crypter""" __license__ = "GPLv3" - __authors__ = [("pyLoad Team", "admin@pyload.org")] - - - interval = 12 * 60 * 60 #: reload hosters every 12h - - HOSTER_REPLACEMENTS = [("1fichier.com" , "onefichier.com"), - ("2shared.com" , "twoshared.com" ), - ("4shared.com" , "fourshared.com"), - ("cloudnator.com" , "shragle.com" ), - ("easy-share.com" , "crocko.com" ), - ("fileparadox.com", "fileparadox.in"), - ("freakshare.net" , "freakshare.com"), - ("hellshare.com" , "hellshare.cz" ), - ("ifile.it" , "filecloud.io" ), - ("nowdownload.ch" , "nowdownload.sx"), - ("nowvideo.co" , "nowvideo.sx" ), - ("putlocker.com" , "firedrive.com" ), - ("share-rapid.cz" , "multishare.cz" ), - ("sharerapid.cz" , "multishare.cz" ), - ("ul.to" , 
"uploaded.to" ), - ("uploaded.net" , "uploaded.to" )] - HOSTER_EXCLUDED = [] + __authors__ = [("pyLoad Team" , "admin@pyload.org" ), + ("Walter Purcaro", "vuolter@gmail.com")] + + + MIN_RELOAD_INTERVAL = 1 * 60 * 60 #: 1 hour + + DOMAIN_REPLACEMENTS = [(r'180upload\.com' , "hundredeightyupload.com"), + (r'bayfiles\.net' , "bayfiles.com" ), + (r'cloudnator\.com' , "shragle.com" ), + (r'dfiles\.eu' , "depositfiles.com" ), + (r'easy-share\.com' , "crocko.com" ), + (r'freakshare\.net' , "freakshare.com" ), + (r'hellshare\.com' , "hellshare.cz" ), + (r'ifile\.it' , "filecloud.io" ), + (r'nowdownload\.\w+', "nowdownload.sx" ), + (r'nowvideo\.\w+' , "nowvideo.sx" ), + (r'putlocker\.com' , "firedrive.com" ), + (r'share-?rapid\.cz', "multishare.cz" ), + (r'ul\.to' , "uploaded.to" ), + (r'uploaded\.net' , "uploaded.to" ), + (r'uploadhero\.co' , "uploadhero.com" ), + (r'zshares\.net' , "zshare.net" ), + (r'^1' , "one" ), + (r'^2' , "two" ), + (r'^3' , "three" ), + (r'^4' , "four" ), + (r'^5' , "five" ), + (r'^6' , "six" ), + (r'^7' , "seven" ), + (r'^8' , "eight" ), + (r'^9' , "nine" ), + (r'^0' , "zero" )] def setup(self): - self.hosters = [] + self.info = {} #@TODO: Remove in 0.4.10 + + self.plugins = [] self.supported = [] self.new_supported = [] + self.account = None + self.pluginclass = None + self.pluginmodule = None + self.pluginname = None + self.plugintype = None + + self.initPlugin() + + + def initPlugin(self): + self.pluginname = self.__name__.rsplit("Hook", 1)[0] + plugin, self.plugintype = self.core.pluginManager.findPlugin(self.pluginname) + + if plugin: + self.pluginmodule = self.core.pluginManager.loadModule(self.plugintype, self.pluginname) + self.pluginclass = getattr(self.pluginmodule, self.pluginname) + else: + self.logWarning("Hook plugin will be deactivated due missing plugin reference") + self.setConfig('activated', False) + + + def loadAccount(self): + self.account = self.core.accountManager.getAccountPlugin(self.pluginname) + + if self.account and not self.account.canUse(): + self.account = None - def getConfig(self, option, default=''): + if not self.account and hasattr(self.pluginclass, "LOGIN_ACCOUNT") and self.pluginclass.LOGIN_ACCOUNT: + self.logWarning("Hook plugin will be deactivated due missing account reference") + self.setConfig('activated', False) + + + def getURL(self, *args, **kwargs): #@TODO: Remove in 0.4.10 + """ see HTTPRequest for argument list """ + h = pyreq.getHTTPRequest(timeout=120) + try: + if not 'decode' in kwargs: + kwargs['decode'] = True + rep = h.load(*args, **kwargs) + finally: + h.close() + + return rep + + + def getConfig(self, option, default=''): #@TODO: Remove in 0.4.10 """getConfig with default value - sublass may not implements all config options""" try: return self.getConf(option) + except KeyError: return default - def getHosterCached(self): - if not self.hosters: + def pluginsCached(self): + if self.plugins: + return self.plugins + + for _i in xrange(2): try: - hosterSet = self.toHosterSet(self.getHoster()) - set(self.HOSTER_EXCLUDED) + pluginset = self._pluginSet(self.getHosters()) + break + except Exception, e: - self.logError(e) - return [] + self.logDebug(e, "Waiting 1 minute and retry") + time.sleep(60) + else: + self.logWarning(_("Fallback to default reload interval due plugin parse error")) + self.interval = self.MIN_RELOAD_INTERVAL + return list() - try: - configMode = self.getConfig('hosterListMode', 'all') - if configMode in ("listed", "unlisted"): - configSet = self.toHosterSet(self.getConfig('hosterList', '').replace('|', 
',').replace(';', ',').split(',')) + try: + configmode = self.getConfig("pluginmode", 'all') + if configmode in ("listed", "unlisted"): + pluginlist = self.getConfig("pluginlist", '').replace('|', ',').replace(';', ',').split(',') + configset = self._pluginSet(pluginlist) - if configMode == "listed": - hosterSet &= configSet - else: - hosterSet -= configSet + if configmode == "listed": + pluginset &= configset + else: + pluginset -= configset - except Exception, e: - self.logError(e) + except Exception, e: + self.logError(e) - self.hosters = list(hosterSet) + self.plugins = list(pluginset) - return self.hosters + return self.plugins - def toHosterSet(self, hosters): - hosters = set((str(x).strip().lower() for x in hosters)) + def _pluginSet(self, plugins): + regexp = re.compile(r'^[\w\-.^_]{3,63}\.[a-zA-Z]{2,}$', re.U) + plugins = [decode(p.strip()).lower() for p in plugins if regexp.match(p.strip())] - for rep in self.HOSTER_REPLACEMENTS: - if rep[0] in hosters: - hosters.remove(rep[0]) - hosters.add(rep[1]) + for r in self.DOMAIN_REPLACEMENTS: + rf, rt = r + repr = re.compile(rf, re.I|re.U) + plugins = [re.sub(rf, rt, p) if repr.match(p) else p for p in plugins] - hosters.discard('') - return hosters + return set(plugins) - def getHoster(self): + def getHosters(self): """Load list of supported hoster :return: List of domain names @@ -97,121 +171,118 @@ class MultiHook(Hook): raise NotImplementedError - def coreReady(self): - if self.cb: - self.core.scheduler.removeJob(self.cb) + #: Threaded _periodical, remove in 0.4.10 and use built-in flag for that + def _periodical(self): + try: + if self.isActivated(): + self.periodical() - self.setConfig("activated", True) #: config not in sync after plugin reload + except Exception, e: + self.core.log.error(_("Error executing hooks: %s") % str(e)) + if self.core.debug: + traceback.print_exc() - cfg_interval = self.getConfig("interval", None) #: reload interval in hours - if cfg_interval is not None: - self.interval = cfg_interval * 60 * 60 + self.cb = self.core.scheduler.addJob(self.interval, self._periodical) - if self.interval: - self._periodical() - else: - self.periodical() + def periodical(self): + """reload plugin list periodically""" + self.loadAccount() - def initPeriodical(self): - pass + if self.getConfig("reload", True): + self.interval = max(self.getConfig("reloadinterval", 12) * 60 * 60, self.MIN_RELOAD_INTERVAL) + else: + self.core.scheduler.removeJob(self.cb) + self.cb = None + self.logInfo(_("Reloading supported %s list") % self.plugintype) - def periodical(self): - """reload hoster list periodically""" - self.logInfo(_("Reloading supported hoster list")) + old_supported = self.supported - old_supported = self.supported self.supported = [] self.new_supported = [] - self.hosters = [] + self.plugins = [] self.overridePlugins() - old_supported = [hoster for hoster in old_supported if hoster not in self.supported] + old_supported = [plugin for plugin in old_supported if plugin not in self.supported] + if old_supported: - self.logDebug("UNLOAD", ", ".join(old_supported)) - for hoster in old_supported: - self.unloadHoster(hoster) + self.logDebug("Unload: %s" % ", ".join(old_supported)) + for plugin in old_supported: + self.unloadPlugin(plugin) def overridePlugins(self): - pluginMap = dict((name.lower(), name) for name in self.core.pluginManager.hosterPlugins.iterkeys()) - accountList = [name.lower() for name, data in self.core.accountManager.accounts.iteritems() if data] excludedList = [] - for hoster in self.getHosterCached(): - name = 
remove_chars(hoster, "-.") + if self.plugintype == "hoster": + pluginMap = dict((name.lower(), name) for name in self.core.pluginManager.hosterPlugins.iterkeys()) + accountList = [account.type.lower() for account in self.core.api.getAccounts(False) if account.valid and account.premium] + else: + pluginMap = {} + accountList = [name[::-1].replace("Folder"[::-1], "", 1).lower()[::-1] for name in self.core.pluginManager.crypterPlugins.iterkeys()] + + for plugin in self.pluginsCached(): + name = remove_chars(plugin, "-.") if name in accountList: - excludedList.append(hoster) + excludedList.append(plugin) else: if name in pluginMap: self.supported.append(pluginMap[name]) else: - self.new_supported.append(hoster) + self.new_supported.append(plugin) if not self.supported and not self.new_supported: - self.logError(_("No Hoster loaded")) + self.logError(_("No %s loaded") % self.plugintype) return - module = self.core.pluginManager.getPlugin(self.__name__) - klass = getattr(module, self.__name__) - # inject plugin plugin - self.logDebug("Overwritten Hosters", ", ".join(sorted(self.supported))) - for hoster in self.supported: - hdict = self.core.pluginManager.hosterPlugins[hoster] - hdict['new_module'] = module - hdict['new_name'] = self.__name__ + self.logDebug("Overwritten %ss: %s" % (self.plugintype, ", ".join(sorted(self.supported)))) + + for plugin in self.supported: + hdict = self.core.pluginManager.plugins[self.plugintype][plugin] + hdict['new_module'] = self.pluginmodule + hdict['new_name'] = self.pluginname if excludedList: - self.logInfo(_("The following hosters were not overwritten - account exists"), ", ".join(sorted(excludedList))) + self.logInfo(_("%ss not overwritten: %s") % (self.plugintype.capitalize(), ", ".join(sorted(excludedList)))) if self.new_supported: - hosters = sorted(self.new_supported) + plugins = sorted(self.new_supported) - self.logDebug("New Hosters", ", ".join(hosters)) + self.logDebug("New %ss: %s" % (self.plugintype, ", ".join(plugins))) # create new regexp - regexp = r'.*(%s).*' % "|".join([x.replace(".", "\.") for x in hosters]) - if hasattr(klass, "__pattern__") and isinstance(klass.__pattern__, basestring) and '://' in klass.__pattern__: - regexp = r'%s|%s' % (klass.__pattern__, regexp) + regexp = r'.*(?P<DOMAIN>%s).*' % "|".join(x.replace('.', '\.') for x in plugins) + if hasattr(self.pluginclass, "__pattern__") and isinstance(self.pluginclass.__pattern__, basestring) and '://' in self.pluginclass.__pattern__: + regexp = r'%s|%s' % (self.pluginclass.__pattern__, regexp) - self.logDebug("Regexp", regexp) + self.logDebug("Regexp: %s" % regexp) - hdict = self.core.pluginManager.hosterPlugins[self.__name__] + hdict = self.core.pluginManager.plugins[self.plugintype][self.pluginname] hdict['pattern'] = regexp hdict['re'] = re.compile(regexp) - def unloadHoster(self, hoster): - hdict = self.core.pluginManager.hosterPlugins[hoster] + def unloadPlugin(self, plugin): + hdict = self.core.pluginManager.plugins[self.plugintype][plugin] if "module" in hdict: - del hdict['module'] + hdict.pop('module', None) if "new_module" in hdict: - del hdict['new_module'] - del hdict['new_name'] + hdict.pop('new_module', None) + hdict.pop('new_name', None) def unload(self): - """Remove override for all hosters. Scheduler job is removed by hookmanager""" - for hoster in self.supported: - self.unloadHoster(hoster) + """Remove override for all plugins. 
Scheduler job is removed by hookmanager""" + for plugin in self.supported: + self.unloadPlugin(plugin) # reset pattern - klass = getattr(self.core.pluginManager.getPlugin(self.__name__), self.__name__) - hdict = self.core.pluginManager.hosterPlugins[self.__name__] - hdict['pattern'] = getattr(klass, "__pattern__", r'^unmatchable$') - hdict['re'] = re.compile(hdict['pattern']) + hdict = self.core.pluginManager.plugins[self.plugintype][self.pluginname] - - def downloadFailed(self, pyfile): - """remove plugin override if download fails but not if file is offline/temp.offline""" - if pyfile.hasStatus("failed") and self.getConfig("unloadFailing", True): - hdict = self.core.pluginManager.hosterPlugins[pyfile.pluginname] - if "new_name" in hdict and hdict['new_name'] == self.__name__: - self.logDebug("Unload MultiHook", pyfile.pluginname, hdict) - self.unloadHoster(pyfile.pluginname) - pyfile.setStatus("queued") + hdict['pattern'] = getattr(self.pluginclass, "__pattern__", r'^unmatchable$') + hdict['re'] = re.compile(hdict['pattern']) diff --git a/module/plugins/internal/MultiHoster.py b/module/plugins/internal/MultiHoster.py index e5c28d034..ff4414034 100644 --- a/module/plugins/internal/MultiHoster.py +++ b/module/plugins/internal/MultiHoster.py @@ -2,57 +2,117 @@ import re -from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo +from module.plugins.Plugin import Fail, Retry +from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, replace_patterns, set_cookies class MultiHoster(SimpleHoster): __name__ = "MultiHoster" __type__ = "hoster" - __version__ = "0.24" + __version__ = "0.40" __pattern__ = r'^unmatchable$' + __config__ = [("use_premium" , "bool", "Use premium account if available" , True), + ("revertfailed", "bool", "Revert to standard download if fails", True)] __description__ = """Multi hoster plugin""" __license__ = "GPLv3" __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] - DIRECT_LINK = True + LOGIN_ACCOUNT = True def setup(self): - self.chunkLimit = 1 - self.multiDL = self.premium + self.chunkLimit = 1 + self.multiDL = bool(self.account) + self.resumeDownload = self.premium + + + def prepare(self): + self.info = {} + self.html = "" + self.link = "" #@TODO: Move to hoster class in 0.4.10 + self.directDL = False #@TODO: Move to hoster class in 0.4.10 + + if not self.getConfig('use_premium', True): + self.retryFree() + + if self.LOGIN_ACCOUNT and not self.account: + self.fail(_("Required account not found")) + + self.req.setOption("timeout", 120) + + if isinstance(self.COOKIES, list): + set_cookies(self.req.cj, self.COOKIES) + + if self.DIRECT_LINK is None: + self.directDL = self.__pattern__ != r'^unmatchable$' and re.match(self.__pattern__, self.pyfile.url) + else: + self.directDL = self.DIRECT_LINK + + self.pyfile.url = replace_patterns(self.pyfile.url, self.URL_REPLACEMENTS) def process(self, pyfile): - self.prepare() + try: + self.prepare() - if self.directDL: - self.logDebug("Looking for direct download link...") - self.handleDirect() + if self.directDL: + self.checkInfo() + self.logDebug("Looking for direct download link...") + self.handleDirect(pyfile) - if not self.link and not self.lastDownload: - self.preload() + if not self.link and not self.lastDownload: + self.preload() - if self.premium and (not self.CHECK_TRAFFIC or self.checkTrafficLeft()): - self.logDebug("Handled as premium download") - self.handlePremium() + self.checkErrors() + self.checkStatus(getinfo=False) - else: - self.logDebug("Handled as free download") - 
self.handleFree() + if self.premium and (not self.CHECK_TRAFFIC or self.checkTrafficLeft()): + self.logDebug("Handled as premium download") + self.handlePremium(pyfile) - self.downloadLink(self.link) - self.checkFile() + elif not self.LOGIN_ACCOUNT or (not self.CHECK_TRAFFIC or self.checkTrafficLeft()): + self.logDebug("Handled as free download") + self.handleFree(pyfile) + + self.downloadLink(self.link, True) + self.checkFile() + + except Fail, e: #@TODO: Move to PluginThread in 0.4.10 + err = str(e) #@TODO: Recheck in 0.4.10 + + if self.premium: + self.logWarning(_("Premium download failed")) + self.retryFree() + + elif self.getConfig("revertfailed", True) \ + and "new_module" in self.core.pluginManager.hosterPlugins[self.__name__]: + hdict = self.core.pluginManager.hosterPlugins[self.__name__] + + tmp_module = hdict['new_module'] + tmp_name = hdict['new_name'] + hdict.pop('new_module', None) + hdict.pop('new_name', None) + + pyfile.initPlugin() + + hdict['new_module'] = tmp_module + hdict['new_name'] = tmp_name + + raise Retry(_("Revert to original hoster plugin")) + + else: + raise Fail(err) - def handlePremium(self): - return self.handleFree() + def handlePremium(self, pyfile): + return self.handleFree(pyfile) - def handleFree(self): + def handleFree(self, pyfile): if self.premium: raise NotImplementedError else: - self.logError(_("Required account not found")) + self.fail(_("Required premium account not found")) diff --git a/module/plugins/internal/SevenZip.py b/module/plugins/internal/SevenZip.py new file mode 100644 index 000000000..624f6c939 --- /dev/null +++ b/module/plugins/internal/SevenZip.py @@ -0,0 +1,153 @@ +# -*- coding: utf-8 -*- + +import os +import re +import subprocess + +from module.plugins.internal.UnRar import ArchiveError, CRCError, PasswordError, UnRar, renice +from module.utils import fs_encode, save_join + + +class SevenZip(UnRar): + __name__ = "SevenZip" + __version__ = "0.11" + + __description__ = """7-Zip extractor plugin""" + __license__ = "GPLv3" + __authors__ = [("Michael Nowak" , "" ), + ("Walter Purcaro", "vuolter@gmail.com")] + + + CMD = "7z" + VERSION = "" + + EXTENSIONS = [".7z", ".xz", ".zip", ".gz", ".gzip", ".tgz", ".bz2", ".bzip2", + ".tbz2", ".tbz", ".tar", ".wim", ".swm", ".lzma", ".rar", ".cab", + ".arj", ".z", ".taz", ".cpio", ".rpm", ".deb", ".lzh", ".lha", + ".chm", ".chw", ".hxs", ".iso", ".msi", ".doc", ".xls", ".ppt", + ".dmg", ".xar", ".hfs", ".exe", ".ntfs", ".fat", ".vhd", ".mbr", + ".squashfs", ".cramfs", ".scap"] + + + #@NOTE: there are some more uncovered 7z formats + re_filelist = re.compile(r'([\d\:]+)\s+([\d\:]+)\s+([\w\.]+)\s+(\d+)\s+(\d+)\s+(.+)') + re_wrongpwd = re.compile(r'(Can not open encrypted archive|Wrong password|Encrypted\s+\=\s+\+)', re.I) + re_wrongcrc = re.compile(r'CRC Failed|Can not open file', re.I) + re_version = re.compile(r'7-Zip\s(?:\[64\]\s)?(\d+\.\d+)', re.I) + + + @classmethod + def isUsable(cls): + if os.name == "nt": + cls.CMD = os.path.join(pypath, "7z.exe") + p = subprocess.Popen([cls.CMD], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + else: + p = subprocess.Popen([cls.CMD], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + + m = cls.re_version.search(out) + cls.VERSION = m.group(1) if m else '(version unknown)' + + return True + + + def verify(self, password): + # 7z can't distinguish crc and pw error in test + p = self.call_cmd("l", "-slt", fs_encode(self.filename)) + out, err = p.communicate() + + if self.re_wrongpwd.search(out): + raise 
PasswordError + + if self.re_wrongpwd.search(err): + raise PasswordError + + if self.re_wrongcrc.search(err): + raise CRCError(err) + + + + def check(self, password): + p = self.call_cmd("l", "-slt", fs_encode(self.filename)) + out, err = p.communicate() + + # check if output or error macthes the 'wrong password'-Regexp + if self.re_wrongpwd.search(out): + raise PasswordError + + if self.re_wrongcrc.search(out): + raise CRCError(_("Header protected")) + + + def repair(self): + return False + + + def extract(self, password=None): + command = "x" if self.fullpath else "e" + + p = self.call_cmd(command, '-o' + self.out, fs_encode(self.filename), password=password) + + renice(p.pid, self.renice) + + # communicate and retrieve stderr + self._progress(p) + err = p.stderr.read().strip() + + if err: + if self.re_wrongpwd.search(err): + raise PasswordError + + elif self.re_wrongcrc.search(err): + raise CRCError(err) + + else: #: raise error if anything is on stderr + raise ArchiveError(err) + + if p.returncode > 1: + raise ArchiveError(_("Process return code: %d") % p.returncode) + + self.files = self.list(password) + + + def list(self, password=None): + command = "l" if self.fullpath else "l" + + p = self.call_cmd(command, fs_encode(self.filename), password=password) + out, err = p.communicate() + + if "Can not open" in err: + raise ArchiveError(_("Cannot open file")) + + if p.returncode > 1: + raise ArchiveError(_("Process return code: %d") % p.returncode) + + result = set() + for groups in self.re_filelist.findall(out): + f = groups[-1].strip() + result.add(save_join(self.out, f)) + + return list(result) + + + def call_cmd(self, command, *xargs, **kwargs): + args = [] + + #overwrite flag + if self.overwrite: + args.append("-y") + + #set a password + if "password" in kwargs and kwargs["password"]: + args.append("-p%s" % kwargs["password"]) + else: + args.append("-p-") + + #@NOTE: return codes are not reliable, some kind of threading, cleanup whatever issue + call = [self.CMD, command] + args + list(xargs) + + self.manager.logDebug(" ".join(call)) + + p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return p diff --git a/module/plugins/internal/SimpleCrypter.py b/module/plugins/internal/SimpleCrypter.py index e0dd10585..b843a28f0 100644 --- a/module/plugins/internal/SimpleCrypter.py +++ b/module/plugins/internal/SimpleCrypter.py @@ -1,43 +1,40 @@ # -*- coding: utf-8 -*- import re - -from urlparse import urlparse +import urlparse from module.plugins.Crypter import Crypter from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, replace_patterns, set_cookies -from module.utils import fixup +from module.utils import fixup, html_unescape class SimpleCrypter(Crypter, SimpleHoster): __name__ = "SimpleCrypter" __type__ = "crypter" - __version__ = "0.33" + __version__ = "0.46" __pattern__ = r'^unmatchable$' - __config__ = [("use_subfolder", "bool", "Save package to subfolder", True), #: Overrides core.config['general']['folder_per_package'] - ("subfolder_per_package", "bool", "Create a subfolder for each package", True)] + __config__ = [("use_subfolder" , "bool", "Save package to subfolder" , True), #: Overrides core.config['general']['folder_per_package'] + ("subfolder_per_pack", "bool", "Create a subfolder for each package", True)] __description__ = """Simple decrypter plugin""" __license__ = "GPLv3" - __authors__ = [("stickell", "l.stickell@yahoo.it"), - ("zoidberg", "zoidberg@mujmail.cz"), - ("Walter Purcaro", "vuolter@gmail.com")] + __authors__ = 
[("Walter Purcaro", "vuolter@gmail.com")] """ Following patterns should be defined by each crypter: - LINK_PATTERN: group(1) must be a download link or a regex to catch more links + LINK_PATTERN: Download link or regex to catch links in group(1) example: LINK_PATTERN = r'<div class="link"><a href="(.+?)"' - NAME_PATTERN: (optional) folder name or webpage title + NAME_PATTERN: (optional) folder name or page title example: NAME_PATTERN = r'<title>Files of: (?P<N>[^<]+) folder</title>' - OFFLINE_PATTERN: (optional) Checks if the file is yet available online + OFFLINE_PATTERN: (optional) Checks if the page is unreachable example: OFFLINE_PATTERN = r'File (deleted|not found)' - TEMP_OFFLINE_PATTERN: (optional) Checks if the file is temporarily offline + TEMP_OFFLINE_PATTERN: (optional) Checks if the page is temporarily unreachable example: TEMP_OFFLINE_PATTERN = r'Server maintainance' @@ -71,25 +68,28 @@ class SimpleCrypter(Crypter, SimpleHoster): #@TODO: Remove in 0.4.10 def init(self): account_name = (self.__name__ + ".py").replace("Folder.py", "").replace(".py", "") - account = self.core.accountManager.getAccountPlugin(account_name) + account = self.pyfile.m.core.accountManager.getAccountPlugin(account_name) if account and account.canUse(): self.user, data = account.selectAccount() - self.req = account.getAccountRequest(self.user) - self.premium = account.isPremium(self.user) + self.req = account.getAccountRequest(self.user) + self.premium = account.isPremium(self.user) self.account = account def prepare(self): - if self.LOGIN_ACCOUNT and not self.account: - self.fail(_("Required account not found")) + self.pyfile.error = "" #@TODO: Remove in 0.4.10 + + self.info = {} + self.html = "" + self.links = [] #@TODO: Move to hoster class in 0.4.10 if self.LOGIN_PREMIUM and not self.premium: self.fail(_("Required premium account not found")) - self.info = {} - self.links = [] + if self.LOGIN_ACCOUNT and not self.account: + self.fail(_("Required account not found")) self.req.setOption("timeout", 120) @@ -103,36 +103,45 @@ class SimpleCrypter(Crypter, SimpleHoster): self.prepare() self.preload() - - if self.html is None: - self.fail(_("No html retrieved")) - self.checkInfo() self.links = self.getLinks() if hasattr(self, 'PAGES_PATTERN') and hasattr(self, 'loadPage'): - self.handleMultiPages() + self.handlePages(pyfile) self.logDebug("Package has %d links" % len(self.links)) if self.links: self.packages = [(self.info['name'], self.links, self.info['folder'])] + elif not self.urls and not self.packages: #@TODO: Remove in 0.4.10 + self.fail(_("No link grabbed")) + + + def checkNameSize(self, getinfo=True): + if not self.info or getinfo: + self.logDebug("File info (BEFORE): %s" % self.info) + self.info.update(self.getInfo(self.pyfile.url, self.html)) + self.logDebug("File info (AFTER): %s" % self.info) + + try: + url = self.info['url'].strip() + name = self.info['name'].strip() + if name and name != url: + self.pyfile.name = name - def checkNameSize(self): - name = self.info['name'] - url = self.info['url'] + except Exception: + pass - if name and name != url: - self.pyfile.name = name - else: - self.pyfile.name = self.info['name'] = urlparse(name).path.split('/')[-1] + try: + folder = self.info['folder'] = self.pyfile.name - folder = self.info['folder'] = self.pyfile.name + except Exception: + pass - self.logDebug("File name: %s" % self.pyfile.name, - "File folder: %s" % folder) + self.logDebug("File name: %s" % self.pyfile.name, + "File folder: %s" % self.pyfile.name) def getLinks(self): @@ -140,14 
+149,19 @@ class SimpleCrypter(Crypter, SimpleHoster): Returns the links extracted from self.html You should override this only if it's impossible to extract links using only the LINK_PATTERN. """ - return re.findall(self.LINK_PATTERN, self.html) + url_p = urlparse.urlparse(self.pyfile.url) + baseurl = "%s://%s" % (url_p.scheme, url_p.netloc) + + links = [urlparse.urljoin(baseurl, link) if not urlparse.urlparse(link).scheme else link \ + for link in re.findall(self.LINK_PATTERN, self.html)] + + return [html_unescape(l.strip().decode('unicode-escape')) for l in links] - def handleMultiPages(self): + def handlePages(self, pyfile): try: - m = re.search(self.PAGES_PATTERN, self.html) - pages = int(m.group(1)) - except: + pages = int(re.search(self.PAGES_PATTERN, self.html).group(1)) + except Exception: pages = 1 for p in xrange(2, pages + 1): diff --git a/module/plugins/internal/SimpleDereferer.py b/module/plugins/internal/SimpleDereferer.py new file mode 100644 index 000000000..1f04ab1c6 --- /dev/null +++ b/module/plugins/internal/SimpleDereferer.py @@ -0,0 +1,101 @@ +# -*- coding: utf-8 -*- + +import re +import urllib + +from module.plugins.Crypter import Crypter +from module.plugins.internal.SimpleHoster import create_getInfo, set_cookies +from module.utils import html_unescape + + +class SimpleDereferer(Crypter): + __name__ = "SimpleDereferer" + __type__ = "crypter" + __version__ = "0.13" + + __pattern__ = r'^unmatchable$' + __config__ = [] #@TODO: Remove in 0.4.10 + + __description__ = """Simple dereferer plugin""" + __license__ = "GPLv3" + __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] + + + """ + Following patterns should be defined by each crypter: + + LINK_PATTERN: Regex to catch the redirect url in group(1) + example: LINK_PATTERN = r'<div class="link"><a href="(.+?)"' + + OFFLINE_PATTERN: (optional) Checks if the page is unreachable + example: OFFLINE_PATTERN = r'File (deleted|not found)' + + TEMP_OFFLINE_PATTERN: (optional) Checks if the page is temporarily unreachable + example: TEMP_OFFLINE_PATTERN = r'Server maintainance' + + + You can override the getLinks method if you need a more sophisticated way to extract the redirect url. 
+ """ + + LINK_PATTERN = None + + TEXT_ENCODING = False + COOKIES = True + + + def handleDirect(self, pyfile): + header = self.load(pyfile.url, just_header=True, decode=True) + if 'location' in header and header['location']: + self.link = header['location'] + + + def decrypt(self, pyfile): + self.handleDirect(pyfile) + + if not self.link: + try: + self.link = urllib.unquote(re.match(self.__pattern__, pyfile.url).group('LINK')) + + except AttributeError: + self.prepare() + self.preload() + self.checkStatus() + + self.link = self.getLink() + + if self.link: + self.urls = [self.link] + + elif not self.urls and not self.packages: #@TODO: Remove in 0.4.10 + self.fail(_("No link grabbed")) + + + def prepare(self): + self.info = {} + self.html = "" + self.link = "" #@TODO: Move to hoster class in 0.4.10 + + self.req.setOption("timeout", 120) + + if isinstance(self.COOKIES, list): + set_cookies(self.req.cj, self.COOKIES) + + + def preload(self): + self.html = self.load(self.pyfile.url, cookies=bool(self.COOKIES), decode=not self.TEXT_ENCODING) + + if isinstance(self.TEXT_ENCODING, basestring): + self.html = unicode(self.html, self.TEXT_ENCODING) + + + def checkStatus(self): + if hasattr(self, "OFFLINE_PATTERN") and re.search(self.OFFLINE_PATTERN, self.html): + self.offline() + + elif hasattr(self, "TEMP_OFFLINE_PATTERN") and re.search(self.TEMP_OFFLINE_PATTERN, self.html): + self.tempOffline() + + + def getLink(self): + link = re.search(self.LINK_PATTERN, self.html).group(1) + return html_unescape(link.strip().decode('unicode-escape')) #@TODO: Move this check to plugin `load` method in 0.4.10 diff --git a/module/plugins/internal/SimpleHoster.py b/module/plugins/internal/SimpleHoster.py index 6726726e1..1d44a6642 100644 --- a/module/plugins/internal/SimpleHoster.py +++ b/module/plugins/internal/SimpleHoster.py @@ -1,18 +1,20 @@ # -*- coding: utf-8 -*- +import datetime +import mimetypes +import os import re - -from os.path import exists -from time import time -from urllib import unquote -from urlparse import urljoin, urlparse +import time +import urllib +import urlparse from module.PyFile import statusMap as _statusMap from module.network.CookieJar import CookieJar +from module.network.HTTPRequest import BadHeader from module.network.RequestFactory import getURL from module.plugins.Hoster import Hoster -from module.plugins.Plugin import Fail -from module.utils import fixup, fs_encode, parseFileSize +from module.plugins.Plugin import Fail, Retry +from module.utils import fixup, fs_encode, html_unescape, parseFileSize #@TODO: Adapt and move to PyFile in 0.4.10 @@ -25,7 +27,7 @@ def _error(self, reason, type): type = "unknown" msg = _("%s error") % type.strip().capitalize() if type else _("Error") - msg += ": %s" % reason.strip() if reason else "" + msg += (": %s" % reason.strip()) if reason else "" msg += _(" | Plugin may be out of date") raise Fail(msg) @@ -72,7 +74,7 @@ def parseHtmlForm(attr_str, html, input_names={}): if name: value = parseHtmlTagAttrValue("value", inputtag.group(1)) if not value: - inputs[name] = inputtag.group(3) or '' + inputs[name] = inputtag.group(3) or "" else: inputs[name] = value @@ -98,38 +100,77 @@ def parseHtmlForm(attr_str, html, input_names={}): return {}, None #: no matching form found -#: Deprecated +#@TODO: Remove in 0.4.10 def parseFileInfo(plugin, url="", html=""): if hasattr(plugin, "getInfo"): info = plugin.getInfo(url, html) res = info['name'], info['size'], info['status'], info['url'] else: - res = urlparse(unquote(url)).path.split('/')[-1] or _("Unknown"), 
0, 3, url + url = urllib.unquote(url) + url_p = urlparse.urlparse(url) + res = ((url_p.path.split('/')[-1] + or url_p.query.split('=', 1)[::-1][0].split('&', 1)[0] + or url_p.netloc.split('.', 1)[0]), + 0, + 3 if url else 8, + url) return res #@TODO: Remove in 0.4.10 -#@NOTE: Every plugin must have own parseInfos classmethod to work with 0.4.10 def create_getInfo(plugin): - if hasattr(plugin, "parseInfos"): - fn = lambda urls: [(info['name'], info['size'], info['status'], info['url']) for info in plugin.parseInfos(urls)] - else: - fn = lambda urls: [parseFileInfo(url) for url in urls] + def getInfo(urls): + for url in urls: + if hasattr(plugin, "URL_REPLACEMENTS"): + url = replace_patterns(url, plugin.URL_REPLACEMENTS) + yield parseFileInfo(plugin, url) - return fn + return getInfo def timestamp(): - return int(time() * 1000) + return int(time.time() * 1000) #@TODO: Move to hoster class in 0.4.10 -def _isDirectLink(self, url, resumable=False): - link = "" +def getFileURL(self, url, follow_location=None): + link = "" + redirect = 1 + + if type(follow_location) is int: + redirect = max(follow_location, 1) + else: + redirect = 10 + + for i in xrange(redirect): + try: + self.logDebug("Redirect #%d to: %s" % (i, url)) + header = self.load(url, just_header=True, decode=True) + + except Exception: #: Bad bad bad... rewrite this part in 0.4.10 + req = pyreq.getHTTPRequest() + res = req.load(url, just_header=True, decode=True) - for i in xrange(5 if resumable else 1): - header = self.load(url, ref=True, cookies=True, just_header=True, decode=True) + req.close() + + header = {"code": req.code} + for line in res.splitlines(): + line = line.strip() + if not line or ":" not in line: + continue + + key, none, value = line.partition(":") + key = key.lower().strip() + value = value.strip() + + if key in header: + if type(header[key]) == list: + header[key].append(value) + else: + header[key] = [header[key], value] + else: + header[key] = value if 'content-disposition' in header: link = url @@ -137,62 +178,100 @@ def _isDirectLink(self, url, resumable=False): elif 'location' in header and header['location']: location = header['location'] - if not urlparse(location).scheme: - p = urlparse(url) - base = "%s://%s" % (p.scheme, p.netloc) - location = urljoin(base, location) + if not urlparse.urlparse(location).scheme: + url_p = urlparse.urlparse(url) + baseurl = "%s://%s" % (url_p.scheme, url_p.netloc) + location = urlparse.urljoin(baseurl, location) if 'code' in header and header['code'] == 302: link = location - elif resumable: + if follow_location: url = location - self.logDebug("Redirect #%d to: %s" % (++i, location)) continue + else: + extension = os.path.splitext(urlparse.urlparse(url).path.split('/')[-1])[-1] + + if 'content-type' in header and header['content-type']: + mimetype = header['content-type'].split(';')[0].strip() + + elif extension: + mimetype = mimetypes.guess_type(extension, False)[0] or "application/octet-stream" + + else: + mimetype = "" + + if mimetype and (link or 'html' not in mimetype): + link = url + else: + link = "" + break + else: - self.logError(_("Too many redirects")) + try: + self.logError(_("Too many redirects")) + except Exception: + pass return link +def secondsToMidnight(gmt=0): + now = datetime.datetime.utcnow() + datetime.timedelta(hours=gmt) + + if now.hour is 0 and now.minute < 10: + midnight = now + else: + midnight = now + datetime.timedelta(days=1) + + td = midnight.replace(hour=0, minute=10, second=0, microsecond=0) - now + + if hasattr(td, 'total_seconds'): + res 
= td.total_seconds() + else: #@NOTE: work-around for python 2.5 and 2.6 missing datetime.timedelta.total_seconds + res = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6 + + return int(res) + + class SimpleHoster(Hoster): __name__ = "SimpleHoster" __type__ = "hoster" - __version__ = "0.79" + __version__ = "1.50" __pattern__ = r'^unmatchable$' + __config__ = [("use_premium", "bool", "Use premium account if available" , True), + ("fallback" , "bool", "Fallback to free download if premium fails", True)] __description__ = """Simple hoster plugin""" __license__ = "GPLv3" - __authors__ = [("zoidberg", "zoidberg@mujmail.cz"), - ("stickell", "l.stickell@yahoo.it"), - ("Walter Purcaro", "vuolter@gmail.com")] + __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] """ - Info patterns should be defined by each hoster: + Info patterns: - INFO_PATTERN: (optional) Name and Size of the file + INFO_PATTERN: (mandatory) Name and Size of the file example: INFO_PATTERN = r'(?P<N>file_name) (?P<S>file_size) (?P<U>size_unit)' or - NAME_PATTERN: (optional) Name that will be set for the file + NAME_PATTERN: (mandatory) Name that will be set for the file example: NAME_PATTERN = r'(?P<N>file_name)' - SIZE_PATTERN: (optional) Size that will be checked for the file + SIZE_PATTERN: (mandatory) Size that will be checked for the file example: SIZE_PATTERN = r'(?P<S>file_size) (?P<U>size_unit)' HASHSUM_PATTERN: (optional) Hash code and type of the file example: HASHSUM_PATTERN = r'(?P<H>hash_code) (?P<T>MD5)' - OFFLINE_PATTERN: (optional) Check if the file is yet available online + OFFLINE_PATTERN: (mandatory) Check if the page is unreachable example: OFFLINE_PATTERN = r'File (deleted|not found)' - TEMP_OFFLINE_PATTERN: (optional) Check if the file is temporarily offline + TEMP_OFFLINE_PATTERN: (optional) Check if the page is temporarily unreachable example: TEMP_OFFLINE_PATTERN = r'Server (maintenance|maintainance)' - Error handling patterns are all optional: + Error patterns: WAIT_PATTERN: (optional) Detect waiting time example: WAIT_PATTERN = r'' @@ -200,11 +279,23 @@ class SimpleHoster(Hoster): PREMIUM_ONLY_PATTERN: (optional) Check if the file can be downloaded only with a premium account example: PREMIUM_ONLY_PATTERN = r'Premium account required' + HAPPY_HOUR_PATTERN: (optional) + example: HAPPY_HOUR_PATTERN = r'Happy hour' + + IP_BLOCKED_PATTERN: (optional) + example: IP_BLOCKED_PATTERN = r'in your country' + + DOWNLOAD_LIMIT_PATTERN: (optional) + example: DOWNLOAD_LIMIT_PATTERN = r'download limit' + + SIZE_LIMIT_PATTERN: (optional) + example: SIZE_LIMIT_PATTERN = r'up to' + ERROR_PATTERN: (optional) Detect any error preventing download example: ERROR_PATTERN = r'' - Instead overriding handleFree and handlePremium methods you can define the following patterns for direct download: + Instead overriding handleFree and handlePremium methods you may define the following patterns for basic link handling: LINK_FREE_PATTERN: (optional) group(1) should be the direct link for free download example: LINK_FREE_PATTERN = r'<div class="link"><a href="(.+?)"' @@ -217,31 +308,46 @@ class SimpleHoster(Hoster): SIZE_REPLACEMENTS = [] URL_REPLACEMENTS = [] - TEXT_ENCODING = False #: Set to True or encoding name if encoding value in http header is not correct - COOKIES = True #: or False or list of tuples [(domain, name, value)] - CHECK_TRAFFIC = False #: Set to True to force checking traffic left for premium account - DIRECT_LINK = None #: Set to True to looking for direct link (as defined in handleDirect 
method), set to None to do it if self.account is True else False - MULTI_HOSTER = False #: Set to True to leech other hoster link (as defined in handleMulti method) + TEXT_ENCODING = False #: Set to True or encoding name if encoding value in http header is not correct + COOKIES = True #: or False or list of tuples [(domain, name, value)] + CHECK_TRAFFIC = False #: Set to True to force checking traffic left for premium account + DIRECT_LINK = None #: Set to True to looking for direct link (as defined in handleDirect method), set to None to do it if self.account is True else False + MULTI_HOSTER = False #: Set to True to leech other hoster link (as defined in handleMulti method) + LOGIN_ACCOUNT = False #: Set to True to require account login + DISPOSITION = True #: Set to True to use any content-disposition value in http header as file name + + directLink = getFileURL #@TODO: Remove in 0.4.10 @classmethod - def parseInfos(cls, urls): - for url in urls: - url = replace_patterns(url, cls.FILE_URL_REPLACEMENTS if hasattr(cls, "FILE_URL_REPLACEMENTS") else cls.URL_REPLACEMENTS) #@TODO: Remove FILE_URL_REPLACEMENTS check in 0.4.10 - yield cls.getInfo(url) + def apiInfo(cls, url): + url = urllib.unquote(url) + url_p = urlparse.urlparse(url) + return {'name' : (url_p.path.split('/')[-1] + or url_p.query.split('=', 1)[::-1][0].split('&', 1)[0] + or url_p.netloc.split('.', 1)[0]), + 'size' : 0, + 'status': 3 if url else 8, + 'url' : url} @classmethod def getInfo(cls, url="", html=""): - info = {'name': urlparse(unquote(url)).path.split('/')[-1] or _("Unknown"), 'size': 0, 'status': 3, 'url': url} + info = cls.apiInfo(url) + online = True if info['status'] is 2 else False - if not html: - try: - if not url: - info['error'] = "missing url" - info['status'] = 1 - raise + try: + info['pattern'] = re.match(cls.__pattern__, url).groupdict() #: pattern groups will be saved here + except Exception: + info['pattern'] = {} + + if not html and not online: + if not url: + info['error'] = "missing url" + info['status'] = 1 + + elif info['status'] is 3: try: html = getURL(url, cookies=cls.COOKIES, decode=not cls.TEXT_ENCODING) @@ -253,61 +359,45 @@ class SimpleHoster(Hoster): if e.code is 404: info['status'] = 1 - raise - if e.code is 503: + elif e.code is 503: info['status'] = 6 - raise - except: - return info - online = False + except Exception: + pass - if hasattr(cls, "OFFLINE_PATTERN") and re.search(cls.OFFLINE_PATTERN, html): - info['status'] = 1 + if html: + if hasattr(cls, "OFFLINE_PATTERN") and re.search(cls.OFFLINE_PATTERN, html): + info['status'] = 1 - elif hasattr(cls, "FILE_OFFLINE_PATTERN") and re.search(cls.FILE_OFFLINE_PATTERN, html): #@TODO: Remove in 0.4.10 - info['status'] = 1 + elif hasattr(cls, "TEMP_OFFLINE_PATTERN") and re.search(cls.TEMP_OFFLINE_PATTERN, html): + info['status'] = 6 - elif hasattr(cls, "TEMP_OFFLINE_PATTERN") and re.search(cls.TEMP_OFFLINE_PATTERN, html): - info['status'] = 6 - - else: - try: - info['pattern'] = re.match(cls.__pattern__, url).groupdict() #: pattern groups will be saved here, please save api stuff to info['api'] - except: - info['pattern'] = {} - - for pattern in ("FILE_INFO_PATTERN", "INFO_PATTERN", - "FILE_NAME_PATTERN", "NAME_PATTERN", - "FILE_SIZE_PATTERN", "SIZE_PATTERN", - "HASHSUM_PATTERN"): #@TODO: Remove old patterns starting with "FILE_" in 0.4.10 - try: - attr = getattr(cls, pattern) - pdict = re.search(attr, html).groupdict() - - if all(True for k in pdict if k not in info['pattern']): - info['pattern'].update(pdict) + else: + for pattern in 
("INFO_PATTERN", "NAME_PATTERN", "SIZE_PATTERN", "HASHSUM_PATTERN"): + try: + attr = getattr(cls, pattern) + pdict = re.search(attr, html).groupdict() - except AttributeError: - continue + if all(True for k in pdict if k not in info['pattern']): + info['pattern'].update(pdict) - else: - online = True + except AttributeError: + continue - if not info['pattern']: - info.pop('pattern', None) + else: + online = True if online: info['status'] = 2 if 'N' in info['pattern']: - info['name'] = replace_patterns(unquote(info['pattern']['N'].strip()), - cls.FILE_NAME_REPLACEMENTS if hasattr(cls, "FILE_NAME_REPLACEMENTS") else cls.NAME_REPLACEMENTS) #@TODO: Remove FILE_NAME_REPLACEMENTS check in 0.4.10 + info['name'] = replace_patterns(urllib.unquote(info['pattern']['N'].strip()), + cls.NAME_REPLACEMENTS) if 'S' in info['pattern']: size = replace_patterns(info['pattern']['S'] + info['pattern']['U'] if 'U' in info['pattern'] else info['pattern']['S'], - cls.FILE_SIZE_REPLACEMENTS if hasattr(cls, "FILE_SIZE_REPLACEMENTS") else cls.SIZE_REPLACEMENTS) #@TODO: Remove FILE_SIZE_REPLACEMENTS check in 0.4.10 + cls.SIZE_REPLACEMENTS) info['size'] = parseFileSize(size) elif isinstance(info['size'], basestring): @@ -318,6 +408,9 @@ class SimpleHoster(Hoster): hashtype = info['pattern']['T'] if 'T' in info['pattern'] else "hash" info[hashtype] = info['pattern']['H'] + if not info['pattern']: + info.pop('pattern', None) + return info @@ -326,11 +419,20 @@ class SimpleHoster(Hoster): def prepare(self): + self.pyfile.error = "" #@TODO: Remove in 0.4.10 + self.info = {} + self.html = "" self.link = "" #@TODO: Move to hoster class in 0.4.10 self.directDL = False #@TODO: Move to hoster class in 0.4.10 self.multihost = False #@TODO: Move to hoster class in 0.4.10 + if not self.getConfig('use_premium', True): + self.retryFree() + + if self.LOGIN_ACCOUNT and not self.account: + self.fail(_("Required account not found")) + self.req.setOption("timeout", 120) if isinstance(self.COOKIES, list): @@ -347,8 +449,7 @@ class SimpleHoster(Hoster): else: self.directDL = self.DIRECT_LINK - self.pyfile.url = replace_patterns(self.pyfile.url, - self.FILE_URL_REPLACEMENTS if hasattr(self, "FILE_URL_REPLACEMENTS") else self.URL_REPLACEMENTS) #@TODO: Remove FILE_URL_REPLACEMENTS check in 0.4.10 + self.pyfile.url = replace_patterns(self.pyfile.url, self.URL_REPLACEMENTS) def preload(self): @@ -359,132 +460,265 @@ class SimpleHoster(Hoster): def process(self, pyfile): - self.prepare() - self.checkInfo() + try: + self.prepare() + self.checkInfo() - if self.directDL: - self.logDebug("Looking for direct download link...") - self.handleDirect() + if self.directDL: + self.logDebug("Looking for direct download link...") + self.handleDirect(pyfile) - if self.multihost and not self.link and not self.lastDownload: - self.logDebug("Looking for leeched download link...") - self.handleMulti() + if self.multihost and not self.link and not self.lastDownload: + self.logDebug("Looking for leeched download link...") + self.handleMulti(pyfile) + + if not self.link and not self.lastDownload: + self.MULTI_HOSTER = False + self.retry(1, reason="Multi hoster fails") if not self.link and not self.lastDownload: - self.MULTI_HOSTER = False - self.retry(1, reason="Multi hoster fails") + self.preload() + self.checkInfo() - if not self.link and not self.lastDownload: - self.preload() - self.checkInfo() + if self.premium and (not self.CHECK_TRAFFIC or self.checkTrafficLeft()): + self.logDebug("Handled as premium download") + self.handlePremium(pyfile) - if self.html is 
None: - self.fail(_("No html retrieved")) + elif not self.LOGIN_ACCOUNT or (not self.CHECK_TRAFFIC or self.checkTrafficLeft()): + self.logDebug("Handled as free download") + self.handleFree(pyfile) - if self.premium and (not self.CHECK_TRAFFIC or self.checkTrafficLeft()): - self.logDebug("Handled as premium download") - self.handlePremium() + self.downloadLink(self.link, self.DISPOSITION) + self.checkFile() + + except Fail, e: #@TODO: Move to PluginThread in 0.4.10 + err = str(e) #@TODO: Recheck in 0.4.10 + + if err == _("No captcha result obtained in appropiate time by any of the plugins."): #@TODO: Fix in 0.4.10 + self.checkFile() + + elif self.getConfig('fallback', True) and self.premium: + self.logWarning(_("Premium download failed"), e) + self.retryFree() else: - self.logDebug("Handled as free download") - self.handleFree() + raise Fail(err) + - self.downloadLink(self.link) - self.checkFile() + def downloadLink(self, link, disposition=True): + if not link or not isinstance(link, basestring): + return + + self.correctCaptcha() + link = html_unescape(link.strip().decode('unicode-escape')) #@TODO: Move this check to plugin `load` method in 0.4.10 - def downloadLink(self, link): - if link and isinstance(link, basestring): - self.correctCaptcha() - self.download(link, disposition=True) + if not urlparse.urlparse(link).scheme: + url_p = urlparse.urlparse(self.pyfile.url) + baseurl = "%s://%s" % (url_p.scheme, url_p.netloc) + link = urlparse.urljoin(baseurl, link) + self.download(link, ref=False, disposition=disposition) - def checkFile(self): + + def checkFile(self, rules={}): if self.cTask and not self.lastDownload: self.invalidCaptcha() self.retry(10, reason=_("Wrong captcha")) - elif not self.lastDownload or not exists(fs_encode(self.lastDownload)): - self.fail(_("No file downloaded")) + elif not self.lastDownload or not os.path.exists(fs_encode(self.lastDownload)): + self.lastDownload = "" + self.error(self.pyfile.error or _("No file downloaded")) else: - rules = {'empty file': re.compile(r"^$")} + errmsg = self.checkDownload({'Empty file': re.compile(r'\A\s*\Z'), + 'Html error': re.compile(r'\A(?:\s*<.+>)?((?:[\w\s]*(?:[Ee]rror|ERROR)\s*\:?)?\s*\d{3})(?:\Z|\s+)')}) - if hasattr(self, 'ERROR_PATTERN'): - rules['error'] = re.compile(self.ERROR_PATTERN) + if not errmsg: + for r, p in [('Html file' , re.compile(r'\A\s*<!DOCTYPE html') ), + ('Request error', re.compile(r'([Aa]n error occured while processing your request)'))]: + if r not in rules: + rules[r] = p - check = self.checkDownload(rules) - if check: #@TODO: Move to hoster in 0.4.10 - errmsg = check.strip().capitalize() + (" | " + self.lastCheck.strip() if self.lastCheck else "") - self.retry(10, 60, errmsg) + for r, a in [('Error' , "ERROR_PATTERN" ), + ('Premium only', "PREMIUM_ONLY_PATTERN"), + ('Wait error' , "WAIT_PATTERN" )]: + if r not in rules and hasattr(self, a): + rules[r] = getattr(self, a) + + errmsg = self.checkDownload(rules) + + if not errmsg: + return + + errmsg = errmsg.strip().capitalize() + + try: + errmsg += " | " + self.lastCheck.group(1).strip() + except Exception: + pass + + self.logWarning("Check result: " + errmsg, "Waiting 1 minute and retry") + self.retry(3, 60, errmsg) def checkErrors(self): - if hasattr(self, 'PREMIUM_ONLY_PATTERN') and self.premium and re.search(self.PREMIUM_ONLY_PATTERN, self.html): - self.fail(_("Link require a premium account to be handled")) + if not self.html: + self.logWarning(_("No html code to check")) + return + + if hasattr(self, 'IP_BLOCKED_PATTERN') and 
re.search(self.IP_BLOCKED_PATTERN, self.html): + self.fail(_("Connection from your current IP address is not allowed")) + + elif not self.premium: + if hasattr(self, 'PREMIUM_ONLY_PATTERN') and re.search(self.PREMIUM_ONLY_PATTERN, self.html): + self.fail(_("File can be downloaded by premium users only")) + + elif hasattr(self, 'SIZE_LIMIT_PATTERN') and re.search(self.SIZE_LIMIT_PATTERN, self.html): + self.fail(_("File too large for free download")) + + elif hasattr(self, 'DOWNLOAD_LIMIT_PATTERN') and re.search(self.DOWNLOAD_LIMIT_PATTERN, self.html): + m = re.search(self.DOWNLOAD_LIMIT_PATTERN, self.html) + try: + errmsg = m.group(1).strip() + except Exception: + errmsg = m.group(0).strip() + + self.info['error'] = re.sub(r'<.*?>', " ", errmsg) + self.logWarning(self.info['error']) + + if re.search('da(il)?y|today', errmsg, re.I): + wait_time = secondsToMidnight(gmt=2) + else: + wait_time = sum(int(v) * {"hr": 3600, "hour": 3600, "min": 60, "sec": 1, "": 1}[u.lower()] for v, u in + re.findall(r'(\d+)\s*(hr|hour|min|sec|)', errmsg, re.I)) + + self.wantReconnect = wait_time > 300 + self.retry(1, wait_time, _("Download limit exceeded")) + + if hasattr(self, 'HAPPY_HOUR_PATTERN') and re.search(self.HAPPY_HOUR_PATTERN, self.html): + self.multiDL = True if hasattr(self, 'ERROR_PATTERN'): m = re.search(self.ERROR_PATTERN, self.html) if m: - errmsg = self.info['error'] = m.group(1) - self.error(errmsg) + try: + errmsg = m.group(1).strip() + except Exception: + errmsg = m.group(0).strip() + + self.info['error'] = re.sub(r'<.*?>', " ", errmsg) + self.logWarning(self.info['error']) + + if re.search('limit|wait', errmsg, re.I): + if re.search("da(il)?y|today", errmsg): + wait_time = secondsToMidnight(gmt=2) + else: + wait_time = sum(int(v) * {"hr": 3600, "hour": 3600, "min": 60, "sec": 1, "": 1}[u.lower()] for v, u in + re.findall(r'(\d+)\s*(hr|hour|min|sec|)', errmsg, re.I)) + + self.wantReconnect = wait_time > 300 + self.retry(1, wait_time, _("Download limit exceeded")) + + elif re.search('country|ip|region|nation', errmsg, re.I): + self.fail(_("Connection from your current IP address is not allowed")) + + elif re.search('captcha|code', errmsg, re.I): + self.invalidCaptcha() + + elif re.search('countdown|expired', errmsg, re.I): + self.retry(wait_time=60, reason=_("Link expired")) + + elif re.search('maintenance|maintainance|temp', errmsg, re.I): + self.tempOffline() - if hasattr(self, 'WAIT_PATTERN'): + elif re.search('up to', errmsg, re.I): + self.fail(_("File too large for free download")) + + elif re.search('offline|delet|remov|not (found|available)', errmsg, re.I): + self.offline() + + elif re.search('premium', errmsg, re.I): + self.fail(_("File can be downloaded by premium users only")) + + else: + self.wantReconnect = True + self.retry(wait_time=60, reason=errmsg) + + elif hasattr(self, 'WAIT_PATTERN'): m = re.search(self.WAIT_PATTERN, self.html) if m: - wait_time = sum([int(v) * {"hr": 3600, "hour": 3600, "min": 60, "sec": 1}[u.lower()] for v, u in - re.findall(r'(\d+)\s*(hr|hour|min|sec)', m.group(0), re.I)]) + try: + waitmsg = m.group(1).strip() + except Exception: + waitmsg = m.group(0).strip() + + wait_time = sum(int(v) * {"hr": 3600, "hour": 3600, "min": 60, "sec": 1, "": 1}[u.lower()] for v, u in + re.findall(r'(\d+)\s*(hr|hour|min|sec|)', waitmsg, re.I)) self.wait(wait_time, wait_time > 300) - return self.info.pop('error', None) - def checkStatus(self): - status = self.info['status'] + def checkStatus(self, getinfo=True): + if not self.info or getinfo: + self.logDebug("Update file 
info...") + self.logDebug("Previous file info: %s" % self.info) + self.info.update(self.getInfo(self.pyfile.url, self.html)) + self.logDebug("Current file info: %s" % self.info) - if status is 1: - self.offline() + try: + status = self.info['status'] - elif status is 6: - self.tempOffline() + if status is 1: + self.offline() - elif status is not 2: - self.logDebug("File status: %s" % statusMap[status], - "File info: %s" % self.info) + elif status is 6: + self.tempOffline() + elif status is 8: + self.fail(self.info['error'] if 'error' in self.info else _("Failed")) - def checkNameSize(self): - name = self.info['name'] - size = self.info['size'] - url = self.info['url'] + finally: + self.logDebug("File status: %s" % statusMap[status]) - if name and name != url: - self.pyfile.name = name - else: - self.pyfile.name = name = self.info['name'] = urlparse(name).path.split('/')[-1] - if size > 0: - self.pyfile.size = size - else: - size = "Unknown" + def checkNameSize(self, getinfo=True): + if not self.info or getinfo: + self.logDebug("Update file info...") + self.logDebug("Previous file info: %s" % self.info) + self.info.update(self.getInfo(self.pyfile.url, self.html)) + self.logDebug("Current file info: %s" % self.info) - self.logDebug("File name: %s" % name, - "File size: %s" % size) + try: + url = self.info['url'].strip() + name = self.info['name'].strip() + if name and name != url: + self.pyfile.name = name + except Exception: + pass + + try: + size = self.info['size'] + if size > 0: + self.pyfile.size = size + + except Exception: + pass + + self.logDebug("File name: %s" % self.pyfile.name, + "File size: %s byte" % self.pyfile.size if self.pyfile.size > 0 else "File size: Unknown") - def checkInfo(self): - self.updateInfo(self.getInfo(self.pyfile.url, self.html)) + def checkInfo(self): self.checkNameSize() if self.html: self.checkErrors() + self.checkNameSize() - self.updateInfo(self.getInfo(self.pyfile.url, self.html)) - - self.checkNameSize() - self.checkStatus() + self.checkStatus(getinfo=False) #: Deprecated @@ -494,56 +728,43 @@ class SimpleHoster(Hoster): return self.info - def updateInfo(self, info): - self.logDebug(_("File info (BEFORE): %s") % self.info) - self.info.update(info) - self.logDebug(_("File info (AFTER): %s") % self.info) - - - def handleDirect(self): - link = _isDirectLink(self, self.pyfile.url, self.resumeDownload) + def handleDirect(self, pyfile): + link = self.directLink(pyfile.url, self.resumeDownload) if link: self.logInfo(_("Direct download link detected")) - self.link = link else: - self.logDebug(_("Direct download link not found")) + self.logDebug("Direct download link not found") - def handleMulti(self): #: Multi-hoster handler + def handleMulti(self, pyfile): #: Multi-hoster handler pass - def handleFree(self): + def handleFree(self, pyfile): if not hasattr(self, 'LINK_FREE_PATTERN'): - self.fail(_("Free download not implemented")) - - try: - m = re.search(self.LINK_FREE_PATTERN, self.html) - if m is None: - self.error(_("Free download link not found")) + self.logError(_("Free download not implemented")) + m = re.search(self.LINK_FREE_PATTERN, self.html) + if m is None: + self.error(_("Free download link not found")) + else: self.link = m.group(1) - except Exception, e: - self.fail(e) - - def handlePremium(self): + def handlePremium(self, pyfile): if not hasattr(self, 'LINK_PREMIUM_PATTERN'): - self.fail(_("Premium download not implemented")) - - try: - m = re.search(self.LINK_PREMIUM_PATTERN, self.html) - if m is None: - self.error(_("Premium download link not 
found")) + self.logError(_("Premium download not implemented")) + self.logDebug("Handled as free download") + self.handleFree(pyfile) + m = re.search(self.LINK_PREMIUM_PATTERN, self.html) + if m is None: + self.error(_("Premium download link not found")) + else: self.link = m.group(1) - except Exception, e: - self.fail(e) - def longWait(self, wait_time=None, max_tries=3): if wait_time and isinstance(wait_time, (int, long, float)): @@ -555,8 +776,7 @@ class SimpleHoster(Hoster): self.logInfo(_("Download limit reached, reconnect or wait %s") % time_str) - self.setWait(wait_time, True) - self.wait() + self.wait(wait_time, True) self.retry(max_tries=max_tries, reason=_("Download limit reached")) @@ -565,6 +785,9 @@ class SimpleHoster(Hoster): def checkTrafficLeft(self): + if not self.account: + return True + traffic = self.account.getAccountInfo(self.user, True)['trafficleft'] if traffic is None: @@ -578,6 +801,26 @@ class SimpleHoster(Hoster): #@TODO: Remove in 0.4.10 + def getConfig(self, option, default=''): + """getConfig with default value - sublass may not implements all config options""" + try: + return self.getConf(option) + + except KeyError: + return default + + + def retryFree(self): + if not self.premium: + return + self.premium = False + self.account = None + self.req = self.core.requestFactory.getRequest(self.__name__) + self.retries = -1 + raise Retry(_("Fallback to free download")) + + + #@TODO: Remove in 0.4.10 def wait(self, seconds=0, reconnect=None): return _wait(self, seconds, reconnect) diff --git a/module/plugins/internal/UnRar.py b/module/plugins/internal/UnRar.py index 5633b31f7..5b9f2e1c3 100644 --- a/module/plugins/internal/UnRar.py +++ b/module/plugins/internal/UnRar.py @@ -2,105 +2,108 @@ import os import re +import subprocess from glob import glob -from os.path import basename, dirname, join from string import digits -from subprocess import Popen, PIPE -from module.plugins.internal.AbstractExtractor import AbtractExtractor, PasswordError, ArchiveError, CRCError -from module.utils import save_join, decode +from module.plugins.internal.Extractor import Extractor, ArchiveError, CRCError, PasswordError +from module.utils import fs_decode, fs_encode, save_join def renice(pid, value): - if os.name != "nt" and value: + if value and os.name != "nt": try: - Popen(["renice", str(value), str(pid)], stdout=PIPE, stderr=PIPE, bufsize=-1) - except: - print "Renice failed" + subprocess.Popen(["renice", str(value), str(pid)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=-1) + except Exception: + pass -class UnRar(AbtractExtractor): + +class UnRar(Extractor): __name__ = "UnRar" - __version__ = "1.00" + __version__ = "1.20" __description__ = """Rar extractor plugin""" __license__ = "GPLv3" - __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] + __authors__ = [("RaNaN" , "RaNaN@pyload.org" ), + ("Walter Purcaro", "vuolter@gmail.com"), + ("Immenz" , "immenz@gmx.net" )] CMD = "unrar" + VERSION = "" + EXTENSIONS = [".rar"] - EXTENSIONS = ["rar", "zip", "cab", "arj", "lzh", "tar", "gz", "bz2", "ace", "uue", "jar", "iso", "7z", "xz", "z"] + re_multipart = re.compile(r'\.(part|r)(\d+)(?:\.rar)?(\.rev|\.bad)?',re.I) - #@NOTE: there are some more uncovered rar formats - re_rarpart = re.compile(r'(.*)\.part(\d+)\.rar$', re.I) - re_rarfile = re.compile(r'.*\.(rar|r\d+)$', re.I) + re_filefixed = re.compile(r'Building (.+)') + re_filelist = re.compile(r'^(.)(\s*[\w\.\-]+)\s+(\d+\s+)+(?:\d+\%\s+)?[\d\-]{8}\s+[\d\:]{5}', re.M|re.I) - re_filelist = 
re.compile(r'(.+)\s+(\d+)\s+(\d+)\s+|(.+)\s+(\d+)\s+\d\d-\d\d-\d\d\s+\d\d:\d\d\s+(.+)') re_wrongpwd = re.compile(r'password', re.I) - re_wrongcrc = re.compile(r'encrypted|damaged|CRC failed|checksum error', re.I) + re_wrongcrc = re.compile(r'encrypted|damaged|CRC failed|checksum error|corrupt', re.I) + + re_version = re.compile(r'(?:UN)?RAR\s(\d+\.\d+)', re.I) @classmethod - def checkDeps(cls): + def isUsable(cls): if os.name == "nt": - cls.CMD = join(pypath, "UnRAR.exe") - p = Popen([cls.CMD], stdout=PIPE, stderr=PIPE) - p.communicate() - else: try: - p = Popen([cls.CMD], stdout=PIPE, stderr=PIPE) - p.communicate() + cls.CMD = os.path.join(pypath, "RAR.exe") + p = subprocess.Popen([cls.CMD], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + cls.__name__ = "RAR" + cls.REPAIR = True except OSError: - # fallback to rar - cls.CMD = "rar" - p = Popen([cls.CMD], stdout=PIPE, stderr=PIPE) - p.communicate() + cls.CMD = os.path.join(pypath, "UnRAR.exe") + p = subprocess.Popen([cls.CMD], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + else: + try: + p = subprocess.Popen(["rar"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + cls.__name__ = "RAR" + cls.REPAIR = True + + except OSError: #: fallback to unrar + p = subprocess.Popen([cls.CMD], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = p.communicate() + + m = cls.re_version.search(out) + cls.VERSION = m.group(1) if m else '(version unknown)' return True @classmethod - def isArchive(cls, file): - f = basename(file).lower() - return any(f.endswith('.%s' % ext) for ext in cls.EXTENSIONS) + def isMultipart(cls, filename): + return True if cls.re_multipart.search(filename) else False - @classmethod - def getTargets(cls, files_ids): - targets = [] - - for file, id in files_ids: - if not cls.isArchive(file): - continue - - m = cls.re_rarpart.findall(file) - if m: - # only add first parts - if int(m[0][1]) == 1: - targets.append((file, id)) - else: - targets.append((file, id)) + def verify(self, password): + p = self.call_cmd("t", "-v", fs_encode(self.filename), password=password) + self._progress(p) + err = p.stderr.read().strip() - return targets + if self.re_wrongpwd.search(err): + raise PasswordError + if self.re_wrongcrc.search(err): + raise CRCError(err) - def check(self, out="", err=""): - if not out or not err: - return - if err.strip(): - if self.re_wrongpwd.search(err): - raise PasswordError + def check(self, password): + p = self.call_cmd("l", "-v", fs_encode(self.filename), password=password) + out, err = p.communicate() - elif self.re_wrongcrc.search(err): - raise CRCError + if self.re_wrongpwd.search(err): + raise PasswordError - else: #: raise error if anything is on stderr - raise ArchiveError(err.strip()) + if self.re_wrongcrc.search(err): + raise CRCError(err) # output only used to check if passworded files are present for attr in self.re_filelist.findall(out): @@ -108,114 +111,99 @@ class UnRar(AbtractExtractor): raise PasswordError - def verify(self): - p = self.call_cmd("l", "-v", self.file, password=self.password) - - self.check(*p.communicate()) - - if p and p.returncode: - raise ArchiveError("Process terminated") - - if not self.list(): - raise ArchiveError("Empty archive") - - - def isPassword(self, password): - if isinstance(password, basestring): - p = self.call_cmd("l", "-v", self.file, password=password) - out, err = p.communicate() - - if not self.re_wrongpwd.search(err): - return True - - return False - - def repair(self): 
- p = self.call_cmd("rc", self.file) - out, err = p.communicate() - - if p.returncode or err.strip(): - p = self.call_cmd("r", self.file) - out, err = p.communicate() - - if p.returncode or err.strip(): - return False - else: - self.file = join(dirname(self.file), re.search(r'(fixed|rebuild)\.%s' % basename(self.file), out).group(0)) + p = self.call_cmd("rc", fs_encode(self.filename)) + # communicate and retrieve stderr + self._progress(p) + err = p.stderr.read().strip() + if err or p.returncode: + return False return True - def extract(self, progress=lambda x: None): - self.verify() - - progress(0) - - command = "x" if self.fullpath else "e" - - p = self.call_cmd(command, self.file, self.out, password=self.password) - - renice(p.pid, self.renice) - - progressstring = "" + def _progress(self, process): + s = "" while True: - c = p.stdout.read(1) + c = process.stdout.read(1) # quit loop on eof if not c: break # reading a percentage sign -> set progress and restart - if c is '%': - progress(int(progressstring)) - progressstring = "" + if c == '%': + self.notifyProgress(int(s)) + s = "" # not reading a digit -> therefore restart elif c not in digits: - progressstring = "" + s = "" # add digit to progressstring else: - progressstring += c + s += c + + + def extract(self, password=None): + command = "x" if self.fullpath else "e" - progress(100) + p = self.call_cmd(command, fs_encode(self.filename), self.out, password=password) - self.files = self.list() + renice(p.pid, self.renice) - # retrieve stderr - self.check(err=p.stderr.read()) + # communicate and retrieve stderr + self._progress(p) + err = p.stderr.read().strip() + + if err: + if self.re_wrongpwd.search(err): + raise PasswordError + + elif self.re_wrongcrc.search(err): + raise CRCError(err) + + else: #: raise error if anything is on stderr + raise ArchiveError(err) if p.returncode: - raise ArchiveError("Process terminated") + raise ArchiveError(_("Process return code: %d") % p.returncode) + + self.files = self.list(password) def getDeleteFiles(self): - if ".part" in basename(self.file): - return glob(re.sub("(?<=\.part)([01]+)", "*", self.file, re.I)) + dir, name = os.path.split(self.filename) + + # actually extracted file + files = [self.filename] - # get files which matches .r* and filter unsuited files out - parts = glob(re.sub(r"(?<=\.r)ar$", "*", self.file, re.I)) + # eventually Multipart Files + files.extend(save_join(dir, os.path.basename(file)) for file in filter(self.isMultipart, os.listdir(dir)) + if re.sub(self.re_multipart,".rar",name) == re.sub(self.re_multipart,".rar",file)) - return filter(lambda x: self.re_rarfile.match(x), parts) + return files - def list(self): + def list(self, password=None): command = "vb" if self.fullpath else "lb" - p = self.call_cmd(command, "-v", self.file, password=self.password) + p = self.call_cmd(command, "-v", fs_encode(self.filename), password=password) out, err = p.communicate() - if err.strip(): - self.m.logError(err) - if "Cannot open" in err: - return list() + if "Cannot open" in err: + raise ArchiveError(_("Cannot open file")) - if p.returncode: - self.m.logError("Process terminated") - return list() + if err.strip(): #: only log error at this point + self.manager.logError(err.strip()) result = set() - - for f in decode(out).splitlines(): - f = f.strip() - result.add(save_join(self.out, f)) + if not self.fullpath and self.VERSION.startswith('5'): + # NOTE: Unrar 5 always list full path + for f in fs_decode(out).splitlines(): + f = save_join(self.out, os.path.basename(f.strip())) + if 
os.path.isfile(f): + result.add(save_join(self.out, os.path.basename(f))) + else: + for f in fs_decode(out).splitlines(): + f = f.strip() + result.add(save_join(self.out, f)) return list(result) @@ -228,11 +216,11 @@ class UnRar(AbtractExtractor): args.append("-o+") else: args.append("-o-") - if self.delete: + if self.delete != 'No': args.append("-or") for word in self.excludefiles: - args.append("-x%s" % word.strip()) + args.append("-x'%s'" % word.strip()) # assume yes on all queries args.append("-y") @@ -248,6 +236,8 @@ class UnRar(AbtractExtractor): # NOTE: return codes are not reliable, some kind of threading, cleanup whatever issue call = [self.CMD, command] + args + list(xargs) - self.m.logDebug(" ".join(call)) - return Popen(call, stdout=PIPE, stderr=PIPE) + self.manager.logDebug(" ".join(call)) + + p = subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return p diff --git a/module/plugins/internal/UnZip.py b/module/plugins/internal/UnZip.py index b3d54cba0..8d3fec370 100644 --- a/module/plugins/internal/UnZip.py +++ b/module/plugins/internal/UnZip.py @@ -2,85 +2,71 @@ from __future__ import with_statement +import os import sys import zipfile -from module.plugins.internal.AbstractExtractor import AbtractExtractor, PasswordError, ArchiveError, CRCError +from module.plugins.internal.Extractor import Extractor, ArchiveError, CRCError, PasswordError +from module.utils import fs_encode -class UnZip(AbtractExtractor): +class UnZip(Extractor): __name__ = "UnZip" - __version__ = "1.00" + __version__ = "1.12" __description__ = """Zip extractor plugin""" __license__ = "GPLv3" __authors__ = [("Walter Purcaro", "vuolter@gmail.com")] - EXTENSIONS = ["zip", "zip64"] + EXTENSIONS = [".zip", ".zip64"] + VERSION ="(python %s.%s.%s)" % (sys.version_info[0], sys.version_info[1], sys.version_info[2]) @classmethod - def checkDeps(cls): + def isUsable(cls): return sys.version_info[:2] >= (2, 6) - @classmethod - def isArchive(cls, file): - return zipfile.is_zipfile(file) + def list(self, password=None): + with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z: + z.setpassword(password) + return z.namelist() - def verify(self): - try: - with zipfile.ZipFile(self.file, 'r', allowZip64=True) as z: - z.setpassword(self.password) - badcrc = z.testzip() - - except (BadZipfile, LargeZipFile), e: - raise ArchiveError(e) + def check(self, password): + pass - except RuntimeError, e: - if 'encrypted' in e: - raise PasswordError - else: - raise ArchiveError(e) - else: - if badcrc: - raise CRCError + def verify(self): + with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z: + badfile = z.testzip() - if not self.list(): - raise ArchiveError("Empty archive") + if badfile: + raise CRCError(badfile) + else: + raise PasswordError - def list(self): + def extract(self, password=None): try: - with zipfile.ZipFile(self.file, 'r', allowZip64=True) as z: - z.setpassword(self.password) - return z.namelist() - except Exception: - return list() + with zipfile.ZipFile(fs_encode(self.filename), 'r', allowZip64=True) as z: + z.setpassword(password) + badfile = z.testzip() - def extract(self, progress=lambda x: None): - try: - with zipfile.ZipFile(self.file, 'r', allowZip64=True) as z: - progress(0) - z.extractall(self.out, pwd=self.password) - progress(100) + if badfile: + raise CRCError(badfile) + else: + z.extractall(self.out) - except (BadZipfile, LargeZipFile), e: + except (zipfile.BadZipfile, zipfile.LargeZipFile), e: raise ArchiveError(e) except RuntimeError, e: - if e 
is "Bad password for file": + if "encrypted" in e: raise PasswordError else: raise ArchiveError(e) - - finally: - self.files = self.list() - - - def getDeleteFiles(self): - return [self.file] + else: + self.files = z.namelist() diff --git a/module/plugins/internal/XFSAccount.py b/module/plugins/internal/XFSAccount.py index 2784ecd0b..e619cb038 100644 --- a/module/plugins/internal/XFSAccount.py +++ b/module/plugins/internal/XFSAccount.py @@ -1,9 +1,8 @@ # -*- coding: utf-8 -*- import re - -from time import gmtime, mktime, strptime -from urlparse import urljoin +import time +import urlparse from module.plugins.Account import Account from module.plugins.internal.SimpleHoster import parseHtmlForm, set_cookies @@ -12,18 +11,19 @@ from module.plugins.internal.SimpleHoster import parseHtmlForm, set_cookies class XFSAccount(Account): __name__ = "XFSAccount" __type__ = "account" - __version__ = "0.33" + __version__ = "0.37" __description__ = """XFileSharing account plugin""" __license__ = "GPLv3" - __authors__ = [("zoidberg", "zoidberg@mujmail.cz"), - ("Walter Purcaro", "vuolter@gmail.com")] + __authors__ = [("zoidberg" , "zoidberg@mujmail.cz"), + ("Walter Purcaro", "vuolter@gmail.com" )] HOSTER_DOMAIN = None HOSTER_URL = None + LOGIN_URL = None - COOKIES = [(HOSTER_DOMAIN, "lang", "english")] + COOKIES = True PREMIUM_PATTERN = r'\(Premium only\)' @@ -35,7 +35,7 @@ class XFSAccount(Account): LEECH_TRAFFIC_PATTERN = r'Leech Traffic left:<b>.*?(?P<S>[\d.,]+|[Uu]nlimited)\s*(?:(?P<U>[\w^_]+)\s*)?</b>' LEECH_TRAFFIC_UNIT = "MB" #: used only if no group <U> was found - LOGIN_FAIL_PATTERN = r'>\s*(Incorrect Login or Password|Error<)' + LOGIN_FAIL_PATTERN = r'Incorrect Login or Password|account was banned|Error<' def __init__(self, manager, accounts): #@TODO: remove in 0.4.10 @@ -46,9 +46,15 @@ class XFSAccount(Account): def init(self): if not self.HOSTER_DOMAIN: self.logError(_("Missing HOSTER_DOMAIN")) + self.COOKIES = False + + else: + if not self.HOSTER_URL: + self.HOSTER_URL = "http://www.%s/" % self.HOSTER_DOMAIN - if not self.HOSTER_URL: - self.HOSTER_URL = "http://www.%s/" % self.HOSTER_DOMAIN or "" + if isinstance(self.COOKIES, list): + self.COOKIES.insert((self.HOSTER_DOMAIN, "lang", "english")) + set_cookies(req.cj, self.COOKIES) def loadAccountInfo(self, user, req): @@ -57,6 +63,12 @@ class XFSAccount(Account): leechtraffic = None premium = None + if not self.HOSTER_URL: #@TODO: Remove in 0.4.10 + return {'validuntil' : validuntil, + 'trafficleft' : trafficleft, + 'leechtraffic': leechtraffic, + 'premium' : premium} + html = req.load(self.HOSTER_URL, get={'op': "my_account"}, decode=True) premium = True if re.search(self.PREMIUM_PATTERN, html) else False @@ -67,7 +79,7 @@ class XFSAccount(Account): self.logDebug("Expire date: " + expiredate) try: - validuntil = mktime(strptime(expiredate, "%d %B %Y")) + validuntil = time.mktime(time.strptime(expiredate, "%d %B %Y")) except Exception, e: self.logError(e) @@ -75,11 +87,11 @@ class XFSAccount(Account): else: self.logDebug("Valid until: %s" % validuntil) - if validuntil > mktime(gmtime()): - premium = True + if validuntil > time.mktime(time.gmtime()): + premium = True trafficleft = -1 else: - premium = False + premium = False validuntil = None #: registered account type (not premium) else: self.logDebug("VALID_UNTIL_PATTERN not found") @@ -136,25 +148,34 @@ class XFSAccount(Account): else: self.logDebug("LEECH_TRAFFIC_PATTERN not found") - return {'validuntil': validuntil, 'trafficleft': trafficleft, 'leechtraffic': leechtraffic, 'premium': 
premium} + return {'validuntil' : validuntil, + 'trafficleft' : trafficleft, + 'leechtraffic': leechtraffic, + 'premium' : premium} def login(self, user, data, req): - if isinstance(self.COOKIES, list): - set_cookies(req.cj, self.COOKIES) + if not self.HOSTER_URL: #@TODO: Remove in 0.4.10 + raise Exception(_("Missing HOSTER_DOMAIN")) - url = urljoin(self.HOSTER_URL, "login.html") - html = req.load(url, decode=True) + if not self.LOGIN_URL: + self.LOGIN_URL = urlparse.urljoin(self.HOSTER_URL, "login.html") + html = req.load(self.LOGIN_URL, decode=True) action, inputs = parseHtmlForm('name="FL"', html) if not inputs: - inputs = {'op': "login", + inputs = {'op' : "login", 'redirect': self.HOSTER_URL} - inputs.update({'login': user, + inputs.update({'login' : user, 'password': data['password']}) - html = req.load(self.HOSTER_URL, post=inputs, decode=True) + if action: + url = urlparse.urljoin("http://", action) + else: + url = self.HOSTER_URL + + html = req.load(url, post=inputs, decode=True) if re.search(self.LOGIN_FAIL_PATTERN, html): self.wrongPassword() diff --git a/module/plugins/internal/XFSCrypter.py b/module/plugins/internal/XFSCrypter.py index 4b57dab90..80eff53ea 100644 --- a/module/plugins/internal/XFSCrypter.py +++ b/module/plugins/internal/XFSCrypter.py @@ -6,7 +6,7 @@ from module.plugins.internal.SimpleCrypter import SimpleCrypter, create_getInfo class XFSCrypter(SimpleCrypter): __name__ = "XFSCrypter" __type__ = "crypter" - __version__ = "0.05" + __version__ = "0.09" __pattern__ = r'^unmatchable$' @@ -16,14 +16,30 @@ class XFSCrypter(SimpleCrypter): HOSTER_DOMAIN = None - HOSTER_NAME = None URL_REPLACEMENTS = [(r'&?per_page=\d+', ""), (r'[?/&]+$', ""), (r'(.+/[^?]+)$', r'\1?'), (r'$', r'&per_page=10000')] - COOKIES = [(HOSTER_DOMAIN, "lang", "english")] - - LINK_PATTERN = r'<(?:td|TD).*?>\s*<a href="(.+?)".*?>.+?(?:</a>)?\s*</(?:td|TD)>' - NAME_PATTERN = r'<[tT]itle>.*?\: (?P<N>.+) folder</[tT]itle>' + LINK_PATTERN = r'<(?:td|TD).*?>\s*(?:<.+>\s*)?<a href="(.+?)".*?>.+?(?:</a>)?\s*(?:<.+>\s*)?</(?:td|TD)>' + NAME_PATTERN = r'<[Tt]itle>.*?\: (?P<N>.+) folder</[Tt]itle>' OFFLINE_PATTERN = r'>\s*\w+ (Not Found|file (was|has been) removed)' TEMP_OFFLINE_PATTERN = r'>\s*\w+ server (is in )?(maintenance|maintainance)' + + + def prepare(self): + if not self.HOSTER_DOMAIN: + if self.account: + account = self.account + else: + account_name = (self.__name__ + ".py").replace("Folder.py", "").replace(".py", "") + account = self.pyfile.m.core.accountManager.getAccountPlugin(account_name) + + if account and hasattr(account, "HOSTER_DOMAIN") and account.HOSTER_DOMAIN: + self.HOSTER_DOMAIN = account.HOSTER_DOMAIN + else: + self.fail(_("Missing HOSTER_DOMAIN")) + + if isinstance(self.COOKIES, list): + self.COOKIES.insert((self.HOSTER_DOMAIN, "lang", "english")) + + return super(XFSCrypter, self).prepare() diff --git a/module/plugins/internal/XFSHoster.py b/module/plugins/internal/XFSHoster.py index dd2dd2527..e2818886c 100644 --- a/module/plugins/internal/XFSHoster.py +++ b/module/plugins/internal/XFSHoster.py @@ -1,39 +1,34 @@ # -*- coding: utf-8 -*- +import pycurl +import random import re +import urlparse -from random import random -from time import sleep - -from pycurl import FOLLOWLOCATION, LOW_SPEED_TIME - -from module.plugins.hoster.UnrestrictLi import secondsToMidnight from module.plugins.internal.CaptchaService import ReCaptcha, SolveMedia -from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo +from module.plugins.internal.SimpleHoster import SimpleHoster, 
create_getInfo, secondsToMidnight from module.utils import html_unescape class XFSHoster(SimpleHoster): __name__ = "XFSHoster" __type__ = "hoster" - __version__ = "0.31" + __version__ = "0.51" __pattern__ = r'^unmatchable$' __description__ = """XFileSharing hoster plugin""" __license__ = "GPLv3" - __authors__ = [("zoidberg", "zoidberg@mujmail.cz"), - ("stickell", "l.stickell@yahoo.it"), - ("Walter Purcaro", "vuolter@gmail.com")] + __authors__ = [("zoidberg" , "zoidberg@mujmail.cz"), + ("stickell" , "l.stickell@yahoo.it"), + ("Walter Purcaro", "vuolter@gmail.com" )] HOSTER_DOMAIN = None - HOSTER_NAME = None - TEXT_ENCODING = False - COOKIES = [(HOSTER_DOMAIN, "lang", "english")] - CHECK_DIRECT_LINK = None - MULTI_HOSTER = True #@NOTE: Should be default to False for safe, but I'm lazy... + TEXT_ENCODING = False + DIRECT_LINK = None + MULTI_HOSTER = True #@NOTE: Should be default to False for safe, but I'm lazy... NAME_PATTERN = r'(Filename[ ]*:[ ]*</b>(</td><td nowrap>)?|name="fname"[ ]+value="|<[\w^_]+ class="(file)?name">)\s*(?P<N>.+?)(\s*<|")' SIZE_PATTERN = r'(Size[ ]*:[ ]*</b>(</td><td>)?|File:.*>|</font>\s*\(|<[\w^_]+ class="size">)\s*(?P<S>[\d.,]+)\s*(?P<U>[\w^_]+)' @@ -41,11 +36,12 @@ class XFSHoster(SimpleHoster): OFFLINE_PATTERN = r'>\s*\w+ (Not Found|file (was|has been) removed)' TEMP_OFFLINE_PATTERN = r'>\s*\w+ server (is in )?(maintenance|maintainance)' - WAIT_PATTERN = r'<span id="countdown_str">.*?>(\d+)</span>|id="countdown" value=".*?(\d+).*?"' + WAIT_PATTERN = r'<span id="countdown_str".*>(\d+)</span>|id="countdown" value=".*?(\d+).*?"' PREMIUM_ONLY_PATTERN = r'>This file is available for Premium Users only' + HAPPY_HOUR_PATTERN = r'>[Hh]appy hour' ERROR_PATTERN = r'(?:class=["\']err["\'].*?>|<[Cc]enter><b>|>Error</td>|>\(ERROR:)(?:\s*<.+?>\s*)*(.+?)(?:["\']|<|\))' - LEECH_LINK_PATTERN = r'<h2>Download Link</h2>\s*<textarea[^>]*>([^<]+)' + LINK_LEECH_PATTERN = r'<h2>Download Link</h2>\s*<textarea[^>]*>([^<]+)' LINK_PATTERN = None #: final download url pattern CAPTCHA_PATTERN = r'(https?://[^"\']+?/captchas?/[^"\']+)' @@ -58,58 +54,39 @@ class XFSHoster(SimpleHoster): def setup(self): - self.chunkLimit = 1 + self.chunkLimit = -1 if self.premium else 1 self.resumeDownload = self.multiDL = self.premium def prepare(self): """ Initialize important variables """ if not self.HOSTER_DOMAIN: - self.fail(_("Missing HOSTER_DOMAIN")) + if self.account: + account = self.account + else: + account = self.pyfile.m.core.accountManager.getAccountPlugin(self.__name__) - if not self.HOSTER_NAME: - self.HOSTER_NAME = "".join([str.capitalize() for str in self.HOSTER_DOMAIN.split('.')]) + if account and hasattr(account, "HOSTER_DOMAIN") and account.HOSTER_DOMAIN: + self.HOSTER_DOMAIN = account.HOSTER_DOMAIN + else: + self.fail(_("Missing HOSTER_DOMAIN")) + + if isinstance(self.COOKIES, list): + self.COOKIES.insert((self.HOSTER_DOMAIN, "lang", "english")) if not self.LINK_PATTERN: - pattern = r'(https?://(www\.)?([^/]*?%s|\d+\.\d+\.\d+\.\d+)(\:\d+)?(/d/|(/files)?/\d+/\w+/).+?)["\'<]' + pattern = r'(?:file: "(.+?)"|(https?://(?:www\.)?([^/]*?%s|\d+\.\d+\.\d+\.\d+)(\:\d+)?(/d/|(/files)?/\d+/\w+/).+?)["\'<])' self.LINK_PATTERN = pattern % self.HOSTER_DOMAIN.replace('.', '\.') - self.captcha = None - self.errmsg = None - self.passwords = self.getPassword().splitlines() - super(XFSHoster, self).prepare() - if self.CHECK_DIRECT_LINK is None: - self.directDL = bool(self.premium) - - - def handleFree(self): - link = self.getDownloadLink() - - if link: - if self.captcha: - self.correctCaptcha() - - 
self.download(link, ref=True, cookies=True, disposition=True) - - elif self.errmsg: - if 'captcha' in self.errmsg: - self.fail(_("No valid captcha code entered")) - else: - self.fail(self.errmsg) - - else: - self.fail(_("Download link not found")) - + if self.DIRECT_LINK is None: + self.directDL = self.premium - def handlePremium(self): - return self.handleFree() - - def getDownloadLink(self): + def handleFree(self, pyfile): for i in xrange(1, 6): - self.logDebug("Getting download link: #%d" % i) + self.logDebug("Getting download link #%d" % i) self.checkErrors() @@ -119,11 +96,11 @@ class XFSHoster(SimpleHoster): data = self.getPostParameters() - self.req.http.c.setopt(FOLLOWLOCATION, 0) + self.req.http.c.setopt(pycurl.FOLLOWLOCATION, 0) - self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True) + self.html = self.load(pyfile.url, post=data, decode=True) - self.req.http.c.setopt(FOLLOWLOCATION, 1) + self.req.http.c.setopt(pycurl.FOLLOWLOCATION, 1) m = re.search(r'Location\s*:\s*(.+)', self.req.http.header, re.I) if m and not "op=" in m.group(1): @@ -136,12 +113,14 @@ class XFSHoster(SimpleHoster): self.logError(data['op'] if 'op' in data else _("UNKNOWN")) return "" - self.errmsg = None + self.link = m.group(1).strip() #@TODO: Remove .strip() in 0.4.10 + - return m.group(1).strip() #@TODO: Remove .strip() in 0.4.10 + def handlePremium(self, pyfile): + return self.handleFree(pyfile) - def handleMulti(self): + def handleMulti(self, pyfile): if not self.account: self.fail(_("Only registered or premium users can use url leech feature")) @@ -150,11 +129,11 @@ class XFSHoster(SimpleHoster): action, inputs = self.parseHtmlForm() - upload_id = "%012d" % int(random() * 10 ** 12) + upload_id = "%012d" % int(random.random() * 10 ** 12) action += upload_id + "&js_on=1&utype=prem&upload_type=url" inputs['tos'] = '1' - inputs['url_mass'] = self.pyfile.url + inputs['url_mass'] = pyfile.url inputs['up1oad_type'] = 'url' self.logDebug(action, inputs) @@ -167,10 +146,7 @@ class XFSHoster(SimpleHoster): action, inputs = self.parseHtmlForm('F1') if not inputs: - if self.errmsg: - self.retry(reason=self.errmsg) - else: - self.error(_("TEXTAREA F1 not found")) + self.retry(reason=self.info['error'] if 'error' in self.info else _("TEXTAREA F1 not found")) self.logDebug(inputs) @@ -189,69 +165,14 @@ class XFSHoster(SimpleHoster): self.fail(stmsg) #get easybytez.com link for uploaded file - m = re.search(self.LEECH_LINK_PATTERN, self.html) + m = re.search(self.LINK_LEECH_PATTERN, self.html) if m is None: - self.error(_("LEECH_LINK_PATTERN not found")) + self.error(_("LINK_LEECH_PATTERN not found")) header = self.load(m.group(1), just_header=True, decode=True) if 'location' in header: #: Direct download link self.link = header['location'] - else: - self.fail(_("Download link not found")) - - - def checkErrors(self): - m = re.search(self.ERROR_PATTERN, self.html) - if m is None: - self.errmsg = None - else: - self.errmsg = m.group(1).strip() - - self.logWarning(re.sub(r"<.*?>", " ", self.errmsg)) - - if 'wait' in self.errmsg: - wait_time = sum([int(v) * {"hr": 3600, "hour": 3600, "min": 60, "sec": 1}[u.lower()] for v, u in - re.findall(r'(\d+)\s*(hr|hour|min|sec)', self.errmsg, re.I)]) - self.wait(wait_time, True) - - elif 'country' in self.errmsg: - self.fail(_("Downloads are disabled for your country")) - - elif 'captcha' in self.errmsg: - self.invalidCaptcha() - - elif 'premium' in self.errmsg and 'require' in self.errmsg: - self.fail(_("File can be downloaded by premium users only")) - - elif 
'limit' in self.errmsg: - if 'days' in self.errmsg: - delay = secondsToMidnight(gmt=2) - retries = 3 - else: - delay = 1 * 60 * 60 - retries = 24 - - self.wantReconnect = True - self.retry(retries, delay, _("Download limit exceeded")) - - elif 'countdown' in self.errmsg or 'Expired' in self.errmsg: - self.retry(reason=_("Link expired")) - - elif 'maintenance' in self.errmsg or 'maintainance' in self.errmsg: - self.tempOffline() - - elif 'up to' in self.errmsg: - self.fail(_("File too large for free download")) - - else: - self.wantReconnect = True - self.retry(wait_time=60, reason=self.errmsg) - - if self.errmsg: - self.info['error'] = self.errmsg - else: - self.info.pop('error', None) def getPostParameters(self): @@ -263,17 +184,15 @@ class XFSHoster(SimpleHoster): if not inputs: action, inputs = self.parseHtmlForm('F1') if not inputs: - if self.errmsg: - self.retry(reason=self.errmsg) - else: - self.error(_("TEXTAREA F1 not found")) + self.retry(reason=self.info['error'] if 'error' in self.info else _("TEXTAREA F1 not found")) self.logDebug(inputs) if 'op' in inputs: if "password" in inputs: - if self.passwords: - inputs['password'] = self.passwords.pop(0) + password = self.getPassword() + if password: + inputs['password'] = password else: self.fail(_("Missing password")) @@ -283,8 +202,7 @@ class XFSHoster(SimpleHoster): wait_time = int(m.group(1)) self.setWait(wait_time, False) - self.captcha = self.handleCaptcha(inputs) - + self.handleCaptcha(inputs) self.wait() else: inputs['referer'] = self.pyfile.url @@ -304,39 +222,43 @@ class XFSHoster(SimpleHoster): if m: captcha_url = m.group(1) inputs['code'] = self.decryptCaptcha(captcha_url) - return 1 + return m = re.search(self.CAPTCHA_BLOCK_PATTERN, self.html, re.S) if m: captcha_div = m.group(1) numerals = re.findall(r'<span.*?padding-left\s*:\s*(\d+).*?>(\d)</span>', html_unescape(captcha_div)) + self.logDebug(captcha_div) - inputs['code'] = "".join([a[1] for a in sorted(numerals, key=lambda num: int(num[0]))]) + + inputs['code'] = "".join(a[1] for a in sorted(numerals, key=lambda num: int(num[0]))) + self.logDebug("Captcha code: %s" % inputs['code'], numerals) - return 2 + return recaptcha = ReCaptcha(self) try: captcha_key = re.search(self.RECAPTCHA_PATTERN, self.html).group(1) - except: + + except Exception: captcha_key = recaptcha.detect_key() + else: self.logDebug("ReCaptcha key: %s" % captcha_key) if captcha_key: - inputs['recaptcha_challenge_field'], inputs['recaptcha_response_field'] = recaptcha.challenge(captcha_key) - return 3 + inputs['recaptcha_response_field'], inputs['recaptcha_challenge_field'] = recaptcha.challenge(captcha_key) + return solvemedia = SolveMedia(self) try: captcha_key = re.search(self.SOLVEMEDIA_PATTERN, self.html).group(1) - except: + + except Exception: captcha_key = solvemedia.detect_key() + else: self.logDebug("SolveMedia key: %s" % captcha_key) if captcha_key: - inputs['adcopy_challenge'], inputs['adcopy_response'] = solvemedia.challenge(captcha_key) - return 4 - - return 0 + inputs['adcopy_response'], inputs['adcopy_challenge'] = solvemedia.challenge(captcha_key) |
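The reworked SimpleCrypter.getLinks above resolves scheme-less matches of LINK_PATTERN against the scheme and host of the folder page before returning them (the real method additionally runs pyload's html_unescape and a unicode-escape decode on each hit, which is omitted here). A minimal standalone sketch of that resolution step, in Python 2 as used by the codebase; the pattern and the HTML snippet are invented placeholders, not taken from any real hoster:

# -*- coding: utf-8 -*-
import re
import urlparse

LINK_PATTERN = r'<div class="link"><a href="(.+?)"'    #: hypothetical crypter pattern

def resolve_links(page_url, html):
    """Return absolute urls for every LINK_PATTERN match on the folder page."""
    url_p   = urlparse.urlparse(page_url)
    baseurl = "%s://%s" % (url_p.scheme, url_p.netloc)

    links = []
    for link in re.findall(LINK_PATTERN, html):
        if not urlparse.urlparse(link).scheme:          #: relative link -> join with the page's base
            link = urlparse.urljoin(baseurl, link)
        links.append(link)
    return links

html = ('<div class="link"><a href="/files/1">a</a></div>'
        '<div class="link"><a href="http://cdn.example.com/files/2">b</a></div>')
print resolve_links("http://www.example.com/folder/xyz", html)
#: ['http://www.example.com/files/1', 'http://cdn.example.com/files/2']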
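getFileURL (installed on SimpleHoster as directLink) decides whether a url already points at the payload by issuing header-only requests: a content-disposition header means a direct download, a location header is followed, and otherwise the content type decides. A simplified approximation of that loop, with fetch_header standing in for pyload's self.load(url, just_header=True) and the 302 special case left out; Python 2 as in the codebase:

import mimetypes
import os
import urlparse

def get_file_url(url, fetch_header, max_redirects=10):
    """Return `url` if it looks like a direct download, "" if it is an HTML page.

    `fetch_header` must return a dict of lower-cased response headers.
    """
    for _i in xrange(max_redirects):
        header = fetch_header(url)

        if 'content-disposition' in header:              #: server offers a file download
            return url

        location = header.get('location')
        if location:
            if not urlparse.urlparse(location).scheme:   #: make relative redirects absolute
                base = "%s://%s" % urlparse.urlparse(url)[:2]
                location = urlparse.urljoin(base, location)
            url = location                               #: keep following
            continue

        #: no more redirects: decide by mime type (response header first, extension second)
        mime = (header.get('content-type') or "").split(';')[0].strip()
        if not mime:
            name = urlparse.urlparse(url).path.split('/')[-1]
            if os.path.splitext(name)[1]:
                mime = mimetypes.guess_type(name)[0] or "application/octet-stream"

        return url if mime and 'html' not in mime else ""

    return ""                                            #: too many redirects

SimpleHoster.handleDirect calls the real version of this helper before the page is ever downloaded and parsed; SimpleDereferer.handleDirect performs a single-step variant of the same header check.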
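SimpleHoster.apiInfo (and the rewritten parseFileInfo) guesses a display name from the url alone, trying the path basename first, then the first query value, then the subdomain. The chain is easiest to see on concrete inputs; the urls below are invented examples, Python 2:

import urllib
import urlparse

def guess_name(url):
    """Best-effort file name: path basename, else first query value, else subdomain."""
    url   = urllib.unquote(url)
    url_p = urlparse.urlparse(url)
    return (url_p.path.split('/')[-1]
            or url_p.query.split('=', 1)[::-1][0].split('&', 1)[0]
            or url_p.netloc.split('.', 1)[0])

for u in ("http://host.example.com/files/My%20File.zip",
          "http://host.example.com/?file=abc123&lang=en",
          "http://host.example.com"):
    print guess_name(u)
#: "My File.zip", then "abc123", then "host"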
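Both checkErrors and the WAIT_PATTERN handler convert a free-text limit message into seconds by summing every number/unit pair they can find; a bare number (empty unit) counts as seconds, and messages matching "daily"/"today" go through secondsToMidnight() instead. A worked example on an invented hoster message:

import re

UNIT = {"hr": 3600, "hour": 3600, "min": 60, "sec": 1, "": 1}

def parse_wait_time(errmsg):
    """Sum every '<number> <unit>' pair found in a hoster wait/limit message."""
    return sum(int(v) * UNIT[u.lower()]
               for v, u in re.findall(r'(\d+)\s*(hr|hour|min|sec|)', errmsg, re.I))

print parse_wait_time("You must wait 1 hour 25 min 30 sec between downloads")
#: 5130 seconds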
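UnRar._progress derives extraction progress from the console output of (un)rar by collecting consecutive digits and reporting them whenever a '%' follows; any other character resets the buffer. The same scan run over a canned output string, with StringIO standing in for the process's stdout pipe; Python 2:

import sys
from StringIO import StringIO

def read_progress(stream, report):
    """Scan (un)rar console output and report each percentage it prints."""
    s = ""
    while True:
        c = stream.read(1)
        if not c:                 #: EOF
            break
        if c == '%':              #: a complete "NN%" token has been read
            report(int(s))
            s = ""
        elif not c.isdigit():     #: any non-digit resets the buffer
            s = ""
        else:
            s += c

read_progress(StringIO("Extracting  archive.part1.rar  12% 57% 100%\n"),
              report=lambda p: sys.stdout.write("progress %d%%\n" % p))
#: progress 12% / progress 57% / progress 100%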
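UnRar.getDeleteFiles groups the volumes of a multi-part archive by normalizing every candidate name with re_multipart and comparing the result against the normalized name of the extracted file. The grouping shown on an invented directory listing:

import re

re_multipart = re.compile(r'\.(part|r)(\d+)(?:\.rar)?(\.rev|\.bad)?', re.I)

def volumes_of(archive, listing):
    """All names in `listing` that belong to the same multi-part set as `archive`."""
    canonical = re_multipart.sub(".rar", archive)
    return [f for f in listing
            if re_multipart.search(f) and re_multipart.sub(".rar", f) == canonical]

listing = ["archive.part1.rar", "archive.part2.rar", "archive.part3.rar",
           "archive.r00", "other.rar", "notes.txt"]
print volumes_of("archive.part1.rar", listing)
#: ['archive.part1.rar', 'archive.part2.rar', 'archive.part3.rar', 'archive.r00']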
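UnRar.call_cmd (and its SevenZip twin) only assembles an argument list and hands it to subprocess.Popen; overwrite mode, exclusion masks, the assume-yes switch and the password (or an explicit "no password" marker) are all plain command-line flags. A condensed reconstruction of how an extraction call is composed; the archive path, output directory and password are placeholders, and the -or flag used when deletion is enabled is left out:

def build_unrar_call(command, archive, out, password=None,
                     overwrite=False, excludefiles=(), cmd="unrar"):
    """Compose the argument list the UnRar plugin passes to subprocess.Popen."""
    args = ["-o+" if overwrite else "-o-"]                  #: overwrite existing files or not
    args += ["-x'%s'" % w.strip() for w in excludefiles]    #: exclusion masks
    args.append("-y")                                       #: assume yes on all queries
    args.append("-p%s" % password if password else "-p-")   #: password, or none at all
    return [cmd, command] + args + [archive, out]

call = build_unrar_call("x", "archive.part1.rar", "/tmp/out/",
                        password="secret", excludefiles=["*.nfo"])
print " ".join(call)
#: unrar x -o- -x'*.nfo' -y -psecret archive.part1.rar /tmp/out/
#: subprocess.Popen(call, stdout=subprocess.PIPE, stderr=subprocess.PIPE) then runs it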
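The new UnZip plugin is a thin wrapper around the standard zipfile module: testzip() for the CRC check, namelist() for the file list, extractall() for extraction. The same three calls on a throw-away archive built in a temporary directory; filenames and paths here are invented, and Python 2.7 is assumed for the with-statement support on ZipFile:

import os
import tempfile
import zipfile

workdir = tempfile.mkdtemp()
archive = os.path.join(workdir, "bundle.zip")

#: build a tiny archive to work on
with zipfile.ZipFile(archive, 'w', allowZip64=True) as z:
    z.writestr("readme.txt", "hello")

#: the calls UnZip relies on: testzip() for integrity, namelist()/extractall() for content
with zipfile.ZipFile(archive, 'r', allowZip64=True) as z:
    assert z.testzip() is None            #: None means no corrupted member
    print z.namelist()                    #: ['readme.txt']
    z.extractall(os.path.join(workdir, "out"))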
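XFSAccount derives the premium flag from the scraped expire date: the date is parsed with strptime("%d %B %Y"), converted with time.mktime and compared against the current time; an expired date downgrades the account to a plain registered one. A small demonstration with invented dates (this assumes an English locale, since %B matches month names):

import time

def parse_valid_until(expiredate):
    """Turn the expire date scraped from an XFS account page into (validuntil, premium)."""
    validuntil = time.mktime(time.strptime(expiredate, "%d %B %Y"))

    if validuntil > time.mktime(time.gmtime()):
        return validuntil, True        #: still premium, traffic treated as unlimited
    return None, False                 #: expired -> registered (non-premium) account

print parse_valid_until("31 December 2030")   #: (timestamp, True)
print parse_valid_until("01 January 2010")    #: (None, False)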
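XFSHoster.handleCaptcha solves the css-positioned digit captcha by collecting every digit together with its padding-left offset and reading the digits back in offset order. The reassembly on an invented captcha block:

import re

def decode_positioned_captcha(captcha_div):
    """Re-order the digit <span>s of an XFS captcha by their padding-left offset."""
    numerals = re.findall(r'<span.*?padding-left\s*:\s*(\d+).*?>(\d)</span>', captcha_div)
    return "".join(digit for _pos, digit in sorted(numerals, key=lambda n: int(n[0])))

captcha_div = ('<span style="padding-left:22px">7</span>'
               '<span style="padding-left:0px">3</span>'
               '<span style="padding-left:11px">9</span>')
print decode_positioned_captcha(captcha_div)   #: "397"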