diff options
author | Walter Purcaro <vuolter@gmail.com> | 2014-12-05 19:29:24 +0100 |
---|---|---|
committer | Walter Purcaro <vuolter@gmail.com> | 2014-12-05 19:29:24 +0100 |
commit | dc05dcc1dcd44cdad8a6bdd0a539c5a8124d4820 (patch) | |
tree | 61b3e88aed657cba2c0bc474232926edcf0b3f9c /module/plugins/hoster | |
parent | Fix with statement on old env like python 2.5 (2) (diff) | |
download | pyload-dc05dcc1dcd44cdad8a6bdd0a539c5a8124d4820.tar.xz |
[FilerNet] Cleanup
Diffstat (limited to 'module/plugins/hoster')
-rw-r--r-- | module/plugins/hoster/FilerNet.py | 64 |
1 files changed, 22 insertions, 42 deletions
# -*- coding: utf-8 -*-
#
# Test links:
# http://filer.net/get/ivgf5ztw53et3ogd
# http://filer.net/get/hgo14gzcng3scbvv

import re

from urlparse import urljoin

from module.plugins.internal.CaptchaService import ReCaptcha
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo


class FilerNet(SimpleHoster):
    """Hoster plugin for filer.net (free and premium downloads)."""

    __name__    = "FilerNet"
    __type__    = "hoster"
    __version__ = "0.09"

    __pattern__ = r'https?://(?:www\.)?filer\.net/get/\w+'

    __description__ = """Filer.net hoster plugin"""
    __license__     = "GPLv3"
    # FIX: the committed revision was missing the comma between the two
    # author tuples, which made Python *call* the first tuple with the
    # second one -> TypeError as soon as the module was imported.
    __authors__     = [("stickell", "l.stickell@yahoo.it"),
                       ("Walter Purcaro", "vuolter@gmail.com")]


    CONTENT_DISPOSITION = True

    INFO_PATTERN    = r'<h1 class="page-header">Free Download (?P<N>\S+) <small>(?P<S>[\w.]+) (?P<U>[\w^_]+)</small></h1>'
    OFFLINE_PATTERN = r'Nicht gefunden'

    LINK_FREE_PATTERN = LINK_PREMIUM_PATTERN = r'href="([^"]+)">Get download</a>'


    def checkErrors(self):
        """Retry later when filer.net enforces a wait between free downloads.

        The wait time (in seconds) is embedded in the German notice page
        held in ``self.html``.
        """
        m = re.search(r'musst du <span id="time">(\d+)</span> Sekunden warten',
                      self.html)
        if m:
            self.retry(wait_time=int(m.group(1)),
                       reason=_("Wait between free downloads"))


    def handleFree(self):
        """Free-mode download.

        Submits the 'token' form, then the 'hash' form together with a
        solved ReCaptcha (up to 5 attempts), and stores the redirect
        target in ``self.link``.
        """
        inputs = self.parseHtmlForm(input_names={'token': re.compile(r'.+')})[1]
        if 'token' not in inputs:
            self.error(_("Unable to detect token"))

        self.html = self.load(self.pyfile.url, post={'token': inputs['token']}, decode=True)

        inputs = self.parseHtmlForm(input_names={'hash': re.compile(r'.+')})[1]
        if 'hash' not in inputs:
            self.error(_("Unable to detect hash"))

        recaptcha = ReCaptcha(self)

        for _i in xrange(5):
            challenge, response = recaptcha.challenge()

            # FIX: request only the response headers (just_header=True) so
            # the 'location' lookup below operates on a header dict and not
            # on the page body a plain load() call returns.  The pre-cleanup
            # code's own comment ("In 0.5 clean the code using just_header")
            # documents this as the intended call.
            header = self.load(self.pyfile.url,
                               just_header=True,
                               post={'recaptcha_challenge_field': challenge,
                                     'recaptcha_response_field' : response,
                                     'hash'                     : inputs['hash']})

            if 'location' in header and header['location']:
                self.correctCaptcha()
                # The redirect target is site-relative; resolve it against
                # the filer.net root before handing it to the downloader.
                self.link = urljoin('http://filer.net', header['location'])
                return
            else:
                self.invalidCaptcha()


    def handlePremium(self):
        """Premium-mode download: delegate to SimpleHoster, then make the
        extracted link absolute against the filer.net root."""
        super(FilerNet, self).handlePremium()
        if self.link:
            self.link = urljoin("http://filer.net/", self.link)


getInfo = create_getInfo(FilerNet)