author    | zoidberg10 <zoidberg@mujmail.cz> | 2011-11-29 21:45:27 +0100
committer | zoidberg10 <zoidberg@mujmail.cz> | 2011-11-29 21:45:27 +0100
commit    | 8d3d5625aa14614f5799621137a27f07d08e3dca (patch)
tree      | 46ba08101a8f225805ca2f77e8db0869edb58b45 /module/plugins
parent    | Merged in nick_de/pyload (pull request #6) (diff)
download  | pyload-8d3d5625aa14614f5799621137a27f07d08e3dca.tar.xz
update hellspy.cz, add bayfiles.com
Diffstat (limited to 'module/plugins')
-rw-r--r-- | module/plugins/crypter/EmbeduploadCom.py   | 49
-rw-r--r-- | module/plugins/crypter/LinkdecrypterCom.py |  4
-rw-r--r-- | module/plugins/crypter/MultiuploadCom.py   | 58
-rw-r--r-- | module/plugins/hoster/BayfilesCom.py       | 77
-rw-r--r-- | module/plugins/hoster/CzshareCom.py        |  4
-rw-r--r-- | module/plugins/hoster/EnteruploadCom.py    | 81
-rw-r--r-- | module/plugins/hoster/HellspyCz.py         | 74
-rw-r--r-- | module/plugins/internal/SimpleHoster.py    | 42
8 files changed, 317 insertions, 72 deletions
diff --git a/module/plugins/crypter/EmbeduploadCom.py b/module/plugins/crypter/EmbeduploadCom.py
new file mode 100644
index 000000000..e84a06cc1
--- /dev/null
+++ b/module/plugins/crypter/EmbeduploadCom.py
@@ -0,0 +1,49 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+
+class EmbeduploadCom(Crypter):
+    __name__ = "EmbeduploadCom"
+    __type__ = "crypter"
+    __pattern__ = r"http://(www\.)?embedupload.com/\?d=.*"
+    __version__ = "0.01"
+    __description__ = """EmbedUpload.com crypter"""
+    __config__ = [("preferedHoster", "str", "Prefered hoster list (bar-separated) ", "embedupload"),
+        ("ignoredHoster", "str", "Ignored hoster list (bar-separated) ", "")]
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    LINK_PATTERN = r'<div id="([^"]+)"[^>]*>\s*<a href="([^"]+)" target="_blank" (?:class="DownloadNow"|style="color:red")>'
+
+    def decrypt(self, pyfile):
+        self.html = self.load(self.pyfile.url, decode=True)
+        tmp_links = new_links = []
+
+        found = re.findall(self.LINK_PATTERN, self.html)
+        if found:
+            prefered_set = set(self.getConfig("preferedHoster").split('|'))
+            prefered_set = map(lambda s: s.lower().split('.')[0], prefered_set)
+            print "PF", prefered_set
+            tmp_links.extend([x[1] for x in found if x[0] in prefered_set])
+            self.getLocation(tmp_links, new_links)
+
+        if not new_links:
+            ignored_set = set(self.getConfig("ignoredHoster").split('|'))
+            ignored_set = map(lambda s: s.lower().split('.')[0], ignored_set)
+            print "IG", ignored_set
+            tmp_links.extend([x[1] for x in found if x[0] in ignored_set])
+            self.getLocation(tmp_links, new_links)
+
+        if new_links:
+            self.core.files.addLinks(new_links, self.pyfile.package().id)
+        else:
+            self.fail('Could not extract any links')
+
+    def getLocation(self, tmp_links, new_links):
+        for link in tmp_links:
+            header = self.load(link, just_header = True)
+            if "location" in header:
+                new_links.append(header['location'])
+
+
\ No newline at end of file
diff --git a/module/plugins/crypter/LinkdecrypterCom.py b/module/plugins/crypter/LinkdecrypterCom.py
index e2fc55150..c087c3518 100644
--- a/module/plugins/crypter/LinkdecrypterCom.py
+++ b/module/plugins/crypter/LinkdecrypterCom.py
@@ -22,8 +22,8 @@ from module.plugins.Crypter import Crypter
 
 class LinkdecrypterCom(Crypter):
     __name__ = "LinkdecrypterCom"
     __type__ = "crypter"
-    __pattern__ = r"http://(\w*\.)?(10001mb\.com|123link\.it|1cl\.in|1kh\.de|1zh\.us|2joy\.de|2so\.be|3\.ly|5\.gp|6nc\.net|7li\.in|9\.bb|adf\.ly|adflav\.com|adfoc\.us|allanalpass\.com|alturl\.com|amy\.gs|any\.gs|apurl\.ru|aurl\.es|b23\.ru|baberepublic\.com|bat5\.com|bax\.li|beam\.to|bit\.ly|blu\.cc|c\.ly|capourl\.com|cc\.st|cd\.vg|cloneurl\.com|convertircodigo\.com|crypt-it\.com|crypt\.to|cryptlink\.ws|deb\.gs|digzip\.com|djurl\.com|dl-protect\.com|doiop\.com|ehe\.me|embedupload\.com|encript\.in|encurtador\.com|enlacs\.com|evg\.in|extreme-protect\.com|fa\.by|faja\.me|fapoff\.com|fdnlinks\.com|fea\.me|fff\.to|filedeck\.net|filemirrorupload\.com|fileupster\.com|flameupload\.com|freetexthost\.com|fwd4\.me|fyad\.org|goandgrab\.info|goblig\.com|goo\.gl|h-url\.in|hasurl\.co\.cc|hide-url\.net|hidemyass\.com|hides\.at|hideurl\.biz|ho\.io|hornywood\.tv|href\.hu|id2\.tryjav\.com|ilix\.in|ily\.me|ino\.me|interupload\.com|is\.gd|ivpaste\.com|j\.mp|je\.pl|jheberg\.com|just\.as|kickupload\.com|klnk\.de|knoffl\.com|kodo\.ameoto\.com|ks\.gs|latwy\.pl|link-go\.info|link-protector\.com|link-safe\.net|link4jo\.com|linkanonimo\.com|linkbabes\.com|linkbank\.eu|linkbee\.com|linkblur\.com|linkbucks\.com|linkcrypt\.com|linkcrypt\.ws|linkencrypter\.com|linkhide\.com\.ar|linkhide\.in|linkoculto\.net|linkok\.org|linkprivado\.com|linkprivate\.net|linkprotect\.in|links-protect\.com|links-protect\.info|links\.tc|linksafe\.me|linksaver\.info|linkse\.info|linkseguro\.com\.ar|linkseguro\.org|linksole\.com|linksprotegidos\.info|linkto\.net|linkweb\.dk|linkx\.in|linkzip\.net|listedfiles\.com|littleurl\.net|lixk\.me|ljv2\.com|ll11\.org|lnk\.cm|lnk\.co|longr\.us|lovelink\.in|mcaf\.ee|megaline\.co|megaupper\.com|mhz\.me|migre\.me|miniurls\.co|minu\.me|mir\.cr|mirrorcreator\.com|mo\.by|multi-uploadeur\.com|multiupload\.com|murl\.kz|musicalmente\.info|mypaqe\.com|mypl\.us|myrapidlinks\.com|myref\.de|myurl\.in|nbanews\.us|okconsolas\.com|oneddl\.canhaz\.it|ow\.ly|p4p\.com\.es|p6l\.org|paste\.frubar\.net|paste\.hotfile-bb\.com|paste\.ubuntu\.com|paste2\.org|paste21\.info|pastebin\.com|paylesssofts\.net|poontown\.net|pqueno\.com|priva\.us|protec-link\.com|protect-ddl\.com|protect-my-links\.com|protected\.socadvnet\.com|protectlinks\.com|protectlinks\.net|protectlk\.com|protege-mes-liens\.com|ptl\.li|qooy\.com|qqc\.co|qvvo\.com|rapidfolder\.com|rapidsafe\.de|rapidsafe\.org|rapidshare\.mu|realfiles\.net|redir\.ec|ref\.so|relinka\.net|rexwo\.com|rqq\.co|rs-layer\.com|rsmonkey\.com|s2l\.biz|saf\.li|safe\.mn|safelinking\.net|saferlinks\.com|sealed\.in|seclnk\.in|seriousfiles\.com|sharebee\.com|short-link\.fr|shortlink\.ca|shorturlscript\.net|simurl\.com|sinl\.es|skroc\.pl|slexy\.org|slnky\.net|smsdl\.com|sn\.im|sonofertas\.es|spedr\.com|spreadlink\.us|star-cyber\.com|subedlc\.com|subirfacil\.com|syl\.me|szort\.pl|takemyfile\.com|takemylinks\.com|textsnip\.com|thecow\.me|thesefiles\.com|tilien\.net|tiny\.cc|tiny\.lt|tinylinks\.co|tinypaste\.com|tinyurl\.com|tinyurlscript\.info|tmf\.myegy\.com|togoto\.us|tot\.to|tra\.kz|u\.to|uberpicz\.com|ulinks\.net|ultra-protect\.com|ultrafiles\.net|undeadlink\.com|uploadjockey\.com|uploadmirrors\.com|uploadonall\.com|upmirror\.com|upsafe\.org|ur\.ly|url-go\.com|url-site\.com|url4t\.com|urla\.in|urlbeat\.net|urlcash\.net|urlcrypt\.com|urlcut\.com|urlcut\.in|urldefender\.com|urln\.tk|urlpulse\.net|urlspy\.co\.cc|urwij|uselink\.info|uucc\.cc|uze\.in|wcrypt\.in|webtooljungle\.com|weepax\.com|whackyvidz\.com|x-ls\.ru|x\.co|xa\.ly|xc\.io|xr\.com|xtreemhost\.com|xurl\.cn|xxs\.ru|ysu\.me|yyv\.co|zff\.co|zio\.in|zpag\.es)/.*"
-    __version__ = "0.2"
+    __pattern__ = r"http://(\w*\.)?(10001mb\.com|123link\.it|1cl\.in|1kh\.de|1zh\.us|2joy\.de|2so\.be|3\.ly|5\.gp|6nc\.net|7li\.in|9\.bb|adf\.ly|adflav\.com|adfoc\.us|allanalpass\.com|alturl\.com|amy\.gs|any\.gs|apurl\.ru|aurl\.es|b23\.ru|baberepublic\.com|bat5\.com|bax\.li|beam\.to|bit\.ly|blu\.cc|c\.ly|capourl\.com|cc\.st|cd\.vg|cloneurl\.com|convertircodigo\.com|crypt-it\.com|crypt\.to|cryptlink\.ws|deb\.gs|digzip\.com|djurl\.com|dl-protect\.com|doiop\.com|ehe\.me|encript\.in|encurtador\.com|enlacs\.com|evg\.in|extreme-protect\.com|fa\.by|faja\.me|fapoff\.com|fdnlinks\.com|fea\.me|fff\.to|filedeck\.net|filemirrorupload\.com|fileupster\.com|flameupload\.com|freetexthost\.com|fwd4\.me|fyad\.org|goandgrab\.info|goblig\.com|goo\.gl|h-url\.in|hasurl\.co\.cc|hide-url\.net|hidemyass\.com|hides\.at|hideurl\.biz|ho\.io|hornywood\.tv|href\.hu|id2\.tryjav\.com|ilix\.in|ily\.me|ino\.me|interupload\.com|is\.gd|ivpaste\.com|j\.mp|je\.pl|jheberg\.com|just\.as|kickupload\.com|klnk\.de|knoffl\.com|kodo\.ameoto\.com|ks\.gs|latwy\.pl|link-go\.info|link-protector\.com|link-safe\.net|link4jo\.com|linkanonimo\.com|linkbabes\.com|linkbank\.eu|linkbee\.com|linkblur\.com|linkbucks\.com|linkcrypt\.com|linkcrypt\.ws|linkencrypter\.com|linkhide\.com\.ar|linkhide\.in|linkoculto\.net|linkok\.org|linkprivado\.com|linkprivate\.net|linkprotect\.in|links-protect\.com|links-protect\.info|links\.tc|linksafe\.me|linksaver\.info|linkse\.info|linkseguro\.com\.ar|linkseguro\.org|linksole\.com|linksprotegidos\.info|linkto\.net|linkweb\.dk|linkx\.in|linkzip\.net|listedfiles\.com|littleurl\.net|lixk\.me|ljv2\.com|ll11\.org|lnk\.cm|lnk\.co|longr\.us|lovelink\.in|mcaf\.ee|megaline\.co|megaupper\.com|mhz\.me|migre\.me|miniurls\.co|minu\.me|mir\.cr|mirrorcreator\.com|mo\.by|multi-uploadeur\.com|murl\.kz|musicalmente\.info|mypaqe\.com|mypl\.us|myrapidlinks\.com|myref\.de|myurl\.in|nbanews\.us|okconsolas\.com|oneddl\.canhaz\.it|ow\.ly|p4p\.com\.es|p6l\.org|paste\.frubar\.net|paste\.hotfile-bb\.com|paste\.ubuntu\.com|paste2\.org|paste21\.info|pastebin\.com|paylesssofts\.net|poontown\.net|pqueno\.com|priva\.us|protec-link\.com|protect-ddl\.com|protect-my-links\.com|protected\.socadvnet\.com|protectlinks\.com|protectlinks\.net|protectlk\.com|protege-mes-liens\.com|ptl\.li|qooy\.com|qqc\.co|qvvo\.com|rapidfolder\.com|rapidsafe\.de|rapidsafe\.org|rapidshare\.mu|realfiles\.net|redir\.ec|ref\.so|relinka\.net|rexwo\.com|rqq\.co|rs-layer\.com|rsmonkey\.com|s2l\.biz|saf\.li|safe\.mn|safelinking\.net|saferlinks\.com|sealed\.in|seclnk\.in|seriousfiles\.com|sharebee\.com|short-link\.fr|shortlink\.ca|shorturlscript\.net|simurl\.com|sinl\.es|skroc\.pl|slexy\.org|slnky\.net|smsdl\.com|sn\.im|sonofertas\.es|spedr\.com|spreadlink\.us|star-cyber\.com|subedlc\.com|subirfacil\.com|syl\.me|szort\.pl|takemyfile\.com|takemylinks\.com|textsnip\.com|thecow\.me|thesefiles\.com|tilien\.net|tiny\.cc|tiny\.lt|tinylinks\.co|tinypaste\.com|tinyurl\.com|tinyurlscript\.info|tmf\.myegy\.com|togoto\.us|tot\.to|tra\.kz|u\.to|uberpicz\.com|ulinks\.net|ultra-protect\.com|ultrafiles\.net|undeadlink\.com|uploadjockey\.com|uploadmirrors\.com|uploadonall\.com|upmirror\.com|upsafe\.org|ur\.ly|url-go\.com|url-site\.com|url4t\.com|urla\.in|urlbeat\.net|urlcash\.net|urlcrypt\.com|urlcut\.com|urlcut\.in|urldefender\.com|urln\.tk|urlpulse\.net|urlspy\.co\.cc|urwij|uselink\.info|uucc\.cc|uze\.in|wcrypt\.in|webtooljungle\.com|weepax\.com|whackyvidz\.com|x-ls\.ru|x\.co|xa\.ly|xc\.io|xr\.com|xtreemhost\.com|xurl\.cn|xxs\.ru|ysu\.me|yyv\.co|zff\.co|zio\.in|zpag\.es)/.*"
+    __version__ = "0.21"
     __description__ = """linkdecrypter.com"""
     __author_name__ = ("zoidberg")
diff --git a/module/plugins/crypter/MultiuploadCom.py b/module/plugins/crypter/MultiuploadCom.py
new file mode 100644
index 000000000..bf5540982
--- /dev/null
+++ b/module/plugins/crypter/MultiuploadCom.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Crypter import Crypter
+from module.common.json_layer import json_loads
+from time import time
+
+class MultiuploadCom(Crypter):
+    __name__ = "MultiuploadCom"
+    __type__ = "crypter"
+    __pattern__ = r"http://(?:www\.)?multiupload.com/(\w+)"
+    __version__ = "0.01"
+    __description__ = """MultiUpload.com crypter"""
+    __config__ = [("preferedHoster", "str", "Prefered hoster list (bar-separated) ", "multiupload"),
+        ("ignoredHoster", "str", "Ignored hoster list (bar-separated) ", "")]
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    ML_LINK_PATTERN = r'<div id="downloadbutton_" style=""><a href="([^"]+)"'
+
+    def decrypt(self, pyfile):
+        self.html = self.load(pyfile.url)
+        found = re.search(self.ML_LINK_PATTERN, self.html)
+        ml_url = found.group(1) if found else None
+
+        json_list = json_loads(self.load("http://multiupload.com/progress/", get = {
+            "d": re.search(self.__pattern__, pyfile.url).group(1),
+            "r": str(int(time()*1000))
+            }))
+        new_links = []
+
+        prefered_set = map(lambda s: s.lower().split('.')[0], set(self.getConfig("preferedHoster").split('|')))
+
+        if ml_url and 'multiupload' in prefered_set: new_links.append(ml_url)
+
+        for link in json_list:
+            if link['service'].lower() in prefered_set and int(link['status']) and not int(link['deleted']):
+                url = self.getLocation(link['url'])
+                if url: new_links.append(url)
+
+        if not new_links:
+            ignored_set = map(lambda s: s.lower().split('.')[0], set(self.getConfig("ignoredHoster").split('|')))
+
+            if 'multiupload' not in ignored_set: new_links.append(ml_url)
+
+            for link in json_list:
+                if link['service'].lower() not in ignored_set and int(link['status']) and not int(link['deleted']):
+                    url = self.getLocation(link['url'])
+                    if url: new_links.append(url)
+
+        if new_links:
+            self.core.files.addLinks(new_links, self.pyfile.package().id)
+        else:
+            self.fail('Could not extract any links')
+
+    def getLocation(self, url):
+        header = self.load(url, just_header = True)
+        return header['location'] if "location" in header else None
\ No newline at end of file
diff --git a/module/plugins/hoster/BayfilesCom.py b/module/plugins/hoster/BayfilesCom.py
new file mode 100644
index 000000000..c771f28c6
--- /dev/null
+++ b/module/plugins/hoster/BayfilesCom.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+"""
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 3 of the License,
+    or (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+    See the GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+    @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+from module.plugins.ReCaptcha import ReCaptcha
+from module.common.json_layer import json_loads
+from time import time
+
+class BayfilesCom(SimpleHoster):
+    __name__ = "BayfilesCom"
+    __type__ = "hoster"
+    __pattern__ = r"http://(?:www\.)?bayfiles\.com/file/\w+/\w+/.*"
+    __version__ = "0.01"
+    __description__ = """Bayfiles.com plugin - free only"""
+    __author_name__ = ("zoidberg")
+    __author_mail__ = ("zoidberg@mujmail.cz")
+
+    FILE_INFO_PATTERN = r'<p title="(?P<N>[^"]+)">[^<]*<strong>(?P<S>[0-9., ]+)(?P<U>[kKMG])i?B</strong></p>'
+    FILE_OFFLINE_PATTERN = r'<p>The requested file could not be found.</p>'
+
+    WAIT_PATTERN = r'>Your IP [0-9.]* has recently downloaded a file\. Upgrade to premium or wait (\d+) minutes\.<'
+    VARS_PATTERN = r'var vfid = (\d+);\s*var delay = (\d+);'
+    LINK_PATTERN = r"javascript:window.location.href = '([^']+)';"
+
+    def handleFree(self):
+        found = re.search(self.WAIT_PATTERN, self.html)
+        if found:
+            self.setWait(int(found.group(1)) * 60)
+            self.wait()
+            self.retry()
+
+        # Get download token
+        found = re.search(self.VARS_PATTERN, self.html)
+        if not found: self.parseError('VARS')
+        vfid, delay = found.groups()
+
+        response = json_loads(self.load('http://bayfiles.com/ajax_download', get = {
+            "_": time() * 1000,
+            "action": "startTimer",
+            "vfid": vfid}, decode = True))
+
+        if not "token" in response or not response['token']:
+            self.fail('No token')
+
+        self.setWait(int(delay))
+        self.wait()
+
+        self.html = self.load('http://bayfiles.com/ajax_download', get = {
+            "token": response['token'],
+            "action": "getLink",
+            "vfid": vfid})
+
+        # Get final link and download
+        found = re.search(self.LINK_PATTERN, self.html)
+        if not found: self.parseError("Free link")
+        url = found.group(1)
+        self.logDebug("URL: " + url)
+
+        self.download(url)
+
+getInfo = create_getInfo(BayfilesCom)
\ No newline at end of file
diff --git a/module/plugins/hoster/CzshareCom.py b/module/plugins/hoster/CzshareCom.py
index 1a705e302..158fb0d1d 100644
--- a/module/plugins/hoster/CzshareCom.py
+++ b/module/plugins/hoster/CzshareCom.py
@@ -45,11 +45,11 @@ class CzshareCom(SimpleHoster):
     __name__ = "CzshareCom"
     __type__ = "hoster"
     __pattern__ = r"http://(\w*\.)*czshare\.(com|cz)/(\d+/|download.php\?).*"
-    __version__ = "0.84"
+    __version__ = "0.85"
     __description__ = """CZshare.com"""
     __author_name__ = ("zoidberg")
 
-    SIZE_REPLACEMENTS = {',': '.', ' ': ''}
+    SIZE_REPLACEMENTS = [(',', '.'), (' ', '')]
     FREE_URL_PATTERN = r'<a href="([^"]+)" class="page-download">[^>]*alt="([^"]+)" /></a>'
     FREE_FORM_PATTERN = r'<form action="download.php" method="post">\s*<img src="captcha.php" id="captcha" />(.*?)</form>'
     PREMIUM_FORM_PATTERN = r'<form action="/profi_down.php" method="post">(.*?)</form>'
diff --git a/module/plugins/hoster/EnteruploadCom.py b/module/plugins/hoster/EnteruploadCom.py
new file mode 100644
index 000000000..5e899ae96
--- /dev/null
+++ b/module/plugins/hoster/EnteruploadCom.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+"""
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License,
+ or (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ See the GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, see <http://www.gnu.org/licenses/>.
+
+ @author: zoidberg
+"""
+
+import re
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
+
+class EnteruploadCom(SimpleHoster):
+ __name__ = "EnteruploadCom"
+ __type__ = "hoster"
+ __pattern__ = r"http://(?:www\.)?enterupload.com/\w+.*"
+ __version__ = "0.01"
+ __description__ = """EnterUpload.com plugin - free only"""
+ __author_name__ = ("zoidberg")
+ __author_mail__ = ("zoidberg@mujmail.cz")
+
+ FILE_INFO_PATTERN = r'<h3>(?P<N>[^<]+)</h3>\s*<span>File size:\s*(?P<S>[0-9.]+)\s*(?P<U>[kKMG])i?B</span>'
+ FILE_OFFLINE_PATTERN = r'<(b|h2)>File Not Found</(b|h2)>|<font class="err">No such file with this filename</font>'
+ URL_REPLACEMENTS = [(r"(http://(?:www\.)?enterupload.com/\w+).*", r"\1")]
+
+ FORM1_PATTERN = r'<form method="POST" action=\'\' style="display: none;">(.*?)</form>'
+ FORM2_PATTERN = r'<form name="F1" method="POST"[^>]*>(.*?)</form>'
+ FORM3_PATTERN = r'<form action="([^"]+)" method="get">'
+ FORM_INPUT_PATTERN = r'<input[^>]* name="([^"]+)" value="([^"]*)"[^>]*>'
+ WAIT_PATTERN = r'<span id="countdown_str">Wait <[^>]*>(\d+)</span> seconds</span>'
+
+ def handleFree(self):
+ # Page 1
+ try:
+ form = re.search(self.FORM1_PATTERN, self.html, re.DOTALL).group(1)
+ inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
+ except Exception, e:
+ self.logError(e)
+ self.parseError("Form 1")
+
+ inputs['method_free'] = 'Free Download'
+ self.logDebug(inputs)
+ self.html = self.load(self.pyfile.url, post = inputs, decode = True, cookies = True, ref = True)
+
+ # Page 2
+ try:
+ form = re.search(self.FORM2_PATTERN, self.html, re.DOTALL).group(1)
+ inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
+ except Exception, e:
+ self.logError(e)
+ self.parseError("Form 2")
+
+ inputs['method_free'] = self.pyfile.url
+ self.logDebug(inputs)
+
+ found = re.search(self.WAIT_PATTERN, self.html)
+ if found:
+ self.setWait(int(found.group(1)) + 1)
+ self.wait()
+
+ self.html = self.load(self.pyfile.url, post = inputs, decode = True, cookies = True, ref = True)
+
+ # Page 3
+ found = re.search(self.FORM3_PATTERN, self.html)
+ if not found: self.parseError("Form 3")
+ url = found.group(1)
+
+ # Download
+ self.logDebug("Download URL: " + url)
+ self.download(url, cookies = True, ref = True)
+
+getInfo = create_getInfo(EnteruploadCom)
\ No newline at end of file
diff --git a/module/plugins/hoster/HellspyCz.py b/module/plugins/hoster/HellspyCz.py
index 6d3f84b55..9a8817c54 100644
--- a/module/plugins/hoster/HellspyCz.py
+++ b/module/plugins/hoster/HellspyCz.py
@@ -17,83 +17,53 @@
 """
 
 import re
-from math import ceil
-from module.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
-from module.network.RequestFactory import getURL
-
-def getInfo(urls):
-    result = []
-
-    for url in urls:
-        file_info = parseFileInfo(HellspyCz, url, getURL(url, decode=True))
-        result.append(file_info)
-
-    yield result
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
 
 class HellspyCz(SimpleHoster):
     __name__ = "HellspyCz"
     __type__ = "hoster"
     __pattern__ = r"http://(?:\w*\.)*hellspy\.(?:cz|com|sk|hu)(/\S+/\d+)/?.*"
-    __version__ = "0.22"
+    __version__ = "0.23"
     __description__ = """HellSpy.cz"""
     __author_name__ = ("zoidberg")
     __author_mail__ = ("zoidberg@mujmail.cz")
 
     FILE_INFO_PATTERN = '<span class="filesize right">(?P<S>[0-9.]+) <span>(?P<U>[kKMG])i?B</span></span>\s*<h1>(?P<N>[^<]+)</h1>'
     FILE_OFFLINE_PATTERN = r'<h2>(404 - Page|File) not found</h2>'
+    URL_REPLACEMENTS = [(r"http://(?:\w*\.)*hellspy\.(?:cz|com|sk|hu)(/\S+/\d+)/?.*", r"http://www.hellspy.com\1")]
+
     CREDIT_LEFT_PATTERN = r'<strong>Credits: </strong>\s*(\d+)'
-    PREMIUM_URL_PATTERN = r'<a href="([^"]+)" class="ajax button button-blue button-download"'
     DOWNLOAD_AGAIN_PATTERN = r'<a id="button-download-start"[^>]*title="You can download the file without deducting your credit.">'
+    DOWNLOAD_URL_PATTERN = r"launchFullDownload\('([^']+)'"
 
     def setup(self):
-        self.resumeDownload = self.multiDL = True if self.account else False
+        self.resumeDownload = self.multiDL = True
         self.chunkLimit = 1
 
-    def process(self, pyfile):
-        if not self.account: self.fail("Only premium users can download from HellSpy.cz")
+    def handleFree(self):
+        self.fail("Only premium users can download from HellSpy.cz")
 
+    def handlePremium(self):
         # set PHPSESSID cookie
         cj = self.account.getAccountCookies(self.user)
         cj.setCookie(".hellspy.com", "PHPSESSID", self.account.phpsessid)
         self.logDebug("PHPSESSID: " + cj.getCookie("PHPSESSID"))
 
-        info = self.account.getAccountInfo(self.user)
+        info = self.account.getAccountInfo(self.user, True)
         self.logInfo("User %s has %i credits left" % (self.user, info["trafficleft"]/1024))
 
-        # load html
-        rel_url = re.search(self.__pattern__, pyfile.url).group(1)
-        self.html = self.load("http://www.hellspy.com/--%s-/%s" % (self.account.phpsessid, rel_url), decode = True)
-
-        self.getFileInfo()
+        if self.pyfile.size / 1024 > info["trafficleft"]:
+            self.logWarning("Not enough credit left to download file")
 
         # get premium download URL and download
-        found = re.search(self.PREMIUM_URL_PATTERN, self.html)
-        if not found: self.parseError('Download URL')
-        download_url = "http://www.hellspy.cz" + found.group(1)
-        self.logDebug("Download URL: " + download_url)
-        self.download(download_url, disposition = True)
-
-        info = self.account.getAccountInfo(self.user)
+        self.html = self.load(self.pyfile.url + "?download=1")
+        found = re.search(self.DOWNLOAD_URL_PATTERN, self.html)
+        if not found: self.parseError("Download URL")
+        url = found.group(1)
+        self.logDebug("Download URL: " + url)
+        self.download(url)
+
+        info = self.account.getAccountInfo(self.user, True)
         self.logInfo("User %s has %i credits left" % (self.user, info["trafficleft"]/1024))
-
-        """
-        # parse credits left info
-        found = re.search(self.CREDIT_LEFT_PATTERN, self.html)
-        if found is None:
-            self.logInfo("Not logged in... relogin and retry")
-            self.account.relogin(self.user)
-            self.retry(max_tries = 2, reason = "Not logged in")
-        credits_left = int(found.group(1))
-        self.logInfo("User %s has %i credits left" % (self.user, credits_left))
-
-        # parse credit needed to proceed
-        found = re.search(self.DOWNLOAD_AGAIN_PATTERN, self.html)
-        if found:
-            self.logInfo("Free download (file downloaded before)")
-        else:
-            found = re.search(self.FILE_CREDITS_PATTERN, self.html)
-            if found is None: self.fail("Parse error (FILE CREDITS)")
-            file_credits = int(found.group(3))
-            if file_credits > credits_left: self.fail("Not enough credits left to download file")
-            self.logInfo("Premium download for %i credits" % file_credits)
-        """
+
+getInfo = create_getInfo(HellspyCz)
\ No newline at end of file
diff --git a/module/plugins/internal/SimpleHoster.py b/module/plugins/internal/SimpleHoster.py
index 05ef03d58..c101cbf6d 100644
--- a/module/plugins/internal/SimpleHoster.py
+++ b/module/plugins/internal/SimpleHoster.py
@@ -23,6 +23,12 @@ from module.plugins.Hoster import Hoster
 from module.utils import html_unescape
 from module.network.RequestFactory import getURL
 
+def reSub(string, ruleslist):
+    for r in ruleslist:
+        rf, rt = r
+        string = sub(rf, rt, string)
+    return string
+
 def parseFileInfo(self, url = '', html = ''):
     if not html and hasattr(self, "html"): html = self.html
     name, size, status, found = '', 0, 3, 0
@@ -47,25 +53,17 @@
     if size:
         # File online, return name and size
-        for r in self.SIZE_REPLACEMENTS:
-            size = size.replace(r, self.SIZE_REPLACEMENTS[r])
-        size = float(size) * 1024 ** self.SIZE_UNITS[units]
+        size = float(reSub(size, self.SIZE_REPLACEMENTS)) * 1024 ** self.SIZE_UNITS[units]
         status = 2
 
-    if name:
-        for r in self.NAME_REPLACEMENTS:
-            rf, rt = r
-            name = sub(rf, rt, name)
-    else:
-        name = url
+    name = reSub(name, self.NAME_REPLACEMENTS) if name else url
 
     return name, size, status, url
 
-
 def create_getInfo(plugin):
     def getInfo(urls):
         for url in urls:
-            file_info = parseFileInfo(plugin, url, getURL(url, decode=True))
+            file_info = parseFileInfo(plugin, url, getURL(reSub(url, plugin.URL_REPLACEMENTS), decode=True))
            yield file_info
     return getInfo
 
@@ -78,24 +76,33 @@ class PluginParseError(Exception):
 
 class SimpleHoster(Hoster):
     __name__ = "SimpleHoster"
-    __version__ = "0.12"
+    __version__ = "0.13"
     __pattern__ = None
     __type__ = "hoster"
     __description__ = """Base hoster plugin"""
     __author_name__ = ("zoidberg")
     __author_mail__ = ("zoidberg@mujmail.cz")
-
+    """
+    These patterns should be defined by each hoster:
+    FILE_INFO_PATTERN = r'(?P<N>file_name) (?P<S>file_size) (?P<U>units)'
+    or FILE_NAME_INFO = r'(?P<N>file_name)'
+    and FILE_SIZE_INFO = r'(?P<S>file_size) (?P<U>units)'
+    FILE_OFFLINE_PATTERN = r'File (deleted|not found)'
+    TEMP_OFFLINE_PATTERN = r'Server maintainance'
+    """
     #TODO: could be replaced when using utils.parseFileSize ?
     #some plugins need to override these
     SIZE_UNITS = {'k': 1, 'K': 1, 'M': 2, 'G': 3}
-    SIZE_REPLACEMENTS = {',': '', ' ': ''}
+    SIZE_REPLACEMENTS = [(',', ''), (' ', '')]
     NAME_REPLACEMENTS = []
+    URL_REPLACEMENTS = []
 
     def setup(self):
        self.resumeDownload = self.multiDL = True if self.account else False
 
     def process(self, pyfile):
-        self.html = self.load(pyfile.url, decode = True, cookies = True)
+        pyfile.url = reSub(pyfile.url, self.URL_REPLACEMENTS)
+        self.html = self.load(pyfile.url, decode = True)
         self.getFileInfo()
         if self.account:
             self.handlePremium()
@@ -103,7 +110,10 @@ class SimpleHoster(Hoster):
     def getFileInfo(self):
-        self.logDebug("URL: %s" % self.pyfile.url)
+        self.logDebug("URL: %s" % self.pyfile.url)
+        if hasattr(self, "TEMP_OFFLINE_PATTERN") and search(self.TEMP_OFFLINE_PATTERN, html):
+            self.tempOffline()
+
         name, size, status, url = parseFileInfo(self)
         if status == 1:
             self.offline()
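
A side note on the replacement-list convention this commit introduces: URL_REPLACEMENTS, SIZE_REPLACEMENTS and NAME_REPLACEMENTS are now ordered lists of (pattern, substitution) tuples that the new reSub() helper in SimpleHoster.py applies with re.sub. The snippet below is a standalone sketch of that mechanism, not part of the commit; reSub is copied from the diff above, while the sample URL and size string are invented for illustration.

```python
# Standalone sketch of the (pattern, substitution) replacement lists used above.
# reSub mirrors the helper added to module/plugins/internal/SimpleHoster.py;
# the sample URL and size string below are made up.
from re import sub

def reSub(string, ruleslist):
    for r in ruleslist:
        rf, rt = r
        string = sub(rf, rt, string)
    return string

# HellspyCz.URL_REPLACEMENTS: normalize any hellspy.cz/.com/.sk/.hu link
# to the www.hellspy.com form before it is loaded.
URL_REPLACEMENTS = [(r"http://(?:\w*\.)*hellspy\.(?:cz|com|sk|hu)(/\S+/\d+)/?.*",
                     r"http://www.hellspy.com\1")]
print(reSub("http://hellspy.cz/some-file/12345/?x=1", URL_REPLACEMENTS))
# -> http://www.hellspy.com/some-file/12345

# CzshareCom.SIZE_REPLACEMENTS: the list form keeps the replacement order
# explicit, so "1 234,5" becomes "1234.5" before float() in parseFileInfo.
SIZE_REPLACEMENTS = [(',', '.'), (' ', '')]
print(float(reSub("1 234,5", SIZE_REPLACEMENTS)))
# -> 1234.5
```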
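The docstring added to SimpleHoster above documents which patterns a hoster plugin is expected to define. As an illustration only, a hypothetical minimal plugin following that convention might look like the sketch below; the host name, URL pattern and HTML patterns are invented, and only attributes and calls that appear elsewhere in this commit (FILE_INFO_PATTERN with N/S/U groups, FILE_OFFLINE_PATTERN, TEMP_OFFLINE_PATTERN, URL_REPLACEMENTS, handleFree, parseError, create_getInfo) are assumed.

```python
# -*- coding: utf-8 -*-
# Hypothetical example, not part of this commit: a minimal hoster built on the
# SimpleHoster conventions documented in the docstring above. The host and all
# patterns are invented for illustration.
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo

class ExampleHostCom(SimpleHoster):
    __name__ = "ExampleHostCom"
    __type__ = "hoster"
    __pattern__ = r"http://(?:www\.)?examplehost\.com/file/\w+"
    __version__ = "0.01"
    __description__ = """ExampleHost.com plugin - illustration only"""

    # parseFileInfo() reads the named groups N (name), S (size) and U (units)
    FILE_INFO_PATTERN = r'<h1>(?P<N>[^<]+)</h1>\s*<span>(?P<S>[0-9.]+) (?P<U>[kKMG])i?B</span>'
    FILE_OFFLINE_PATTERN = r'File (deleted|not found)'
    TEMP_OFFLINE_PATTERN = r'Server maintainance'
    # applied to pyfile.url by SimpleHoster.process() through reSub()
    URL_REPLACEMENTS = [(r"http://(?:www\.)?examplehost\.com/file/(\w+).*",
                         r"http://www.examplehost.com/file/\1")]

    LINK_PATTERN = r'<a class="download" href="([^"]+)">'

    def handleFree(self):
        # self.html was already loaded by SimpleHoster.process()
        found = re.search(self.LINK_PATTERN, self.html)
        if not found: self.parseError("Download link")
        self.download(found.group(1))

getInfo = create_getInfo(ExampleHostCom)
```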