author | Walter Purcaro <vuolter@gmail.com> | 2014-01-09 11:47:51 +0100
committer | Stefano <l.stickell@yahoo.it> | 2014-04-21 16:52:58 +0200
commit | 039ba73e2ec4f4d2bad052176b63673926eb375b (patch)
tree | 7741ae7e6e0533e6b60a805f8ada1d981b37be84
parent | WebshareCz: rewritten (diff)
download | pyload-039ba73e2ec4f4d2bad052176b63673926eb375b.tar.xz
ShareonlineBiz: HTTPS fix + code cosmetics
Merged #459
(cherry picked from commit d664484a466f1b9c53b1f2b5ff9a5ffa9a80440b)
-rw-r--r-- | pyload/plugins/hoster/ShareonlineBiz.py | 116
1 file changed, 49 insertions, 67 deletions
diff --git a/pyload/plugins/hoster/ShareonlineBiz.py b/pyload/plugins/hoster/ShareonlineBiz.py
index 5b7a0e913..d51935d33 100644
--- a/pyload/plugins/hoster/ShareonlineBiz.py
+++ b/pyload/plugins/hoster/ShareonlineBiz.py
@@ -10,12 +10,14 @@ from time import time, sleep
 from module.plugins.Hoster import Hoster
 from module.network.RequestFactory import getURL
 from module.plugins.Plugin import chunks
-from module.plugins.internal.CaptchaService import ReCaptcha as _ReCaptcha
+from module.plugins.internal.CaptchaService import ReCaptcha
 
 
 def getInfo(urls):
     api_url_base = "http://api.share-online.biz/linkcheck.php"
 
+    urls = [url.replace("https://", "http://") for url in urls]
+
     for chunk in chunks(urls, 90):
         api_param_file = {"links": "\n".join(x.replace("http://www.share-online.biz/dl/", "").rstrip("/") for x in chunk)} # api only supports old style links
 
@@ -37,21 +39,14 @@ def getInfo(urls):
         yield result
 
 
-#suppress ocr plugin
-class ReCaptcha(_ReCaptcha):
-    def result(self, server, challenge):
-        return self.plugin.decryptCaptcha("%simage" % server, get={"c": challenge},
-                                          cookies=True, forceUser=True, imgtype="jpg")
-
-
 class ShareonlineBiz(Hoster):
     __name__ = "ShareonlineBiz"
     __type__ = "hoster"
-    __pattern__ = r"https?://[\w\.]*?(share\-online\.biz|egoshare\.com)/(download.php\?id\=|dl/)[\w]+"
-    __version__ = "0.37"
+    __pattern__ = r"https?://(?:www\.)?(share-online\.biz|egoshare\.com)/(download.php\?id=|dl/)(?P<ID>\w+)"
+    __version__ = "0.38"
     __description__ = """Shareonline.biz Download Hoster"""
-    __author_name__ = ("spoob", "mkaay", "zoidberg")
-    __author_mail__ = ("spoob@pyload.org", "mkaay@mkaay.de", "zoidberg@mujmail.cz")
+    __author_name__ = ("spoob", "mkaay", "zoidberg", "Walter Purcaro")
+    __author_mail__ = ("spoob@pyload.org", "mkaay@mkaay.de", "zoidberg@mujmail.cz", "vuolter@gmail.com")
 
     ERROR_INFO_PATTERN = r'<p class="b">Information:</p>\s*<div>\s*<strong>(.*?)</strong>'
 
@@ -59,7 +54,7 @@ class ShareonlineBiz(Hoster):
         # range request not working?
         # api supports resume, only one chunk
         # website isn't supporting resuming in first place
-        self.file_id = re.search(r"(id\=|/dl/)([a-zA-Z0-9]+)", self.pyfile.url).group(2)
+        self.file_id = re.match(self.__pattern__, self.pyfile.url).group("ID")
         self.pyfile.url = "http://www.share-online.biz/dl/" + self.file_id
 
         self.resumeDownload = self.premium
@@ -70,7 +65,7 @@ class ShareonlineBiz(Hoster):
 
     def process(self, pyfile):
         if self.premium:
-            self.handleAPIPremium()
+            self.handlePremium()
             #web-download fallback removed - didn't work anyway
         else:
             self.handleFree()
@@ -85,10 +80,9 @@ class ShareonlineBiz(Hoster):
         if self.api_data:
             self.check_data = {"size": int(self.api_data['size']), "md5": self.api_data['md5']}
 
-    def downloadAPIData(self):
+    def loadAPIData(self):
         api_url_base = "http://api.share-online.biz/linkcheck.php?md5=1"
-        api_param_file = {"links": self.pyfile.url.replace(
-            "http://www.share-online.biz/dl/", "")} # api only supports old style links
+        api_param_file = {"links": self.file_id} # api only supports old style links
 
         src = self.load(api_url_base, cookies=False, post=api_param_file, decode=True)
         fields = src.split(";")
@@ -96,12 +90,13 @@ class ShareonlineBiz(Hoster):
                          "status": fields[1]}
         if not self.api_data["status"] == "OK":
             self.offline()
-        self.api_data["filename"] = fields[2]
-        self.api_data["size"] = fields[3] # in bytes
-        self.api_data["md5"] = fields[4].strip().lower().replace("\n\n", "") # md5
+        else:
+            self.api_data["filename"] = fields[2]
+            self.api_data["size"] = fields[3] # in bytes
+            self.api_data["md5"] = fields[4].strip().lower().replace("\n\n", "") # md5
 
     def handleFree(self):
-        self.downloadAPIData()
+        self.loadAPIData()
         self.pyfile.name = self.api_data["filename"]
         self.pyfile.size = int(self.api_data["size"])
 
@@ -115,7 +110,7 @@ class ShareonlineBiz(Hoster):
         found = re.search(r'var wait=(\d+);', self.html)
 
         recaptcha = ReCaptcha(self)
-        for i in range(5):
+        for _ in xrange(5):
             challenge, response = recaptcha.challenge("6LdatrsSAAAAAHZrB70txiV5p-8Iv8BtVxlTtjKX")
             self.setWait(int(found.group(1)) if found else 30)
             response = self.load("%s/free/captcha/%d" % (self.pyfile.url, int(time() * 1000)), post={
@@ -125,7 +120,6 @@ class ShareonlineBiz(Hoster):
 
             if not response == '0':
                 break
-
         else:
             self.fail("No valid captcha solution received")
 
@@ -146,29 +140,7 @@ class ShareonlineBiz(Hoster):
         elif check == "fail":
             self.retry(5, 300, "Download failed")
 
-    def checkErrors(self):
-        found = re.search(r"/failure/(.*?)/1", self.req.lastEffectiveURL)
-        if found:
-            err = found.group(1)
-            found = re.search(self.ERROR_INFO_PATTERN, self.html)
-            msg = found.group(1) if found else ""
-            self.logError(err, msg or "Unknown error occurred")
-
-            if err in ('freelimit', 'size', 'proxy'):
-                self.fail(msg or "Premium account needed")
-            if err in 'invalid':
-                self.fail(msg or "File not available")
-            elif err in 'server':
-                self.setWait(600, False)
-            elif err in 'expired':
-                self.setWait(30, False)
-            else:
-                self.setWait(300, True)
-
-            self.wait()
-            self.retry(max_tries=25, reason=msg)
-
-    def handleAPIPremium(self): # should be working better
+    def handlePremium(self): # should be working better loading (account) api internally
         self.account.getAccountInfo(self.user, True)
         src = self.load("http://api.share-online.biz/account.php",
                         {"username": self.user, "password": self.account.accounts[self.user]["password"],
@@ -182,28 +154,38 @@ class ShareonlineBiz(Hoster):
         self.logDebug(dlinfo)
         if not dlinfo["status"] == "online":
             self.offline()
+        else:
+            self.pyfile.name = dlinfo["name"]
+            self.pyfile.size = int(dlinfo["size"])
 
-        self.pyfile.name = dlinfo["name"]
-        self.pyfile.size = int(dlinfo["size"])
+            dlLink = dlinfo["url"]
+            if dlLink == "server_under_maintenance":
+                self.tempOffline()
+            else:
+                self.multiDL = True
+                self.download(dlLink)
 
-        dlLink = dlinfo["url"]
-        if dlLink == "server_under_maintenance":
-            self.tempoffline()
+    def checkErrors(self):
+        found = re.search(r"/failure/(.*?)/1", self.req.lastEffectiveURL)
+        if not found:
+            return
+
+        err = found.group(1)
+        found = re.search(self.ERROR_INFO_PATTERN, self.html)
+        msg = found.group(1) if found else ""
+        self.logError(err, msg or "Unknown error occurred")
+
+        if err in ('invalid'):
+            self.fail(msg or "File not available")
+        elif err in ('freelimit', 'size', 'proxy'):
+            self.fail(msg or "Premium account needed")
         else:
-            self.multiDL = True
-            self.download(dlLink)
-
-    def checksum(self, local_file):
-        if self.api_data and "md5" in self.api_data and self.api_data["md5"]:
-            h = hashlib.md5()
-            f = open(local_file, "rb")
-            h.update(f.read())
-            f.close()
-            hexd = h.hexdigest()
-            if hexd == self.api_data["md5"]:
-                return True, 0
+            if err in 'server':
+                self.setWait(600, False)
+            elif err in 'expired':
+                self.setWait(30, False)
             else:
-                return False, 1
-        else:
-            self.logWarning("MD5 checksum missing")
-            return True, 5
+                self.setWait(300, True)
+
+            self.wait()
+            self.retry(max_tries=25, reason=msg)
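The two functional changes buried in the cosmetics are easy to see in isolation: link-check requests now rewrite `https://` links to `http://` before they are sent to the API (the HTTPS fix in `getInfo()`), and the file id is taken from the named group `ID` in the new `__pattern__` instead of a positional `re.search(...).group(2)`. The snippet below is a standalone sketch of those two steps, not plugin code; the helper names and the example URL are invented for illustration:

```python
import re

# __pattern__ from the new plugin version: the named group "ID" captures the
# file id for both old-style (download.php?id=) and new-style (/dl/) links.
PATTERN = r"https?://(?:www\.)?(share-online\.biz|egoshare\.com)/(download.php\?id=|dl/)(?P<ID>\w+)"


def normalize(urls):
    # The linkcheck API only accepts plain http:// links, so https://
    # is rewritten first -- the HTTPS fix from getInfo().
    return [url.replace("https://", "http://") for url in urls]


def extract_id(url):
    # re.match plus a named group replaces the old positional group(2) lookup.
    m = re.match(PATTERN, url)
    return m.group("ID") if m else None


links = normalize(["https://www.share-online.biz/dl/ABC123XYZ"])  # invented link
print(links)                 # ['http://www.share-online.biz/dl/ABC123XYZ']
print(extract_id(links[0]))  # ABC123XYZ
```

Inside the plugin the same regex is reused via `re.match(self.__pattern__, self.pyfile.url).group("ID")`, so the URL pattern and the id extraction can no longer drift apart.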