Diffstat (limited to 'module/plugins/hoster')
-rw-r--r-- | module/plugins/hoster/NetloadIn.py      |  20
-rw-r--r-- | module/plugins/hoster/ShareCx.py        | 155
-rw-r--r-- | module/plugins/hoster/ShareonlineBiz.py |  31
-rw-r--r-- | module/plugins/hoster/YoutubeCom.py     |   7
4 files changed, 188 insertions, 25 deletions
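Both refactored getInfo() functions below (NetloadIn.py and ShareonlineBiz.py) drop their hand-rolled URL slicing in favour of a chunks helper imported from module.plugins.Plugin. That helper is not part of this changeset; the following is only a minimal sketch of the behaviour the diffs assume, namely splitting a sequence into consecutive lists of at most `size` items:

    # Sketch of the assumed module.plugins.Plugin.chunks helper (not part of this diff):
    # split a sequence into consecutive lists of at most `size` elements.
    def chunks(seq, size):
        return [seq[i:i + size] for i in range(0, len(seq), size)]

    # chunks(range(5), 2) -> [[0, 1], [2, 3], [4]]

With a helper like that, each yielded result list corresponds to exactly one API request of at most urls_per_query (80 or 90) links.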
diff --git a/module/plugins/hoster/NetloadIn.py b/module/plugins/hoster/NetloadIn.py
index 9e117fa14..6f0cb9461 100644
--- a/module/plugins/hoster/NetloadIn.py
+++ b/module/plugins/hoster/NetloadIn.py
@@ -4,8 +4,12 @@
 import re
 from time import sleep
+
 from module.plugins.Hoster import Hoster
 from module.network.Request import getURL
+from module.plugins.Plugin import chunks
+
+
 
 def getInfo(urls):
     ## returns list of tupels (name, size (in bytes), status (see FileDatabase), url)
 
@@ -14,14 +18,10 @@ def getInfo(urls):
     apiurl = "http://api.netload.in/info.php?auth=Zf9SnQh9WiReEsb18akjvQGqT0I830e8&bz=1&md5=1&file_id="
     id_regex = re.compile("http://.*netload\.in/(?:datei(.*?)(?:\.htm|/)|index.php?id=10&file_id=)")
     urls_per_query = 80
-
-    iterations = len(urls)/urls_per_query
-    if len(urls)%urls_per_query > 0:
-        iterations = iterations +1
-
-    for i in range(iterations):
+
+    for chunk in chunks(urls, urls_per_query):
         ids = ""
-        for url in urls[i*urls_per_query:(i+1)*urls_per_query]:
+        for url in chunk:
             match = id_regex.search(url)
             if match:
                 ids = ids + match.group(1) +";"
@@ -37,19 +37,17 @@ def getInfo(urls):
 
         result = []
 
-        counter = 0
-        for r in api.split():
+        for i, r in enumerate(api.split()):
             try:
                 tmp = r.split(";")
                 try:
                     size = int(tmp[2])
                 except:
                     size = 0
-                result.append( (tmp[1], size, 2 if tmp[3] == "online" else 1, urls[(i*80)+counter]) )
+                result.append( (tmp[1], size, 2 if tmp[3] == "online" else 1, chunk[i] ) )
             except:
                 print "Netload prefetch: Error while processing response: "
                 print r
-            counter = counter +1
         yield result
 
 
diff --git a/module/plugins/hoster/ShareCx.py b/module/plugins/hoster/ShareCx.py
new file mode 100644
index 000000000..e64459754
--- /dev/null
+++ b/module/plugins/hoster/ShareCx.py
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import re
+from module.plugins.Hoster import Hoster
+from module.plugins.Plugin import chunks
+from module.network.Request import getURL
+#from module.BeautifulSoup import BeautifulSoup
+
+def getInfo(urls):
+    api_url = "http://www.share.cx/uapi?do=check&links="
+
+    for chunk in chunks(urls, 90):
+        get = ""
+        for url in chunk:
+            get += ";"+url
+
+        api = getURL(api_url+get[1:])
+        result = []
+
+        for i, link in enumerate(api.split()):
+            url,name,size = link.split(";")
+            if name and size:
+                status = 2
+            else:
+                status = 1
+
+            if not name: name = chunk[i]
+            if not size: size = 0
+
+            result.append( (name, size, status, chunk[i]) )
+
+        yield result
+
+class ShareCx(Hoster):
+    __name__ = "ShareCx"
+    __type__ = "hoster"
+    __pattern__ = r"http://[\w\.]*?share\.cx/(files|videos)/\d+"
+    __version__ = "0.1"
+    __description__ = """Share.cx Download Hoster"""
+    __author_name__ = ("jeix")
+    __author_mail__ = ("jeix@hasnomail.de")
+
+
+    def setup(self):
+        self.multiDL = False
+
+
+    def process(self, pyfile):
+        self.pyfile = pyfile
+        self.download_html()
+        if not self.file_exists():
+            self.offline()
+
+        pyfile.name = self.get_file_name()
+        self.doDownload()
+
+
+    def download_html(self):
+        self.html = self.load(self.pyfile.url)
+
+    def doDownload(self):
+        """ returns the absolute downloadable filepath
+        """
+        if self.html == None:
+            self.download_html()
+
+        op = re.search(r'name="op" value="(.*?)"', self.html).group(1)
+        usr_login = re.search(r'name="usr_login" value="(.*?)"', self.html).group(1)
+        id = re.search(r'name="id" value="(.*?)"', self.html).group(1)
+        fname = re.search(r'name="fname" value="(.*?)"', self.html).group(1)
+        referer = re.search(r'name="referer" value="(.*?)"', self.html).group(1)
+        method_free = "Datei+herunterladen"
+
+        self.html = self.load(self.pyfile.url, post={
+            "op" : op,
+            "usr_login" : usr_login,
+            "id" : id,
+            "fname" : fname,
+            "referer" : referer,
+            "method_free" : method_free
+        })
+
+
+        m = re.search(r'startTimer\((\d+)\)', self.html)
+        if m != None:
+            wait_time = int(m.group(1))
+            self.setWait(wait_time)
+            self.wantReconnect = True
+            self.log.debug("%s: IP blocked wait %d seconds." % (self.__name__, wait_time))
+            self.wait()
+
+        m = re.search(r'countdown">.*?(\d+).*?</span>', self.html)
+        if m == None:
+            m = re.search(r'id="countdown_str".*?<span id=".*?">.*?(\d+).*?</span', self.html)
+        if m != None:
+            wait_time = int(m.group(1))
+            self.setWait(wait_time)
+            self.wantReconnect = False
+            self.log.debug("%s: Waiting %d seconds." % (self.__name__, wait_time))
+            self.wait()
+
+
+        op = re.search(r'name="op" value="(.*?)"', self.html).group(1)
+        id = re.search(r'name="id" value="(.*?)"', self.html).group(1)
+        rand = re.search(r'name="rand" value="(.*?)"', self.html).group(1)
+        referer = re.search(r'name="referer" value="(.*?)"', self.html).group(1)
+        method_free = re.search(r'name="method_free" value="(.*?)"', self.html).group(1)
+        method_premium = re.search(r'name="method_premium" value="(.*?)"', self.html).group(1)
+        down_script = re.search(r'name="down_script" value="(.*?)"', self.html).group(1)
+
+        data = {
+            "op" : op,
+            "id" : id,
+            "rand" : rand,
+            "referer" : referer,
+            "method_free" : method_free,
+            "method_premium" : method_premium,
+            "down_script" : down_script
+        }
+
+        if '/captchas/' in self.html:
+            captcha_url = re.search(r'(http://(?:[\w\d]+\.)?.*?/captchas/.*?)', self.html).group(1)
+            captcha = self.decryptCaptcha(captcha_url)
+            data["code"] = captcha
+
+
+        self.download(self.pyfile.url, post=data)
+
+        # soup = BeautifulSoup(html)
+        # form = soup.find("form")
+        # postfields = {}
+        # for input in form.findall("input"):
+        #     postfields[input["name"]] = input["value"]
+        # postfields["method_free"] = "Datei herunterladen"
+
+    def get_file_name(self):
+        if self.html == None:
+            self.download_html()
+
+        name = re.search(r'alt="Download" /></span>(.*?)</h3>', self.html).group(1)
+        return name
+
+    def file_exists(self):
+        """ returns True or False
+        """
+        if self.html == None:
+            self.download_html()
+
+        if re.search(r'File not found<br>It was deleted due to inactivity or abuse request', self.html) != None:
+            return False
+
+        return True
+
+
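All three getInfo() implementations in this changeset are generators: each yields one list of (name, size, status, url) tuples per chunked API request, where status 2 marks a link as online, 1 as offline, and ShareonlineBiz.py below falls back to 3 for anything else. A hypothetical caller, shown here only to illustrate the yielded structure (collect_online is not pyLoad core code):

    # Illustration only -- not part of pyLoad. Drives one of the chunked
    # getInfo generators above and keeps the links reported as online.
    def collect_online(getInfo, urls):
        online = []
        for batch in getInfo(urls):              # one batch per API chunk
            for name, size, status, url in batch:
                if status == 2:                  # 2 == online in these plugins
                    online.append((name, size, url))
        return online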
diff --git a/module/plugins/hoster/ShareonlineBiz.py b/module/plugins/hoster/ShareonlineBiz.py
index 8646fcc88..42a2bc560 100644
--- a/module/plugins/hoster/ShareonlineBiz.py
+++ b/module/plugins/hoster/ShareonlineBiz.py
@@ -13,19 +13,30 @@ from time import sleep
 
 from module.plugins.Hoster import Hoster
 from module.network.Request import getURL
+from module.plugins.Plugin import chunks
+
 
 def getInfo(urls):
     api_url_base = "http://www.share-online.biz/linkcheck/linkcheck.php"
-    api_param_file = {"links": "\n".join(x.replace("http://www.share-online.biz/dl/","") for x in urls)} #api only supports old style links
-    src = getURL(api_url_base, post=api_param_file)
-    result = []
-    for i, res in enumerate(src.split("\n")):
-        if not res:
-            continue
-        fields = res.split(";")
-        status = 2 if fields[1] == "OK" else 3
-        result.append((fields[2], int(fields[3]), status, urls[i]))
-    yield result
+
+    for chunk in chunks(urls, 90):
+        api_param_file = {"links": "\n".join(x.replace("http://www.share-online.biz/dl/","") for x in chunk)} #api only supports old style links
+        src = getURL(api_url_base, post=api_param_file)
+        result = []
+        for i, res in enumerate(src.split("\n")):
+            if not res:
+                continue
+            fields = res.split(";")
+
+            if fields[1] == "OK":
+                status = 2
+            elif fields[1] in ("DELETED", "NOT FOUND"):
+                status = 1
+            else:
+                status = 3
+
+            result.append((fields[2], int(fields[3]), status, chunk[i]))
+        yield result
 
 class ShareonlineBiz(Hoster):
     __name__ = "ShareonlineBiz"
diff --git a/module/plugins/hoster/YoutubeCom.py b/module/plugins/hoster/YoutubeCom.py
index e40b0c9ad..d92d8d128 100644
--- a/module/plugins/hoster/YoutubeCom.py
+++ b/module/plugins/hoster/YoutubeCom.py
@@ -32,9 +32,8 @@ class YoutubeCom(Hoster):
         if self.getConf("quality") == "hd" or self.getConf("quality") == "hq":
             file_suffix = ".mp4"
 
-        name = re.search(file_name_pattern, html).group(1).replace("/", "") + file_suffix
-
-        pyfile.name = name.replace("&amp;", "&").replace("ö", "oe").replace("ä", "ae").replace("ü", "ue")
+        name = (re.search(file_name_pattern, html).group(1).replace("/", "") + file_suffix).decode("utf8")
+        pyfile.name = name #.replace("&amp;", "&").replace("ö", "oe").replace("ä", "ae").replace("ü", "ue")
 
         if self.getConf("quality") == "sd":
             quality = "&fmt=6"
@@ -45,4 +44,4 @@ class YoutubeCom(Hoster):
 
         file_url = 'http://youtube.com/get_video?video_id=' + videoId + '&t=' + videoHash + quality + "&asv=2"
 
-        self.download(file_url)
\ No newline at end of file
+        self.download(file_url)
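The YoutubeCom.py change above replaces the hand-written umlaut transliteration with a plain UTF-8 decode, so pyfile.name becomes a unicode object instead of a mangled byte string. A small Python 2 illustration of the difference (the title string is invented):

    # -*- coding: utf-8 -*-
    # Illustration of the YoutubeCom.py change (Python 2); the title is made up.
    raw = "Fröhliche Weihnachten/Teil 1"   # UTF-8 byte string as scraped from the page

    # old behaviour: stay a byte string and transliterate umlauts by hand
    old_name = raw.replace("/", "").replace("ö", "oe").replace("ä", "ae").replace("ü", "ue")

    # new behaviour: strip the slash, then decode the bytes to a unicode object
    new_name = raw.replace("/", "").decode("utf8")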