#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""netload.in hoster plugin for pyLoad.

NOTE(review): this file was recovered from a whitespace-mangled copy in
which several regular expressions lost their HTML markup (and with it
their capture groups).  Those patterns are reconstructed below and each
one is marked with a "reconstructed" comment -- confirm them against an
upstream copy of the plugin before relying on them.
"""

import re
from time import sleep

from module.plugins.Hoster import Hoster
from module.network.Request import getURL
from module.plugins.Plugin import chunks

# File-id extractor shared by getInfo(), __pattern__ and
# download_api_data().  The original left "." and "?" unescaped in
# "index.php?id=10", so that alternative could never match literally.
# The second alternative carries no capture group, so callers must cope
# with match.group(1) being None.
FILE_ID_PATTERN = r"http://.*netload\.in/(?:datei(.*?)(?:\.htm|/)|index\.php\?id=10&file_id=)"


def getInfo(urls):
    """Prefetch file info for *urls* through the netload.in info API.

    Yields one list per query chunk; each entry is a tuple
    (name, size_in_bytes, status, url) where status is 2 for "online"
    and 1 otherwise (pyLoad FileDatabase status codes).
    """
    apiurl = ("http://api.netload.in/info.php"
              "?auth=Zf9SnQh9WiReEsb18akjvQGqT0I830e8&bz=1&md5=1&file_id=")
    id_regex = re.compile(FILE_ID_PATTERN)
    urls_per_query = 80  # API limit per request

    for chunk in chunks(urls, urls_per_query):
        ids = ""
        for url in chunk:
            match = id_regex.search(url)
            # group(1) is None for the group-less second URL form;
            # concatenating None would raise TypeError, so skip it.
            if match and match.group(1):
                ids = ids + match.group(1) + ";"

        api = getURL(apiurl + ids)

        if api is None or len(api) < 10:
            print("Netload prefetch: failed ")
            return
        if api.find("unknown_auth") >= 0:
            print("Netload prefetch: Outdated auth code ")
            return

        result = []
        # NOTE(review): chunk[i] assumes the API answers with exactly one
        # line per submitted URL, in order; URLs rejected by the id regex
        # above would shift this mapping.
        for i, r in enumerate(api.splitlines()):
            try:
                tmp = r.split(";")
                try:
                    size = int(tmp[2])
                except (IndexError, ValueError):
                    size = 0
                result.append((tmp[1], size,
                               2 if tmp[3] == "online" else 1,
                               chunk[i]))
            except Exception:
                print("Netload prefetch: Error while processing response: ")
                print(r)

        yield result


class NetloadIn(Hoster):
    __name__ = "NetloadIn"
    __type__ = "hoster"
    __pattern__ = FILE_ID_PATTERN
    __version__ = "0.2"
    __description__ = """Netload.in Download Hoster"""
    __config__ = [("dumpgen", "bool",
                   "Generate debug page dumps on stdout", "False")]
    __author_name__ = ("spoob", "RaNaN", "Gregy")
    __author_mail__ = ("spoob@pyload.org", "ranan@pyload.org", "gregy@gregy.cz")

    def setup(self):
        # Free users get one connection; premium accounts may run parallel
        # downloads and resume interrupted ones.
        self.multiDL = False
        if self.account:
            self.multiDL = True
            self.req.canContinue = True

    def process(self, pyfile):
        """pyLoad entry point: resolve the final URL, then download it."""
        self.url = pyfile.url
        self.prepare()
        self.pyfile.setStatus("downloading")
        self.proceed(self.url)

    def prepare(self):
        """Fetch API metadata and, for free users, walk the wait/captcha
        flow.  Returns True on success; calls self.fail() otherwise."""
        self.download_api_data()
        if self.api_data and self.api_data["filename"]:
            self.pyfile.name = self.api_data["filename"]

        if self.account:
            # Premium accounts skip the free-download flow entirely.
            self.log.debug("Netload: Use Premium Account")
            return True

        if self.download_html():
            return True
        else:
            self.fail("Failed")
            return False

    def download_api_data(self, n=0):
        """Populate self.api_data from the share API.

        Retries up to 3 times (0.2 s apart) on an empty response, then
        fails the download.  Sets self.api_data to False when no file id
        can be extracted or the server data is unknown; calls
        self.offline() when the file is reported not online.
        """
        url = self.url
        id_regex = re.compile(FILE_ID_PATTERN)
        match = id_regex.search(url)
        if match:
            apiurl = "http://netload.in/share/fileinfos2.php"
            src = self.load(apiurl, cookies=False,
                            get={"file_id": match.group(1)}).strip()
            if not src and n <= 3:
                sleep(0.2)
                self.download_api_data(n + 1)
                return
            elif not src:
                self.fail(_("No API Data was send"))

            self.log.debug("Netload: APIDATA: " + src)
            self.api_data = {}
            if src == "unknown_server_data":
                self.api_data = False
            elif src != "unknown file_data":
                # Response format: id;name;size;status[;checksum]
                lines = src.split(";")
                self.api_data["exists"] = True
                self.api_data["fileid"] = lines[0]
                self.api_data["filename"] = lines[1]
                self.api_data["size"] = lines[2]  # @TODO formatting? (ex: '2.07 KB')
                self.api_data["status"] = lines[3]
                if self.api_data["status"] == "online":
                    self.api_data["checksum"] = lines[4].strip()
                else:
                    self.offline()
            else:
                self.api_data["exists"] = False
        else:
            # NOTE(review): indentation was lost in the recovered source;
            # the page-load fallback is read as belonging to this branch
            # (no file id found) -- confirm against an upstream copy.
            self.api_data = False
            self.html = [self.load(self.url, cookies=False)]

    def final_wait(self, page):
        """Honour the countdown on *page*, then store the final file URL."""
        wait_time = self.get_wait_time(page)
        self.setWait(wait_time)
        self.log.debug(_("Netload: final wait %d seconds" % wait_time))
        self.wait()
        self.url = self.get_file_url(page)

    def download_html(self):
        """Walk the free-download flow (waits, IP throttle, captcha).

        Returns True once the final wait has been scheduled and self.url
        points at the real file; False after 10 unsuccessful rounds.
        """
        self.log.debug("Netload: Entering download_html")
        page = self.load(self.url, cookies=True)
        captchawaited = False

        for i in range(10):
            self.log.debug(_("Netload: try number %d " % i))

            if self.getConf('dumpgen'):
                print(page)

            if re.search(r"(We will prepare your download..)", page) is not None:
                self.log.debug("Netload: We will prepare your download")
                self.final_wait(page)
                return True

            # "reqeust" spelling kept exactly as in the original pattern.
            if re.search(r"(We had a reqeust with the IP)", page) is not None:
                wait = self.get_wait_time(page)
                if wait == 0:
                    self.log.debug("Netload: Wait was 0 setting 30")
                    wait = 30
                self.log.info(_("Netload: waiting between downloads %d s." % wait))
                self.wantReconnect = True
                self.setWait(wait)
                self.wait()

                # Reconstructed pattern (markup lost in the recovered
                # source) -- the original called link.group(1), so it
                # captured a relative next-download link.  TODO confirm.
                link = re.search(
                    r'You can download now your next file\. '
                    r'<a href="(index\.php\?id=10[^"]+)"[^>]*>'
                    r'Click here for the download',
                    page)
                if link is not None:
                    self.log.debug("Netload: Using new link found on page")
                    page = self.load("http://netload.in/"
                                     + link.group(1).replace("amp;", ""))
                else:
                    self.log.debug("Netload: No new link found, using old one")
                    page = self.load(self.url, cookies=True)
                continue

            self.log.debug("Netload: Trying to find captcha")
            url_captcha_html = "http://netload.in/" + re.search(
                r'(index.php\?id=10&.*&captcha=1)',
                page).group(1).replace("amp;", "")
            page = self.load(url_captcha_html, cookies=True)

            try:
                captcha_url = "http://netload.in/" + re.search(
                    r'(share/includes/captcha.php\?t=\d*)', page).group(1)
            except Exception:
                # Dump the page for debugging; close the handle properly.
                with open("dump.html", "w") as dump:
                    dump.write(page)
                self.log.debug("Netload: Could not find captcha, "
                               "try again from beginning")
                continue

            # Reconstructed pattern (the original regex text was lost) --
            # the id is expected in a hidden form field.  TODO confirm.
            file_id = re.search(
                r'<input name="file_id" type="hidden" value="(.*)" />',
                page).group(1)

            if not captchawaited:
                wait = self.get_wait_time(page)
                if i == 0:
                    wait = 1  # wait only 1 sec contrary to time on website
                self.log.info(_("Netload: waiting for captcha %d s." % wait))
                self.setWait(wait)
                self.wait()
                captchawaited = True

            captcha = self.decryptCaptcha(captcha_url)
            page = self.load("http://netload.in/index.php?id=10",
                             post={"file_id": file_id,
                                   "captcha_check": captcha},
                             cookies=True)

        return False

    def get_file_url(self, page):
        """Extract the final download URL from *page*; None on failure.

        Both patterns are reconstructed (HTML markup was stripped from
        the recovered source) -- confirm against upstream.
        """
        try:
            file_url_pattern = r'<a class="Orange_Link" href="(http://[^"]+)"[^>]*>Click here'
            attempt = re.search(file_url_pattern, page)
            if attempt is not None:
                return attempt.group(1)
            else:
                self.log.debug("Netload: Backup try for final link")
                file_url_pattern = r'<a href="(.+)" class="Orange_Link">Click here'
                attempt = re.search(file_url_pattern, page)
                return "http://netload.in/" + attempt.group(1)
        except Exception:
            self.log.debug("Netload: Getting final link failed")
            return None

    def get_wait_time(self, page):
        # The page's countdown(...) argument is in hundredths of a second.
        wait_seconds = int(re.search(r"countdown\((.+),'change\(\)'\)",
                                     page).group(1)) / 100
        return wait_seconds

    def proceed(self, url):
        self.log.debug("Netload: Downloading..")
        self.download(url, cookies=True)