author     fragonib <devnull@localhost>    2011-05-15 14:39:19 +0200
committer  fragonib <devnull@localhost>    2011-05-15 14:39:19 +0200
commit     e771c9bb3cebede886fd37121df909e723d6c1e3 (patch)
tree       98beb0e2ed72361596271cac869e40a062a38d4a
parent     fix premium and convert to json (diff)
download   pyload-e771c9bb3cebede886fd37121df909e723d6c1e3.tar.xz
Several Crypter and Hoster fixes, improvements, python2.5 issues...
-rw-r--r--   module/plugins/crypter/LinkSaveIn.py        167
-rw-r--r--   module/plugins/crypter/NCryptIn.py           12
-rw-r--r--   module/plugins/hoster/MegauploadCom.py      127
-rw-r--r--   module/plugins/hoster/OneFichierCom.py      156
-rw-r--r--   module/plugins/hoster/UploadStationCom.py     8
5 files changed, 319 insertions, 151 deletions
diff --git a/module/plugins/crypter/LinkSaveIn.py b/module/plugins/crypter/LinkSaveIn.py
index 8809bd7d1..33d49608c 100644
--- a/module/plugins/crypter/LinkSaveIn.py
+++ b/module/plugins/crypter/LinkSaveIn.py
@@ -2,6 +2,7 @@
from Crypto.Cipher import AES
from module.plugins.Crypter import Crypter
+from module.unescape import unescape
import base64
import binascii
import re
@@ -9,8 +10,8 @@ import re
class LinkSaveIn(Crypter):
__name__ = "LinkSaveIn"
__type__ = "crypter"
- __pattern__ = r"http://(www\.)?linksave.in/([a-z0-9]+)$"
- __version__ = "1.01"
+ __pattern__ = r"http://(www\.)?linksave.in/(?P<id>\w+)$"
+ __version__ = "2.0"
__description__ = """LinkSave.in Crypter Plugin"""
__author_name__ = ("fragonib")
__author_mail__ = ("fragonib[AT]yahoo[DOT]es")
@@ -18,64 +19,162 @@ class LinkSaveIn(Crypter):
# Constants
_JK_KEY_ = "jk"
_CRYPTED_KEY_ = "crypted"
+ HOSTER_DOMAIN = "linksave.in"
+
+ def setup(self):
+ self.html = None
+ self.fileid = None
+ self.captcha = False
+ self.package = None
def decrypt(self, pyfile):
- # Request page
- self.html = self.load(pyfile.url)
- if not self.fileExists():
+ # Init
+ self.package = pyfile.package()
+ self.fileid = re.match(self.__pattern__, pyfile.url).group('id')
+ self.req.cj.setCookie(self.HOSTER_DOMAIN, "Linksave_Language", "english")
+
+ # Request package
+ self.html = self.load(self.pyfile.url, cookies=True)
+ if not self.isOnline():
self.offline()
-
- # Handle captcha protection
- self.handleCaptcha()
+
+ # Check for protection
+ if self.isPasswordProtected():
+ self.unlockPasswordProtection()
+ self.handleErrors()
+
+ if self.isCaptchaProtected():
+ self.captcha = True
+ self.unlockCaptchaProtection()
+ self.handleErrors()
# Get package name and folder
- (package_name, folder_name) = self.getPackageNameAndFolder()
+ (package_name, folder_name) = self.getPackageInfo()
- # Get package links
- (crypted, jk) = self.getCipherParams()
- package_links = self.getLinks(crypted, jk)
+ # Extract package links
+ package_links = []
+ package_links.extend(self.handleWebLinks())
+ package_links.extend(self.handleContainers())
+ package_links.extend(self.handleCNL2())
+ package_links = set(package_links)
# Pack
self.packages = [(package_name, package_links, folder_name)]
- def fileExists(self):
- if "<title>LinkSave.in - Error 404</title>" in self.html:
+ def isOnline(self):
+ if "<big>Error 404 - Folder not found!</big>" in self.html:
self.log.debug("%s: File not found" % self.__name__)
return False
return True
- def getPackageNameAndFolder(self):
+ def isPasswordProtected(self):
+ if re.search(r'''<input.*?type="password"''', self.html):
+ self.log.debug("%s: Links are password protected" % self.__name__)
+ return True
+
+ def isCaptchaProtected(self):
+ if "<b>Captcha:</b>" in self.html:
+ self.log.debug("%s: Links are captcha protected" % self.__name__)
+ return True
+ return False
+
+ def unlockPasswordProtection(self):
+ password = self.package.password
+ self.log.debug("%s: Submitting password [%s] for protected links" % (self.__name__, password))
+ post = {"id": self.fileid, "besucherpasswort": self.package.password, 'login': 'submit'}
+ self.html = self.load(self.pyfile.url, post=post)
+
+ def unlockCaptchaProtection(self):
+ hash = re.search(r'name="hash" value="([^"]+)', self.html).group(1)
+ captchaUrl = re.search(r'src=".(/captcha/cap.php\?hsh=[^"]+)', self.html).group(1)
+ code = self.decryptCaptcha("http://linksave.in" + captchaUrl, forceUser=True)
+ self.html = self.load(self.pyfile.url, post={"id": self.fileid, "hash": hash, "code": code})
+
+ def getPackageInfo(self):
name = self.pyfile.package().name
folder = self.pyfile.package().folder
- self.log.debug("%s: Default to pyfile name [%s] and folder [%s] for package" % (self.__name__, name, folder))
+ self.log.debug("%s: Defaulting to pyfile name [%s] and folder [%s] for package" % (self.__name__, name, folder))
return name, folder
-
- def handleCaptcha(self):
- if "<b>Captcha:</b>" in self.html:
- id = re.search(r'name="id" value="([^"]+)', self.html).group(1)
- hash = re.search(r'name="hash" value="([^"]+)', self.html).group(1)
- url = re.search(r'src=".(/captcha/cap.php\?hsh=[^"]+)', self.html).group(1)
- value = self.decryptCaptcha("http://linksave.in" + url, forceUser=True)
- self.html = self.load(self.pyfile.url, post={"id": id, "hash": hash, "code": value})
- def getCipherParams(self):
+ def handleErrors(self):
+ if "The visitorpassword you have entered is wrong" in self.html:
+ self.log.debug("%s: Incorrect password, please set right password on 'Edit package' form and retry" % self.__name__)
+ self.fail("Incorrect password, please set right password on 'Edit package' form and retry")
+
+ if self.captcha:
+ if "Wrong code. Please retry" in self.html:
+ self.log.debug("%s: Invalid captcha, retrying" % self.__name__)
+ self.invalidCaptcha()
+ self.retry()
+ else:
+ self.correctCaptcha()
+
+ def handleWebLinks(self):
+ package_links = []
+ self.log.debug("%s: Handling Web links" % self.__name__)
+
+ #@TODO: Gather paginated web links
+ pattern = r'<a href="http://linksave\.in/(\w{43})"'
+ ids = re.findall(pattern, self.html)
+ self.log.debug("%s: Decrypting %d Web links" % (self.__name__, len(ids)))
+ for i, id in enumerate(ids):
+ try:
+ webLink = "http://linksave.in/%s" % id
+ self.log.debug("%s: Decrypting Web link %d, %s" % (self.__name__, i+1, webLink))
+ fwLink = "http://linksave.in/fw-%s" % id
+ response = self.load(fwLink)
+ jscode = re.findall(r'<script type="text/javascript">(.*)</script>', response)[-1]
+ jseval = self.js.eval("document = { write: function(e) { return e; } }; %s" % jscode)
+ dlLink = re.search(r'http://linksave\.in/dl-\w+', jseval).group(0)
+ self.log.debug("%s: JsEngine returns value [%s] for redirection link" % (self.__name__, dlLink))
+ response = self.load(dlLink)
+ link = unescape(re.search(r'<iframe src="(.+?)"', response).group(1))
+ package_links.append(link)
+ except Exception, detail:
+ self.log.debug("%s: Error decrypting Web link %s, %s" % (self.__name__, webLink, detail))
+ return package_links
+
+ def handleContainers(self):
+ package_links = []
+ self.log.debug("%s: Handling Container links" % self.__name__)
+
+ pattern = r"\('(?:rsdf|ccf|dlc)_link'\).href=unescape\('(.*?\.(?:rsdf|ccf|dlc))'\)"
+ containersLinks = re.findall(pattern, self.html)
+ self.log.debug("%s: Decrypting %d Container links" % (self.__name__, len(containersLinks)))
+ for containerLink in containersLinks:
+ link = "http://linksave.in/%s" % unescape(containerLink)
+ package_links.append(link)
+ return package_links
+
+ def handleCNL2(self):
+ package_links = []
+ self.log.debug("%s: Handling CNL2 links" % self.__name__)
+
+ if 'cnl2_load' in self.html:
+ try:
+ (vcrypted, vjk) = self._getCipherParams()
+ for (crypted, jk) in zip(vcrypted, vjk):
+ package_links.extend(self._getLinks(crypted, jk))
+ except:
+ self.fail("Unable to decrypt CNL2 links")
+ return package_links
+
+ def _getCipherParams(self):
# Get jk
- jk_re = r'<INPUT(.*?)NAME="%s"(.*?)VALUE="(?P<jk>.*?)"' % LinkSaveIn._JK_KEY_
- m = re.search(jk_re, self.html)
- jk = m.group('jk')
+ jk_re = r'<INPUT.*?NAME="%s".*?VALUE="(.*?)"' % LinkSaveIn._JK_KEY_
+ vjk = re.findall(jk_re, self.html)
# Get crypted
- crypted_re = r'<INPUT(.*?)NAME="%s"(.*?)VALUE="(?P<crypted>.*?)"' % LinkSaveIn._CRYPTED_KEY_
- m = re.search(crypted_re, self.html)
- crypted = m.group('crypted')
+ crypted_re = r'<INPUT.*?NAME="%s".*?VALUE="(.*?)"' % LinkSaveIn._CRYPTED_KEY_
+ vcrypted = re.findall(crypted_re, self.html)
# Log and return
- self.log.debug("%s: Javascript cipher key function [%s]" % (self.__name__, jk))
- return crypted, jk
+ self.log.debug("%s: Detected %d crypted blocks" % (self.__name__, len(vcrypted)))
+ return vcrypted, vjk
- def getLinks(self, crypted, jk):
+ def _getLinks(self, crypted, jk):
# Get key
jreturn = self.js.eval("%s f()" % jk)
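Note: the _getLinks hunk above is cut off at the hunk boundary, so only the jk key evaluation is visible. For context, the following is a minimal standalone sketch of the CNL2 (Click'n'Load 2) decryption step these crypter plugins perform once a crypted block and its hex key are in hand. The helper name and variables are illustrative rather than taken from the plugin; it assumes PyCrypto, matching the existing Crypto.Cipher.AES import at the top of the file.

import base64
import binascii
from Crypto.Cipher import AES

def decrypt_cnl2(crypted, hex_key):
    # The 32-character hex string produced by evaluating the jk function is
    # the AES key; CNL2 conventionally reuses it as the IV.
    key = binascii.unhexlify(hex_key)
    data = base64.standard_b64decode(crypted)
    cipher = AES.new(key, AES.MODE_CBC, key)
    text = cipher.decrypt(data)
    # Drop padding/null bytes and return one URL per non-empty line
    text = text.replace("\x00", "").replace("\r", "")
    return [link for link in text.split("\n") if link]

In the plugin itself the hex key would come from self.js.eval("%s f()" % jk), as the end of the hunk shows.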
diff --git a/module/plugins/crypter/NCryptIn.py b/module/plugins/crypter/NCryptIn.py
index d2438b285..785c98ba8 100644
--- a/module/plugins/crypter/NCryptIn.py
+++ b/module/plugins/crypter/NCryptIn.py
@@ -11,7 +11,7 @@ class NCryptIn(Crypter):
__name__ = "NCryptIn"
__type__ = "crypter"
__pattern__ = r"http://(?:www\.)?ncrypt.in/folder-([^/\?]+)"
- __version__ = "1.2"
+ __version__ = "1.21"
__description__ = """NCrypt.in Crypter Plugin"""
__author_name__ = ("fragonib")
__author_mail__ = ("fragonib[AT]yahoo[DOT]es")
@@ -49,7 +49,7 @@ class NCryptIn(Crypter):
# Extract package links
package_links = []
package_links.extend(self.handleWebLinks())
- package_links.extend(self.handleContainer())
+ package_links.extend(self.handleContainers())
package_links.extend(self.handleCNL2())
package_links = set(package_links)
@@ -152,12 +152,12 @@ class NCryptIn(Crypter):
try:
url = link.replace("link-", "frame-")
link = self.load(url, just_header=True)['location']
- except Exception, e:
- self.log.debug("%s: Error decrypting Web link %s, %s" % (self.__name__, link, e))
- package_links.append(link)
+ package_links.append(link)
+ except Exception, detail:
+ self.log.debug("%s: Error decrypting Web link %s, %s" % (self.__name__, link, detail))
return package_links
- def handleContainer(self):
+ def handleContainers(self):
package_links = []
self.log.debug("%s: Handling Container links" % self.__name__)
diff --git a/module/plugins/hoster/MegauploadCom.py b/module/plugins/hoster/MegauploadCom.py
index 8aa37c47d..4ab476d1e 100644
--- a/module/plugins/hoster/MegauploadCom.py
+++ b/module/plugins/hoster/MegauploadCom.py
@@ -8,56 +8,66 @@ from module.plugins.Hoster import Hoster
from module.network.RequestFactory import getURL
from module.unescape import unescape
+from module.PyFile import statusMap
from pycurl import error
def getInfo(urls):
- url = "http://megaupload.com/mgr_linkcheck.php"
-
- ids = [x.split("=")[-1] for x in urls]
+
+ result = []
- i = 0
+ # MU API request
post = {}
- for id in ids:
- post["id%i"%i] = id
- i += 1
-
- api = getURL(url, {}, post)
- api = [re.split(r"&(?!amp;|#\d+;)", x) for x in re.split(r"&?(?=id[\d]+=)", api)]
+ fileIds = [x.split("=")[-1] for x in urls] # Get ids from urls
+ for i, fileId in enumerate(fileIds):
+ post["id%i" % i] = fileId
+ response = getURL(MegauploadCom.API_URL, post=post)
- result = []
- i=0
- for data in api:
- if data[0].startswith("id"):
- tmp = [x.split("=") for x in data]
- if tmp[0][1] == "0":
- status = 2
- elif tmp[0][1] == "1":
- status = 1
- elif tmp[2][1] == "3":
- status = 3
- else:
- status = 3
-
- name = None
- size = 0
- if status != 1:
- name = unescape(tmp[3][1])
- size = tmp[1][1]
+ # Process API response
+ parts = [re.split(r"&(?!amp;|#\d+;)", x) for x in re.split(r"&?(?=id[\d]+=)", response)]
+ apiHosterMap = dict([elem.split('=') for elem in parts[0]])
+ for entry in parts[1:]:
+ apiFileDataMap = dict([elem.split('=') for elem in entry])
+ apiFileId = [key for key in apiFileDataMap.keys() if key.startswith('id')][0]
+ i = int(apiFileId.replace('id', ''))
- result.append( (name, size, status, urls[i] ) )
- i += 1
+ # File info
+ fileInfo = _translateAPIFileInfo(apiFileId, apiFileDataMap, apiHosterMap)
+ url = urls[i]
+ name = fileInfo.get('name', url)
+ size = fileInfo.get('size', 0)
+ status = fileInfo.get('status', statusMap['queued'])
+
+ # Add result
+ result.append( (name, size, status, url ) )
yield result
+
+def _translateAPIFileInfo(apiFileId, apiFileDataMap, apiHosterMap):
+
+ # Translate
+ fileInfo = {}
+ try:
+ fileInfo['status'] = MegauploadCom.API_STATUS_MAPPING[apiFileDataMap[apiFileId]]
+ fileInfo['name'] = apiFileDataMap['n']
+ fileInfo['size'] = apiFileDataMap['s']
+ fileInfo['hoster'] = apiHosterMap[apiFileDataMap['d']]
+ except:
+ pass
+
+ return fileInfo
class MegauploadCom(Hoster):
__name__ = "MegauploadCom"
__type__ = "hoster"
__pattern__ = r"http://[\w\.]*?(megaupload)\.com/.*?(\?|&)d=[0-9A-Za-z]+"
- __version__ = "0.22"
+ __version__ = "0.23"
__description__ = """Megaupload.com Download Hoster"""
__author_name__ = ("spoob")
__author_mail__ = ("spoob@pyload.org")
+
+ API_URL = "http://megaupload.com/mgr_linkcheck.php"
+ API_STATUS_MAPPING = {"0": statusMap['online'], "1": statusMap['offline'], "3": statusMap['temp. offline']}
def init(self):
self.html = [None, None]
@@ -167,29 +177,30 @@ class MegauploadCom(Hoster):
def download_api(self):
- url = "http://megaupload.com/mgr_linkcheck.php"
-
- id = self.pyfile.url.split("=")[-1]
-
-
- post = {"id0": id}
-
- api = self.load(url, {}, post)
- self.log.debug("MU API: %s" % api)
- api = [re.split(r"&(?!amp;|#\d+;)", x) for x in re.split(r"&?(?=id[\d]+=)", api)]
-
- for data in api:
- if data[0].startswith("id"):
- tmp = [x.split("=") for x in data]
- if tmp[0][1] == "1":
- self.offline()
-
- name = unescape(tmp[3][1])
- #size = tmp[1][1]
-
- self.api["name"] = name
- self.pyfile.name = name
-
+ # MU API request
+ fileId = self.pyfile.url.split("=")[-1] # Get file id from url
+ apiFileId = "id0"
+ post = {apiFileId: fileId}
+ response = getURL(self.API_URL, post=post)
+ self.log.debug("%s: API response [%s]" % (self.__name__, response))
+
+ # Translate API response
+ parts = [re.split(r"&(?!amp;|#\d+;)", x) for x in re.split(r"&?(?=id[\d]+=)", response)]
+ apiHosterMap = dict([elem.split('=') for elem in parts[0]])
+ apiFileDataMap = dict([elem.split('=') for elem in parts[1]])
+ self.api = _translateAPIFileInfo(apiFileId, apiFileDataMap, apiHosterMap)
+
+ # File info
+ try:
+ self.pyfile.status = self.api['status']
+ self.pyfile.name = self.api['name']
+ self.pyfile.size = self.api['size']
+ except KeyError:
+ self.log.warn("%s: Cannot recover all file [%s] info from API response." % (self.__name__, fileId))
+
+ # Fail if offline
+ if self.pyfile.status == statusMap['offline']:
+ self.offline()
def get_file_url(self):
file_url_pattern = 'id="downloadlink"><a href="(.*)"\s+(?:onclick|class)="'
@@ -197,11 +208,11 @@ class MegauploadCom(Hoster):
return search.group(1).replace(" ", "%20")
def get_file_name(self):
- if not self.api:
+ try:
+ return self.api["name"]
+ except KeyError:
file_name_pattern = 'id="downloadlink"><a href="(.*)" onclick="'
return re.search(file_name_pattern, self.html[1]).group(1).split("/")[-1]
- else:
- return self.api["name"]
def get_wait_time(self):
time = re.search(r"count=(\d+);", self.html[1])
diff --git a/module/plugins/hoster/OneFichierCom.py b/module/plugins/hoster/OneFichierCom.py
index b1ce20fbd..69d6e81c0 100644
--- a/module/plugins/hoster/OneFichierCom.py
+++ b/module/plugins/hoster/OneFichierCom.py
@@ -4,15 +4,63 @@
import re
from module.plugins.Hoster import Hoster
+from module.network.RequestFactory import getURL
+
+
+def getInfo(urls):
+ result = []
+
+ for url in urls:
+
+ # Get file info html
+ id = re.match(OneFichierCom.__pattern__, url).group('id')
+ url = 'http://%s.1fichier.com/en' % id # Force response in english
+ html = getURL(url)
+
+ # Offline?
+ if re.search(OneFichierCom.FILE_OFFLINE_PATTERN, html):
+ result.append((url, 0, 1, url))
+ continue
+
+ # Name
+ for pattern in OneFichierCom.FILE_NAME_PATTERNS:
+ m = re.search(pattern, html)
+ if m is not None:
+ name = m.group('name').strip()
+
+ # Size
+ m = re.search(OneFichierCom.FILE_SIZE_PATTERN, html)
+ value = float(m.group('size'))
+ units = m.group('units')[0].upper()
+ pow = {'K' : 1, 'M' : 2, 'G' : 3}[units]
+ size = int(value*1024**pow)
+
+ # Return info
+ result.append((name, size, 2, url))
+
+ yield result
+
class OneFichierCom(Hoster):
__name__ = "OneFichierCom"
__type__ = "hoster"
- __pattern__ = r"http://[a-z0-9]+\.1fichier\.com/(.*)"
- __version__ = "0.2"
+ __pattern__ = r"http://(?P<id>[a-z0-9]+)\.1fichier\.com(?P<remain>.*)"
+ __version__ = "0.3"
__description__ = """1fichier.com download hoster"""
__author_name__ = ("fragonib")
__author_mail__ = ("fragonib[AT]yahoo[DOT]es")
+
+ FILE_NAME_PATTERNS = (
+ r'">File name :</th>[\t\r\n ]+<td>(?P<name>.*?)</td>',
+ r">Click here to download (?P<name>.*?)</a>",
+ r"content=\"Download the file named (?P<name>.*?)\">",
+ r"<title>Download the file\s*:\s*(?P<name>.*?)</title>"
+ )
+ FILE_SIZE_PATTERN = r"<th>File size :</th>\s+<td>(?P<size>[\d\.]*) (?P<units>\w+)</td>"
+ DOWNLOAD_LINK_PATTERN = r'<br/>&nbsp;<br/>&nbsp;<br/>&nbsp;\s+<a href="(?P<url>http://.*?)"'
+ FILE_OFFLINE_PATTERN = r"(The requested file could not be found|The file may has been deleted by its owner)"
+ PASSWORD_PROTECTED_TOKEN = "protected by password"
+ WAITING_TOKEN = "Please wait a few seconds"
def setup(self):
self.html = None
@@ -20,65 +68,75 @@ class OneFichierCom(Hoster):
def process(self, pyfile):
- self.download_html()
-
- if not self.file_exists():
- self.log.debug("OneFichierCom: File not yet available.")
- self.offline()
+ # Get main page (english version)
+ url = self.getEnglishURL()
+ self.html = self.load(url)
+ self.handleErrors()
- pyfile.name = self.get_file_name()
- pyfile.size = self.get_file_size()
+ # Get file info
+ pyfile.name = self.getFileName()
+ pyfile.size = self.getFileSize()
- url = self.get_file_url()
- self.download(url)
-
- def download_html(self):
- self.html = self.load(self.pyfile.url, cookies=False)
+ # Check for protection
+ if self.isProtected():
+ password = pyfile.package().password
+ self.log.debug("%s: Submitting password [%s]" % (self.__name__, password))
+ self.download(url, post={"password" : password})
+ else:
+ downloadLink = self.getDownloadLink()
+ self.download(downloadLink)
- def file_exists(self):
- warnings = (r"The requested file could not be found",
- r"The file may has been deleted by its owner",
- r"Le fichier demandé n'existe pas\.",
- r"Il a pu être supprimé par son propriétaire\.")
- pattern = '(' + '|'.join(warnings) + ')'
- if re.search(pattern, self.html) is not None:
- return False
- return True
-
- def get_file_url(self):
- file_url_pattern = r"<br/>\&nbsp;<br/>\&nbsp;<br/>\&nbsp;[\t\n\r ]+<a href=\"(?P<url>http://.*?)\""
-
- m = re.search(file_url_pattern, self.html)
- if m is not None:
- url = m.group('url')
- self.log.debug("OneFichierCom: Got file URL [%s]" % url)
- return url
+ # Check download
+ self.handleDownloadedFile()
- def get_file_name(self):
- file_name_patterns = (
- r"\">(Nom du fichier :|File name :)</th>[\t\r\n ]+<td>(?P<name>.*?)</td>",
- r"(>Cliquez ici pour télécharger|>Click here to download) (?P<name>.*?)</a>",
- r"content=\"(Téléchargement du fichier |Download the file named )(?P<name>.*?)\">",
- r"<title>(Téléchargement du fichier|Download the file)\s*:\s*(?P<name>.*?)</title>"
- )
-
- for pattern in file_name_patterns:
+ def getEnglishURL(self):
+ id = re.match(self.__pattern__, self.pyfile.url).group('id')
+ url = 'http://%s.1fichier.com/en' % id
+ return url
+
+ def getFileName(self):
+ for pattern in self.FILE_NAME_PATTERNS:
m = re.search(pattern, self.html)
if m is not None:
name = m.group('name').strip()
- self.log.debug("OneFichierCom: Got file name [%s]" % name)
+ self.log.debug("%s: Got file name [%s]" % (self.__name__, name))
return name
- def get_file_size(self):
- file_size_pattern = r"<th>(Taille :|File size :)</th>[\t\n\r ]+<td>(?P<size>\d*)\s+(?P<units>.*?)</td>"
- m = re.search(file_size_pattern, self.html)
+ def getFileSize(self):
+ m = re.search(self.FILE_SIZE_PATTERN, self.html)
if m is not None:
- size = int(m.group('size'))
+ size = float(m.group('size'))
units = m.group('units')[0].upper()
try:
multiplier = 1024 ** {"K":1, "M":2, "G":3}[units]
except KeyError:
multiplier = 1
- bytes = size * multiplier
- self.log.debug("OneFichierCom: Got file size of [%s] bytes" % bytes)
- return bytes
\ No newline at end of file
+ bytes = int(size * multiplier)
+ self.log.debug("%s: Got file size of [%s] bytes" % (self.__name__, bytes))
+ return bytes
+
+ def isProtected(self):
+ if self.PASSWORD_PROTECTED_TOKEN in self.html:
+ self.log.debug("%s: Links are password protected" % self.__name__)
+ return True
+ return False
+
+ def getDownloadLink(self):
+ m = re.search(self.DOWNLOAD_LINK_PATTERN, self.html)
+ if m is not None:
+ url = m.group('url')
+ self.log.debug("%s: Got file URL [%s]" % (self.__name__, url))
+ return url
+
+ def handleErrors(self):
+ if re.search(self.FILE_OFFLINE_PATTERN, self.html) is not None:
+ self.log.debug("%s: File not yet available." % self.__name__)
+ self.offline()
+
+ def handleDownloadedFile(self):
+ check = self.checkDownload({"wait": self.WAITING_TOKEN})
+ if check == "wait":
+ wait = 5
+ self.setWait(wait, True)
+ self.wait()
+ self.retry()
\ No newline at end of file
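Note on getFileSize above: the size is now parsed as a float before scaling, so a listing such as "2.5 GB" is no longer truncated to 2 before the multiplication. A minimal standalone illustration of the conversion (to_bytes is an invented helper name):

def to_bytes(value, units):
    # Scale by 1024**n for K/M/G prefixes; unknown units fall back to plain
    # bytes, mirroring the KeyError fallback in getFileSize.
    multiplier = 1024 ** {'K': 1, 'M': 2, 'G': 3}.get(units[0].upper(), 0)
    return int(float(value) * multiplier)

assert to_bytes("2.5", "GB") == 2684354560
assert to_bytes("512", "KB") == 524288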
diff --git a/module/plugins/hoster/UploadStationCom.py b/module/plugins/hoster/UploadStationCom.py
index 32ab1972a..65a44b765 100644
--- a/module/plugins/hoster/UploadStationCom.py
+++ b/module/plugins/hoster/UploadStationCom.py
@@ -38,7 +38,7 @@ class UploadStationCom(Hoster):
__name__ = "UploadStationCom"
__type__ = "hoster"
__pattern__ = r"http://(www\.)?uploadstation\.com/file/(?P<id>[A-Za-z0-9]+)"
- __version__ = "0.3"
+ __version__ = "0.31"
__description__ = """UploadStation.Com File Download Hoster"""
__author_name__ = ("fragonib")
__author_mail__ = ("fragonib[AT]yahoo[DOT]es")
@@ -84,8 +84,8 @@ class UploadStationCom(Hoster):
# self.jsPage = self.load("http://uploadstation.com" + jsPage)
# Check download
- response = self.load(self.pyfile.url, post={"checkDownload" : "check"})
- self.log.debug("%s: Checking download, response [%s]" % (self.__name__, response))
+ response = self.load(self.pyfile.url, post={"checkDownload" : "check"}, utf8=True)
+ self.log.debug("%s: Checking download, response [%s]" % (self.__name__, response.encode('ascii', 'ignore')))
self.handleErrors(response)
# We got a captcha?
@@ -98,7 +98,7 @@ class UploadStationCom(Hoster):
post={'recaptcha_challenge_field' : challenge,
'recaptcha_response_field' : code,
'recaptcha_shortencode_field' : self.fileId})
- self.log.debug("%s: Result of captcha resolving [%s]" % (self.__name__, response))
+ self.log.debug("%s: Result of captcha resolving [%s]" % (self.__name__, response.encode('ascii', 'ignore')))
self.handleCaptchaErrors(response)
# Process waiting
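Note on the UploadStationCom change: the response is now requested with utf8=True and encoded to ASCII (dropping characters that cannot be represented) before being interpolated into the debug message, which avoids a UnicodeEncodeError when the page body contains non-ASCII text under Python 2. A tiny standalone illustration with an invented sample string:

response = u"T\xe9l\xe9chargement termin\xe9"  # invented unicode page fragment
safe = response.encode('ascii', 'ignore')      # -> 'Tlchargement termin'
print "Checking download, response [%s]" % safe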