summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--module/plugins/crypter/FilecryptCc.py148
-rw-r--r--pyload/plugins/crypter/LinkCryptWs.py63
-rw-r--r--pyload/plugins/hoster/OneFichierCom.py2
-rw-r--r--pyload/plugins/internal/SimpleCrypter.py47
-rw-r--r--pyload/plugins/internal/SimpleHoster.py307
-rw-r--r--pyload/plugins/internal/XFSHoster.py11
6 files changed, 374 insertions, 204 deletions
diff --git a/module/plugins/crypter/FilecryptCc.py b/module/plugins/crypter/FilecryptCc.py
new file mode 100644
index 000000000..db4a8c4ab
--- /dev/null
+++ b/module/plugins/crypter/FilecryptCc.py
@@ -0,0 +1,148 @@
+# -*- coding: utf-8 -*-
+
+import base64
+import binascii
+import re
+
+from Crypto.Cipher import AES
+
+from module.plugins.Crypter import Crypter
+
+
+class FilecryptCc(Crypter):
+ __name__ = "FilecryptCc"
+ __type__ = "crypter"
+ __version__ = "0.04"
+
+ __pattern__ = r'https?://(?:www\.)?filecrypt\.cc/Container/\w+'
+
+ __description__ = """Filecrypt.cc decrypter plugin"""
+ __license__ = "GPLv3"
+ __authors__ = [("zapp-brannigan", "")]
+
+
+ # URL_REPLACEMENTS = [(r'.html$', ""), (r'$', ".html")] #@TODO: Extend SimpleCrypter
+
+ DLC_LINK_PATTERN = r'<button class="dlcdownload" type="button" title="Download \*.dlc" onclick="DownloadDLC\(\'(.+)\'\);"><i></i><span>dlc<'
+ WEBLINK_PATTERN = r"openLink.?'([\w_-]*)',"
+
+ CAPTCHA_PATTERN = r'<img id="nc" src="(.+?)"'
+
+ MIRROR_PAGE_PATTERN = r'"[\w]*" href="(http://filecrypt.cc/Container/\w+\.html\?mirror=\d+)">'
+
+
+ def setup(self):
+ self.links = []
+
+
+ def decrypt(self, pyfile):
+ self.html = self.load(pyfile.url, cookies=True)
+
+ if "content not found" in self.html:
+ self.offline()
+
+ self.handlePasswordProtection()
+ self.handleCaptcha()
+ self.handleMirrorPages()
+
+ for handle in (self.handleCNL, self.handleWeblinks, self.handleDlcContainer):
+ handle()
+ if self.links:
+ self.packages = [(pyfile.package().name, self.links, pyfile.package().name)]
+ return
+
+
+ def handleMirrorPages(self):
+ if "mirror=" not in self.siteWithLinks:
+ return
+
+ mirror = re.findall(self.MIRROR_PAGE_PATTERN, self.siteWithLinks)
+
+ self.logInfo(_("Found %d mirrors") % len(mirror))
+
+ for i in mirror[1:]:
+ self.siteWithLinks = self.siteWithLinks + self.load(i, cookies=True).decode("utf-8", "replace")
+
+
+ def handlePasswordProtection(self):
+ if '<input type="text" name="password"' not in self.html:
+ return
+
+ self.logInfo(_("Folder is password protected"))
+
+ if not self.pyfile.package().password:
+ self.fail(_("Please enter the password in package section and try again"))
+
+ self.html = self.load(self.pyfile.url, post={"password": self.pyfile.package().password}, cookies=True)
+
+
+ def handleCaptcha(self):
+ m = re.search(self.CAPTCHA_PATTERN, self.html)
+
+ if m:
+ self.logDebug("Captcha-URL: %s" % m.group(1))
+ captcha_code = self.decryptCaptcha("http://filecrypt.cc" + m.group(1), forceUser=True, imgtype="gif")
+ self.siteWithLinks = self.load(self.pyfile.url, post={"recaptcha_response_field":captcha_code}, decode=True, cookies=True)
+ else:
+ self.logDebug("No captcha found")
+ self.siteWithLinks = self.html
+
+ if "recaptcha_response_field" in self.siteWithLinks:
+ self.invalidCaptcha()
+ self.retry()
+
+
+ def handleDlcContainer(self):
+ dlc = re.findall(self.DLC_LINK_PATTERN, self.siteWithLinks)
+
+ if not dlc:
+ return
+
+ for i in dlc:
+ self.links.append("http://filecrypt.cc/DLC/%s.dlc" % i)
+
+
+ def handleWeblinks(self):
+ try:
+ weblinks = re.findall(self.WEBLINK_PATTERN, self.siteWithLinks)
+
+ for link in weblinks:
+ response = self.load("http://filecrypt.cc/Link/%s.html" % link, cookies=True)
+ link2 = re.search('<iframe noresize src="(.*)"></iframe>', response)
+ response2 = self.load(link2.group(1), just_header=True, cookies=True)
+ self.links.append(response2['location'])
+
+ except Exception, e:
+ self.logDebug("Error decrypting weblinks: %s" % e)
+
+
+ def handleCNL(self):
+ try:
+ vjk = re.findall('<input type="hidden" name="jk" value="function f\(\){ return \'(.*)\';}">', self.siteWithLinks)
+ vcrypted = re.findall('<input type="hidden" name="crypted" value="(.*)">', self.siteWithLinks)
+
+ for i in range(0, len(vcrypted)):
+ self.links.extend(self._getLinks(vcrypted[i], vjk[i]))
+
+ except Exception, e:
+ self.logDebug("Error decrypting CNL: %s" % e)
+
+
+ def _getLinks(self, crypted, jk):
+ # Get key
+ key = binascii.unhexlify(str(jk))
+
+ # Decode crypted
+ crypted = base64.standard_b64decode(crypted)
+
+ # Decrypt
+ Key = key
+ IV = key
+ obj = AES.new(Key, AES.MODE_CBC, IV)
+ text = obj.decrypt(crypted)
+
+ # Extract links
+ links = filter(lambda x: x != "",
+ text.replace("\x00", "").replace("\r", "").split("\n"))
+
+ return links
diff --git a/pyload/plugins/crypter/LinkCryptWs.py b/pyload/plugins/crypter/LinkCryptWs.py
index 4b4adc8d1..f01c42268 100644
--- a/pyload/plugins/crypter/LinkCryptWs.py
+++ b/pyload/plugins/crypter/LinkCryptWs.py
@@ -15,7 +15,7 @@ from pyload.utils import html_unescape
class LinkCryptWs(Crypter):
__name__ = "LinkCryptWs"
__type__ = "crypter"
- __version__ = "0.05"
+ __version__ = "0.06"
__pattern__ = r'http://(?:www\.)?linkcrypt\.ws/(dir|container)/(?P<ID>\w+)'
@@ -25,22 +25,18 @@ class LinkCryptWs(Crypter):
("glukgluk", None)]
- JK_KEY = "jk"
CRYPTED_KEY = "crypted"
+ JK_KEY = "jk"
def setup(self):
- self.html = None
- self.fileid = None
self.captcha = False
- self.package = None
-
- self.preferred_sources = ['cnl', 'web', 'dlc', 'rsdf', 'ccf', ] #['cnl', 'rsdf', 'ccf', 'dlc', 'web']
+ self.links = []
+ self.sources = ['cnl', 'web', 'dlc', 'rsdf', 'ccf']
def prepare(self):
# Init
- self.package = pyfile.package()
self.fileid = re.match(self.__pattern__, pyfile.url).group('ID')
self.req.cj.setCookie("linkcrypt.ws", "language", "en")
@@ -51,12 +47,9 @@ class LinkCryptWs(Crypter):
def decrypt(self, pyfile):
- #check if we have js
if not self.js:
self.fail(_("Missing JS Engine"))
- package_found = None
-
self.prepare()
if not self.isOnline():
@@ -85,23 +78,15 @@ class LinkCryptWs(Crypter):
self.get_container_html()
# Extract package links
- package_links = []
+ for type in self.sources:
+ links = self.handleLinkSource(type)
- for type_ in self.preferred_sources:
- links = self.handleLinkSource(type_)
if links:
- if isinstance(links, list):
- package_links.extend(links)
- else:
- package_found = True
+ self.links.extend(links)
break
- # Pack
- if package_links:
- self.packages = [(package_name, package_links, folder_name)]
-
- elif package_found:
- self.core.api.deletePackages([self.package.id])
+ if self.links:
+ self.packages = [(package_name, self.links, folder_name)]
def isOnline(self):
@@ -187,18 +172,18 @@ class LinkCryptWs(Crypter):
self.correctCaptcha()
- def handleLinkSource(self, type_):
- if type_ == 'cnl':
+ def handleLinkSource(self, type):
+ if type == 'cnl':
return self.handleCNL2()
- elif type_ == 'web':
+ elif type == 'web':
return self.handleWebLinks()
- elif type_ in ('rsdf', 'ccf', 'dlc'):
- return self.handleContainer(type_)
+ elif type in ('rsdf', 'ccf', 'dlc'):
+ return self.handleContainer(type)
else:
- self.error(_("Unknown source type: %s") % type_)
+ self.fail(_("Unknown source type: %s") % type) #@TODO: Replace with self.error in 0.4.10
def handleWebLinks(self):
@@ -247,17 +232,17 @@ class LinkCryptWs(Crypter):
return self.js.eval(line.replace('{}))',"{}).replace('document.open();document.write','').replace(';document.close();',''))"))
- def handleContainer(self, type_):
+ def handleContainer(self, type):
package_links = []
- type_ = type_.lower()
+ type = type.lower()
- self.logDebug('Search for %s Container links' % type_.upper())
+ self.logDebug('Search for %s Container links' % type.upper())
- if not type_.isalnum(): # check to prevent broken re-pattern (cnl2,rsdf,ccf,dlc,web are all alpha-numeric)
- self.error(_("unknown container type: %s") % type_)
+ if not type.isalnum(): # check to prevent broken re-pattern (cnl2,rsdf,ccf,dlc,web are all alpha-numeric)
+ self.fail(_("Unknown container type: %s") % type) #@TODO: Replace with self.error in 0.4.10
for line in self.container_html:
- if(type_ in line):
+ if type in line:
jseval = self.handle_javascript(line)
clink = re.search(r'href=["\']([^"\']*?)["\']',jseval,re.I)
@@ -267,8 +252,8 @@ class LinkCryptWs(Crypter):
self.logDebug("clink avaible")
package_name, folder_name = self.getPackageInfo()
- self.logDebug("Added package with name %s.%s and container link %s" %( package_name, type_, clink.group(1)))
- self.core.api.uploadContainer( "%s.%s" %(package_name, type_), self.load(clink.group(1)))
+ self.logDebug("Added package with name %s.%s and container link %s" %( package_name, type, clink.group(1)))
+ self.core.api.uploadContainer( "%s.%s" %(package_name, type), self.load(clink.group(1)))
return "Found it"
return package_links
@@ -281,7 +266,7 @@ class LinkCryptWs(Crypter):
cnl_line = None
for line in self.container_html:
- if("cnl" in line):
+ if "cnl" in line:
cnl_line = line
break
diff --git a/pyload/plugins/hoster/OneFichierCom.py b/pyload/plugins/hoster/OneFichierCom.py
index b30358ff8..6e04776b5 100644
--- a/pyload/plugins/hoster/OneFichierCom.py
+++ b/pyload/plugins/hoster/OneFichierCom.py
@@ -8,7 +8,7 @@ from pyload.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class OneFichierCom(SimpleHoster):
__name__ = "OneFichierCom"
__type__ = "hoster"
- __version__ = "0.72"
+ __version__ = "0.73"
__pattern__ = r'https?://(?:www\.)?(?:(?P<ID1>\w+)\.)?(?P<HOST>1fichier\.com|alterupload\.com|cjoint\.net|d(es)?fichiers\.com|dl4free\.com|megadl\.fr|mesfichiers\.org|piecejointe\.net|pjointe\.com|tenvoi\.com)(?:/\?(?P<ID2>\w+))?'
diff --git a/pyload/plugins/internal/SimpleCrypter.py b/pyload/plugins/internal/SimpleCrypter.py
index 460084919..227c93da6 100644
--- a/pyload/plugins/internal/SimpleCrypter.py
+++ b/pyload/plugins/internal/SimpleCrypter.py
@@ -13,7 +13,7 @@ from pyload.utils import fixup, html_unescape
class SimpleCrypter(Crypter):
__name__ = "SimpleCrypter"
__type__ = "crypter"
- __version__ = "0.29"
+ __version__ = "0.30"
__pattern__ = r'^unmatchable$'
__config__ = [("use_subfolder", "bool", "Save package to subfolder", True), #: Overrides core.config['general']['folder_per_package']
@@ -76,12 +76,16 @@ class SimpleCrypter(Crypter):
if self.LOGIN_PREMIUM and not self.premium:
self.fail(_("Required premium account not found"))
+ self.req.setOption("timeout", 120)
+
if isinstance(self.COOKIES, list):
set_cookies(self.req.cj, self.COOKIES)
self.pyfile.url = replace_patterns(self.pyfile.url, self.URL_REPLACEMENTS)
- self.html = self.load(self.pyfile.url, decode=not self.TEXT_ENCODING, cookies=bool(self.COOKIES))
+
+ def preload(self):
+ self.html = self.load(self.pyfile.url, cookies=bool(self.COOKIES), decode=not self.TEXT_ENCODING)
if isinstance(self.TEXT_ENCODING, basestring):
self.html = unicode(self.html, self.TEXT_ENCODING)
@@ -90,11 +94,12 @@ class SimpleCrypter(Crypter):
def decrypt(self, pyfile):
self.prepare()
+ self.preload()
+
if self.html is None:
self.fail(_("No html retrieved"))
- if not self.info:
- self.getFileInfo()
+ self.checkInfo()
self.links = self.getLinks()
@@ -107,13 +112,8 @@ class SimpleCrypter(Crypter):
self.packages = [(self.info['name'], self.links, self.info['folder'])]
- def getFileInfo(self):
- name, size, status, url = parseFileInfo(self)
-
- if name and name != url:
- self.pyfile.name = name
- else:
- self.pyfile.name = self.info['name'] = urlparse(html_unescape(name)).path.split("/")[-1]
+ def checkStatus(self):
+ status = self.info['status']
if status is 1:
self.offline()
@@ -121,10 +121,31 @@ class SimpleCrypter(Crypter):
elif status is 6:
self.tempOffline()
+
+ def checkName(self):
+ name = self.info['name']
+ url = self.info['url']
+
+ if name and name != url:
+ self.pyfile.name = name
+ else:
+ self.pyfile.name = self.info['name'] = urlparse(html_unescape(name)).path.split('/')[-1]
+
self.info['folder'] = self.pyfile.name
- self.logDebug("FILE NAME: %s" % self.pyfile.name)
- return self.info
+ self.logDebug("File name: %s" % self.pyfile.name)
+
+
+ def checkInfo(self):
+ self.logDebug(_("File info (previous): %s") % self.info)
+
+ info = parseFileInfo(self.pyfile.url, self.html or "")
+ self.info.update(info)
+
+ self.logDebug(_("File info (current): %s") % self.info)
+
+ self.checkName()
+ self.checkStatus()
def getLinks(self):
diff --git a/pyload/plugins/internal/SimpleHoster.py b/pyload/plugins/internal/SimpleHoster.py
index afd61d821..0efcb9c6f 100644
--- a/pyload/plugins/internal/SimpleHoster.py
+++ b/pyload/plugins/internal/SimpleHoster.py
@@ -7,6 +7,7 @@ from urlparse import urlparse
from pycurl import FOLLOWLOCATION
+from pyload.datatype.PyFile import statusMap
from pyload.network.CookieJar import CookieJar
from pyload.network.RequestFactory import getURL
from pyload.plugins.internal.Hoster import Hoster
@@ -57,122 +58,50 @@ def parseHtmlForm(attr_str, html, input_names=None):
continue
elif hasattr(val, "search") and re.match(val, inputs[key]):
continue
- break # attibute value does not match
+ break #: attibute value does not match
else:
- break # attibute name does not match
+ break #: attibute name does not match
else:
- return action, inputs # passed attribute check
+ return action, inputs #: passed attribute check
else:
# no attribute check
return action, inputs
- return {}, None # no matching form found
+ return {}, None #: no matching form found
-def parseFileInfo(self, url="", html=""):
- if not url and hasattr(self, "pyfile"):
- url = self.pyfile.url
+def parseFileInfo(plugin, url="", html=""):
+ info = plugin.getInfo(url, html)
+ return info['name'], info['size'], info['status'], info['url']
- info = {'name': url, 'size': 0, 'status': 3}
- if not html:
- if url:
- return create_getInfo(self)([url]).next()
-
- elif hasattr(self, "req") and self.req.http.code == '404':
- info['status'] = 1
-
- elif hasattr(self, "html"):
- html = self.html
-
- if html:
- if hasattr(self, "OFFLINE_PATTERN") and re.search(self.OFFLINE_PATTERN, html):
- info['status'] = 1
-
- elif hasattr(self, "TEMP_OFFLINE_PATTERN") and re.search(self.TEMP_OFFLINE_PATTERN, html):
- info['status'] = 6
-
- else:
- online = False
- try:
- info.update(re.match(self.__pattern__, url).groupdict())
- except:
- pass
-
- for pattern in ("INFO_PATTERN", "NAME_PATTERN", "SIZE_PATTERN"):
- try:
- info.update(re.search(getattr(self, pattern), html).groupdict())
- online = True
- except AttributeError:
- continue
-
- if online:
- # File online, return name and size
- info['status'] = 2
+def create_getInfo(plugin):
+ return lambda urls: list(plugin.parseInfo(urls))
- if 'N' in info:
- info['name'] = replace_patterns(info['N'].strip(), self.NAME_REPLACEMENTS)
- if 'S' in info:
- size = replace_patterns(info['S'] + info['U'] if 'U' in info else info['S'], self.SIZE_REPLACEMENTS)
- info['size'] = parseFileSize(size)
+def timestamp():
+ return int(time() * 1000)
- elif isinstance(info['size'], basestring):
- unit = info['units'] if 'units' in info else None
- info['size'] = parseFileSize(info['size'], unit)
- if hasattr(self, "html") and self.html is None:
- self.html = html
+#@TODO: Move to hoster class in 0.4.10
+def _getDirectLink(self, url):
+ self.req.http.c.setopt(FOLLOWLOCATION, 0)
- if hasattr(self, "info"):
- try:
- self.logDebug(_("File info (before update): %s") % self.info)
- except:
- pass
+ html = self.load(url, ref=True, decode=True)
- self.info.update(info)
+ self.req.http.c.setopt(FOLLOWLOCATION, 1)
+ if self.getInfo(url, html)['status'] is not 2:
try:
- self.logDebug(_("File info (after update): %s") % self.info)
+ return re.search(r'Location\s*:\s*(.+)', self.req.http.header, re.I).group(1).rstrip() #@TODO: Remove .rstrip() in 0.4.10
except:
pass
- return info['name'], info['size'], info['status'], url
-
-
-def create_getInfo(plugin):
-
- def getInfo(urls):
- for url in urls:
- if hasattr(plugin, "COOKIES") and isinstance(plugin.COOKIES, list):
- cj = CookieJar(plugin.__name__)
- set_cookies(cj, plugin.COOKIES)
- else:
- cj = None
-
- if hasattr(plugin, "URL_REPLACEMENTS"):
- url = replace_patterns(url, plugin.URL_REPLACEMENTS)
-
- if hasattr(plugin, "TEXT_ENCODING"):
- html = getURL(url, cookies=bool(cj), decode=not plugin.TEXT_ENCODING)
- if isinstance(plugin.TEXT_ENCODING, basestring):
- html = unicode(html, plugin.TEXT_ENCODING)
- else:
- html = getURL(url, cookies=bool(cj), decode=True)
-
- yield parseFileInfo(plugin, url, html)
-
- return getInfo
-
-
-def timestamp():
- return int(time() * 1000)
-
class SimpleHoster(Hoster):
__name__ = "SimpleHoster"
__type__ = "hoster"
- __version__ = "0.54"
+ __version__ = "0.56"
__pattern__ = r'^unmatchable$'
@@ -217,13 +146,74 @@ class SimpleHoster(Hoster):
SIZE_REPLACEMENTS = []
URL_REPLACEMENTS = []
- TEXT_ENCODING = False #: Set to True or encoding name if encoding in http header is not correct
- COOKIES = True #: or False or list of tuples [(domain, name, value)]
+ TEXT_ENCODING = False #: Set to True or encoding name if encoding value in http header is not correct
+ COOKIES = True #: or False or list of tuples [(domain, name, value)]
FORCE_CHECK_TRAFFIC = False #: Set to True to force checking traffic left for premium account
+ CHECK_DIRECT_LINK = None #: Set to None self-set to True if self.account else to False
+ CONTENT_DISPOSITION = False #: Set to True to replace file name with content-disposition value in http header
+
+
+ @classmethod
+ def parseInfo(cls, urls):
+ for url in urls:
+ url = replace_patterns(url, cls.URL_REPLACEMENTS)
+ yield cls.getInfo(url)
+
+
+ @classmethod
+ def getInfo(cls, url="", html=""):
+ info = {'name': url or _("Unknown"), 'size': 0, 'status': 3, 'url': url}
+
+ if not html:
+ if url:
+ html = getURL(url, cookies=cls.COOKIES, decode=not cls.TEXT_ENCODING)
+ if isinstance(cls.TEXT_ENCODING, basestring):
+ html = unicode(html, cls.TEXT_ENCODING)
+ else:
+ return info
+
+ online = False
+
+ if hasattr(cls, "OFFLINE_PATTERN") and re.search(cls.OFFLINE_PATTERN, html):
+ info['status'] = 1
+
+ elif hasattr(cls, "TEMP_OFFLINE_PATTERN") and re.search(cls.TEMP_OFFLINE_PATTERN, html):
+ info['status'] = 6
+
+ else:
+ try:
+ info.update(re.match(cls.__pattern__, url).groupdict())
+ except:
+ pass
+
+ for pattern in ("INFO_PATTERN", "NAME_PATTERN", "SIZE_PATTERN"):
+ try:
+ attr = getattr(cls, pattern)
+ info.update(re.search(attr, html).groupdict())
+ except AttributeError:
+ continue
+ else:
+ online = True
+
+ if online:
+ info['status'] = 2
+
+ if 'N' in info:
+ info['name'] = replace_patterns(info['N'].strip(), cls.NAME_REPLACEMENTS)
+
+ if 'S' in info:
+ size = replace_patterns(info['S'] + info['U'] if 'U' in info else info['S'], cls.SIZE_REPLACEMENTS)
+ info['size'] = parseFileSize(size)
+
+ elif isinstance(info['size'], basestring):
+ unit = info['units'] if 'units' in info else None
+ info['size'] = parseFileSize(info['size'], unit)
+
+ return info
def init(self):
- self.info = {}
+ self.link = "" #@TODO: Move to hoster class in 0.4.10
def setup(self):
@@ -231,90 +221,117 @@ class SimpleHoster(Hoster):
def prepare(self):
- if isinstance(self.COOKIES, list):
- set_cookies(self.req.cj, self.COOKIES)
+ if self.CHECK_DIRECT_LINK is None:
+ self.CHECK_DIRECT_LINK = bool(self.account)
self.req.setOption("timeout", 120)
+ if isinstance(self.COOKIES, list):
+ set_cookies(self.req.cj, self.COOKIES)
+
self.pyfile.url = replace_patterns(self.pyfile.url, self.URL_REPLACEMENTS)
- if self.premium:
- self.logDebug(_("Looking for direct download link..."))
- direct_link = self.getDirectLink(self.pyfile.url)
- if direct_link:
- return direct_link
- else:
- self.logDebug(_("No direct download link found"))
- self.html = self.load(self.pyfile.url, decode=not self.TEXT_ENCODING, cookies=bool(self.COOKIES))
+ def preload(self):
+ self.html = self.load(self.pyfile.url, cookies=bool(self.COOKIES), decode=not self.TEXT_ENCODING)
if isinstance(self.TEXT_ENCODING, basestring):
self.html = unicode(self.html, self.TEXT_ENCODING)
def process(self, pyfile):
- direct_link = self.prepare()
+ self.prepare()
- if isinstance(direct_link, basestring):
- self.logInfo(_("Direct download link detected"))
- self.download(direct_link, ref=True, cookies=True, disposition=True)
+ if self.CHECK_DIRECT_LINK:
+ self.logDebug("Looking for direct download link...")
+ self.handleDirect()
- elif self.html is None:
- self.fail(_("No html retrieved"))
+ if not self.link:
+ self.preload()
+
+ #@TODO: Remove in 0.4.10
+ if self.html is None:
+ self.fail(_("No html retrieved"))
+
+ info = self.getInfo(pyfile.url, self.html)
+ self._updateInfo(info)
+
+ self.checkNameSize()
- else:
premium_only = hasattr(self, 'PREMIUM_ONLY_PATTERN') and re.search(self.PREMIUM_ONLY_PATTERN, self.html)
if not premium_only: #: Usually premium only pages doesn't show any file information
- self.getFileInfo()
+ self.checkStatus()
if self.premium and (not self.FORCE_CHECK_TRAFFIC or self.checkTrafficLeft()):
- self.logDebug("Handle as premium download")
+ self.logDebug("Handled as premium download")
self.handlePremium()
+
elif premium_only:
self.fail(_("Link require a premium account to be handled"))
+
else:
- self.logDebug("Handle as free download")
+ self.logDebug("Handled as free download")
self.handleFree()
+ if self.link:
+ self.download(self.link, disposition=self.CONTENT_DISPOSITION)
- def getDirectLink(self, url):
- self.req.http.c.setopt(FOLLOWLOCATION, 0)
- html = self.load(url, ref=True, decode=True)
+ def checkStatus(self):
+ status = self.info['status']
- self.req.http.c.setopt(FOLLOWLOCATION, 1)
+ if status is 1:
+ self.offline()
- if parseFileInfo(self, url, html)[2] is not 2:
- try:
- return re.search(r'Location\s*:\s*(.+)', self.req.http.header, re.I).group(1)
- except:
- pass
+ elif status is 6:
+ self.tempOffline()
+
+ elif status is not 2:
+ self.error(_("File status: %s") % [key for key, val in statusMap.iteritems() if val == status][0],
+ _("File info: %s") % self.info)
- def getFileInfo(self):
- name, size, status, url = parseFileInfo(self, html=self.html)
+ def checkNameSize(self):
+ name = self.info['name']
+ size = self.info['size']
+ url = self.info['url']
if name and name != url:
self.pyfile.name = name
else:
- self.pyfile.name = self.info['name'] = urlparse(html_unescape(name)).path.split("/")[-1]
+ self.pyfile.name = self.info['name'] = urlparse(html_unescape(name)).path.split('/')[-1]
- if status is 1:
- self.offline()
+ if size > 0:
+ self.pyfile.size = size
+ else:
+ self.logError(_("File size not found"))
- elif status is 6:
- self.tempOffline()
+ self.logDebug("File name: %s" % self.pyfile.name, "File size: %s" % self.pyfile.size or _("Unknown"))
- elif status is not 2:
- self.error(_("File info: %s") % self.info)
- if size:
- self.pyfile.size = size
- else:
- self.logError(_("File size not parsed"))
+ def checkInfo(self):
+ self._updateInfo(self.getInfo(self.pyfile.url, self.html or ""))
+ self.checkNameSize()
+ self.checkStatus()
+
+
+ def _updateInfo(self, info):
+ self.logDebug(_("File info (previous): %s") % self.info)
+ self.info.update(info)
+ self.logDebug(_("File info (current): %s") % self.info)
+
+
+ def handleDirect(self):
+ self.link = _getDirectLink(self, self.pyfile.url)
+
+ if self.link:
+ self.logInfo(_("Direct download link detected"))
+
+ self._updateInfo(self.getInfo(self.pyfile.url))
+ self.checkNameSize()
- self.logDebug("FILE NAME: %s" % self.pyfile.name, "FILE SIZE: %d" % self.pyfile.size or _("Unknown"))
- return self.info
+ else:
+ self.logDebug(_("Direct download link not found"))
def handleFree(self):
@@ -326,11 +343,10 @@ class SimpleHoster(Hoster):
if m is None:
self.error(_("Free download link not found"))
- link = m.group(1)
+ self.link = m.group(1)
+
except Exception, e:
self.fail(str(e))
- else:
- self.download(link, ref=True, cookies=True, disposition=True)
def handlePremium(self):
@@ -342,19 +358,18 @@ class SimpleHoster(Hoster):
if m is None:
self.error(_("Premium download link not found"))
- link = m.group(1)
+ self.link = m.group(1)
+
except Exception, e:
self.fail(str(e))
- else:
- self.download(link, ref=True, cookies=True, disposition=True)
def longWait(self, wait_time=None, max_tries=3):
if wait_time and isinstance(wait_time, (int, long, float)):
- time_str = "%dh %dm" % divmod(wait_time / 60, 60)
+ time_str = "%dh %dm" % divmod(wait_time / 60, 60)
else:
wait_time = 900
- time_str = _("(unknown time)")
+ time_str = _("(unknown time)")
max_tries = 100
self.logInfo(_("Download limit reached, reconnect or wait %s") % time_str)
diff --git a/pyload/plugins/internal/XFSHoster.py b/pyload/plugins/internal/XFSHoster.py
index 87ddfab3f..a9fe2d0ad 100644
--- a/pyload/plugins/internal/XFSHoster.py
+++ b/pyload/plugins/internal/XFSHoster.py
@@ -16,7 +16,7 @@ from pyload.utils import html_unescape
class XFSHoster(SimpleHoster):
__name__ = "XFSHoster"
__type__ = "hoster"
- __version__ = "0.15"
+ __version__ = "0.17"
__pattern__ = r'^unmatchable$'
@@ -41,7 +41,7 @@ class XFSHoster(SimpleHoster):
OFFLINE_PATTERN = r'>\s*\w+ (Not Found|file (was|has been) removed)'
TEMP_OFFLINE_PATTERN = r'>\s*\w+ server (is in )?(maintenance|maintainance)'
- WAIT_PATTERN = r'<span id="countdown_str">.*?>(\d+)</span>'
+ WAIT_PATTERN = r'<span id="countdown_str">.*?>(\d+)</span>|id="countdown" value=".*?(\d+).*?"'
OVR_LINK_PATTERN = r'<h2>Download Link</h2>\s*<textarea[^>]*>([^<]+)'
LINK_PATTERN = None #: final download url pattern
@@ -71,6 +71,9 @@ class XFSHoster(SimpleHoster):
pattern = r'(https?://(www\.)?([^/]*?%s|\d+\.\d+\.\d+\.\d+)(\:\d+)?(/d/|(/files)?/\d+/\w+/).+?)["\'<]'
self.LINK_PATTERN = pattern % self.HOSTER_DOMAIN.replace('.', '\.')
+ if self.CHECK_DIRECT_LINK is None:
+ self.CHECK_DIRECT_LINK = bool(self.premium)
+
self.captcha = None
self.errmsg = None
self.passwords = self.getPassword().splitlines()
@@ -122,8 +125,6 @@ class XFSHoster(SimpleHoster):
data = self.getPostParameters()
- # sleep(10)
-
self.req.http.c.setopt(FOLLOWLOCATION, 0)
self.html = self.load(self.pyfile.url, post=data, ref=True, decode=True)
@@ -266,7 +267,7 @@ class XFSHoster(SimpleHoster):
self.logDebug(inputs)
- if 'op' in inputs and inputs['op'] in ("download2", "download3"):
+ if 'op' in inputs and inputs['op'] in ("download1", "download2", "download3"):
if "password" in inputs:
if self.passwords:
inputs['password'] = self.passwords.pop(0)