Diffstat (limited to 'module/plugins/hoster/FilefactoryCom.py')
-rw-r--r--  module/plugins/hoster/FilefactoryCom.py  139
1 file changed, 52 insertions, 87 deletions
diff --git a/module/plugins/hoster/FilefactoryCom.py b/module/plugins/hoster/FilefactoryCom.py
index 98a97121c..30c1b85ec 100644
--- a/module/plugins/hoster/FilefactoryCom.py
+++ b/module/plugins/hoster/FilefactoryCom.py
@@ -1,118 +1,83 @@
# -*- coding: utf-8 -*-
-############################################################################
-# This program is free software: you can redistribute it and/or modify #
-# it under the terms of the GNU Affero General Public License as #
-# published by the Free Software Foundation, either version 3 of the #
-# License, or (at your option) any later version. #
-# #
-# This program is distributed in the hope that it will be useful, #
-# but WITHOUT ANY WARRANTY; without even the implied warranty of #
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
-# GNU Affero General Public License for more details. #
-# #
-# You should have received a copy of the GNU Affero General Public License #
-# along with this program. If not, see <http://www.gnu.org/licenses/>. #
-############################################################################
import re
-from module.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
+from urlparse import urljoin
+
from module.network.RequestFactory import getURL
+from module.plugins.internal.SimpleHoster import SimpleHoster, parseFileInfo
def getInfo(urls):
for url in urls:
h = getURL(url, just_header=True)
m = re.search(r'Location: (.+)\r\n', h)
- if m and not re.match(m.group(1), FilefactoryCom.__pattern__): # It's a direct link! Skipping
+ if m and not re.match(m.group(1), FilefactoryCom.__pattern__): #: It's a direct link! Skipping
yield (url, 0, 3, url)
- else: # It's a standard html page
- file_info = parseFileInfo(FilefactoryCom, url, getURL(url))
- yield file_info
+ else: #: It's a standard html page
+ yield parseFileInfo(FilefactoryCom, url, getURL(url))
class FilefactoryCom(SimpleHoster):
- __name__ = "FilefactoryCom"
- __type__ = "hoster"
- __pattern__ = r'https?://(?:www\.)?filefactory\.com/file/(?P<id>[a-zA-Z0-9]+)'
- __version__ = "0.50"
+ __name__ = "FilefactoryCom"
+ __type__ = "hoster"
+ __version__ = "0.53"
+
+ __pattern__ = r'https?://(?:www\.)?filefactory\.com/(file|trafficshare/\w+)/\w+'
+
__description__ = """Filefactory.com hoster plugin"""
- __author_name__ = "stickell"
- __author_mail__ = "l.stickell@yahoo.it"
+ __license__ = "GPLv3"
+ __authors__ = [("stickell", "l.stickell@yahoo.it"),
+ ("Walter Purcaro", "vuolter@gmail.com")]
- FILE_INFO_PATTERN = r'<div id="file_name"[^>]*>\s*<h2>(?P<N>[^<]+)</h2>\s*<div id="file_info">\s*(?P<S>[\d.]+) (?P<U>\w+) uploaded'
- LINK_PATTERN = r'<a href="(https?://[^"]+)"[^>]*><i[^>]*></i> Download with FileFactory Premium</a>'
+
+ INFO_PATTERN = r'<div id="file_name"[^>]*>\s*<h2>(?P<N>[^<]+)</h2>\s*<div id="file_info">\s*(?P<S>[\d.,]+) (?P<U>[\w^_]+) uploaded'
OFFLINE_PATTERN = r'<h2>File Removed</h2>|This file is no longer available'
- PREMIUM_ONLY_PATTERN = r'>Premium Account Required<'
- SH_COOKIES = [(".filefactory.com", "locale", "en_US.utf8")]
+ LINK_FREE_PATTERN = LINK_PREMIUM_PATTERN = r'"([^"]+filefactory\.com/get.+?)"'
+
+ WAIT_PATTERN = r'<div id="countdown_clock" data-delay="(\d+)">'
+ PREMIUM_ONLY_PATTERN = r'>Premium Account Required'
+ COOKIES = [("filefactory.com", "locale", "en_US.utf8")]
- def handleFree(self):
- self.html = self.load(self.pyfile.url, decode=True)
+
+ def handleFree(self, pyfile):
if "Currently only Premium Members can download files larger than" in self.html:
- self.fail("File too large for free download")
+ self.fail(_("File too large for free download"))
elif "All free download slots on this server are currently in use" in self.html:
- self.retry(50, 15 * 60, "All free slots are busy")
+ self.retry(50, 15 * 60, _("All free slots are busy"))
+
+ m = re.search(self.LINK_FREE_PATTERN, self.html)
+ if m is None:
+ self.error(_("Free download link not found"))
- m = re.search(r'data-href(?:-direct)?="(http://[^"]+)"', self.html)
+ dl_link = m.group(1)
+
+ m = re.search(self.WAIT_PATTERN, self.html)
if m:
- t = re.search(r'<div id="countdown_clock" data-delay="(\d+)">', self.html)
- if t:
- t = t.group(1)
- else:
- self.logDebug("Unable to detect countdown duration. Guessing 60 seconds")
- t = 60
- self.wait(t)
- direct = m.group(1)
- else: # This section could be completely useless now
- # Load the page that contains the direct link
- url = re.search(r"document\.location\.host \+\s*'(.+)';", self.html)
- if url is None:
- self.parseError('Unable to detect free link')
- url = 'http://www.filefactory.com' + url.group(1)
- self.html = self.load(url, decode=True)
-
- # Free downloads wait time
- waittime = re.search(r'id="startWait" value="(\d+)"', self.html)
- if not waittime:
- self.parseError('Unable to detect wait time')
- self.wait(int(waittime.group(1)))
-
- # Parse the direct link and download it
- direct = re.search(r'data-href(?:-direct)?="(.*)" class="button', self.html)
- if not direct:
- self.parseError('Unable to detect free direct link')
- direct = direct.group(1)
-
- self.logDebug('DIRECT LINK: ' + direct)
- self.download(direct, disposition=True)
-
- check = self.checkDownload({"multiple": "You are currently downloading too many files at once.",
- "error": '<div id="errorMessage">'})
+ self.wait(m.group(1))
+
+ self.download(dl_link, disposition=True)
+
+ check = self.checkDownload({'multiple': "You are currently downloading too many files at once.",
+ 'error' : '<div id="errorMessage">'})
if check == "multiple":
self.logDebug("Parallel downloads detected; waiting 15 minutes")
- self.retry(wait_time=15 * 60, reason="Parallel downloads")
+ self.retry(wait_time=15 * 60, reason=_("Parallel downloads"))
+
elif check == "error":
- self.fail("Unknown error")
-
- def handlePremium(self):
- header = self.load(self.pyfile.url, just_header=True)
- if 'location' in header:
- url = header['location'].strip()
- if not url.startswith("http://"):
- url = "http://www.filefactory.com" + url
- elif 'content-disposition' in header:
- url = self.pyfile.url
- else:
- self.logInfo('You could enable "Direct Downloads" on http://filefactory.com/account/')
- html = self.load(self.pyfile.url)
- m = re.search(self.LINK_PATTERN, html)
+ self.error(_("Unknown error"))
+
+
+ def handlePremium(self, pyfile):
+ self.link = self.directLink(self.load(pyfile.url, just_header=True))
+
+ if not self.link:
+ html = self.load(pyfile.url)
+ m = re.search(self.LINK_PREMIUM_PATTERN, html)
if m:
- url = m.group(1)
+ self.link = m.group(1)
else:
- self.parseError('Unable to detect premium direct link')
-
- self.logDebug('DIRECT PREMIUM LINK: ' + url)
- self.download(url, disposition=True)
+ self.error(_("Premium download link not found"))