Diffstat (limited to 'module/plugins/internal/SimpleCrypter.py')
-rw-r--r-- | module/plugins/internal/SimpleCrypter.py | 190
1 file changed, 121 insertions, 69 deletions
diff --git a/module/plugins/internal/SimpleCrypter.py b/module/plugins/internal/SimpleCrypter.py
index dc93bef4d..b2622b5e0 100644
--- a/module/plugins/internal/SimpleCrypter.py
+++ b/module/plugins/internal/SimpleCrypter.py
@@ -2,117 +2,169 @@

 import re

+from urlparse import urljoin, urlparse
+
 from module.plugins.Crypter import Crypter
-from module.utils import html_unescape
-from module.plugins.internal.SimpleHoster import PluginParseError, replace_patterns, set_cookies
+from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo, replace_patterns, set_cookies
+from module.utils import fixup


-class SimpleCrypter(Crypter):
-    __name__ = "SimpleCrypter"
-    __version__ = "0.10"
-    __type__ = "crypter"
+class SimpleCrypter(Crypter, SimpleHoster):
+    __name__    = "SimpleCrypter"
+    __type__    = "crypter"
+    __version__ = "0.42"

-    __pattern__ = None
+    __pattern__ = r'^unmatchable$'
+    __config__  = [("use_subfolder", "bool", "Save package to subfolder", True),  #: Overrides core.config['general']['folder_per_package']
+                   ("subfolder_per_package", "bool", "Create a subfolder for each package", True)]

     __description__ = """Simple decrypter plugin"""
-    __author_name__ = ("stickell", "zoidberg", "Walter Purcaro")
-    __author_mail__ = ("l.stickell@yahoo.it", "zoidberg@mujmail.cz", "vuolter@gmail.com")
+    __license__     = "GPLv3"
+    __authors__     = [("stickell", "l.stickell@yahoo.it"),
+                       ("zoidberg", "zoidberg@mujmail.cz"),
+                       ("Walter Purcaro", "vuolter@gmail.com")]
+

     """
     Following patterns should be defined by each crypter:

-      LINK_PATTERN: group(1) must be a download link or a regex to catch more links
-        example: LINK_PATTERN = r'<div class="link"><a href="(http://speedload.org/\w+)'
+      LINK_PATTERN: Download link or regex to catch links in group(1)
+        example: LINK_PATTERN = r'<div class="link"><a href="(.+?)"'

-      TITLE_PATTERN: (optional) The group defined by 'title' should be the title
-        example: TITLE_PATTERN = r'<title>Files of: (?P<title>[^<]+) folder</title>'
+      NAME_PATTERN: (optional) folder name or page title
+        example: NAME_PATTERN = r'<title>Files of: (?P<N>[^<]+) folder</title>'

-      OFFLINE_PATTERN: (optional) Checks if the file is yet available online
+      OFFLINE_PATTERN: (optional) Checks if the page is unreachable
         example: OFFLINE_PATTERN = r'File (deleted|not found)'

-      TEMP_OFFLINE_PATTERN: (optional) Checks if the file is temporarily offline
+      TEMP_OFFLINE_PATTERN: (optional) Checks if the page is temporarily unreachable
         example: TEMP_OFFLINE_PATTERN = r'Server maintainance'

-    If it's impossible to extract the links using the LINK_PATTERN only you can override the getLinks method.
+    You can override the getLinks method if you need a more sophisticated way to extract the links.
+
+
+    If the links are splitted on multiple pages you can define the PAGES_PATTERN regex:

-    If the links are disposed on multiple pages you need to define a pattern:
+      PAGES_PATTERN: (optional) group(1) should be the number of overall pages containing the links
+        example: PAGES_PATTERN = r'Pages: (\d+)'

-      PAGES_PATTERN: The group defined by 'pages' must be the total number of pages
-        example: PAGES_PATTERN = r'Pages: (?P<pages>\d+)'
+    and its loadPage method:

-    and a function:
-      loadPage(self, page_n):
-          return the html of the page number 'page_n'
+
+      def loadPage(self, page_n):
+          return the html of the page number page_n
     """

-    URL_REPLACEMENTS = []
+    LINK_PATTERN = None

-    SH_COOKIES = True  # or False or list of tuples [(domain, name, value)]
+    NAME_REPLACEMENTS = [("&#?\w+;", fixup)]
+    URL_REPLACEMENTS  = []
+    TEXT_ENCODING     = False  #: Set to True or encoding name if encoding in http header is not correct
+    COOKIES           = True   #: or False or list of tuples [(domain, name, value)]

-    def setup(self):
-        if isinstance(self.SH_COOKIES, list):
-            set_cookies(self.req.cj, self.SH_COOKIES)
+    LOGIN_ACCOUNT = False
+    LOGIN_PREMIUM = False

-    def decrypt(self, pyfile):
-        pyfile.url = replace_patterns(pyfile.url, self.URL_REPLACEMENTS)
-        self.html = self.load(pyfile.url, decode=True)
+    #@TODO: Remove in 0.4.10
+    def init(self):
+        account_name = (self.__name__ + ".py").replace("Folder.py", "").replace(".py", "")
+        account = self.pyfile.m.core.accountManager.getAccountPlugin(account_name)
+
+        if account and account.canUse():
+            self.user, data = account.selectAccount()
+            self.req = account.getAccountRequest(self.user)
+            self.premium = account.isPremium(self.user)
+
+            self.account = account
+
+
+    def prepare(self):
+        self.pyfile.error = ""  #@TODO: Remove in 0.4.10
+
+        self.info = {}
+        self.html = ""
+        self.links = []  #@TODO: Move to hoster class in 0.4.10
+
+        if self.LOGIN_PREMIUM and not self.premium:
+            self.fail(_("Required premium account not found"))
+
+        if self.LOGIN_ACCOUNT and not self.account:
+            self.fail(_("Required account not found"))
+
+        self.req.setOption("timeout", 120)
+
+        if isinstance(self.COOKIES, list):
+            set_cookies(self.req.cj, self.COOKIES)
+
+        self.pyfile.url = replace_patterns(self.pyfile.url, self.URL_REPLACEMENTS)

-        self.checkOnline()
-        package_name, folder_name = self.getPackageNameAndFolder()
+    def decrypt(self, pyfile):
+        self.prepare()
+
+        self.preload()
+        self.checkInfo()

-        self.package_links = self.getLinks()
+        self.links = self.getLinks()

         if hasattr(self, 'PAGES_PATTERN') and hasattr(self, 'loadPage'):
-            self.handleMultiPages()
+            self.handlePages(pyfile)
+
+        self.logDebug("Package has %d links" % len(self.links))
+
+        if self.links:
+            self.packages = [(self.info['name'], self.links, self.info['folder'])]
+
+        elif not self.urls and not self.packages:  #@TODO: Remove in 0.4.10
+            self.fail(_("No link grabbed"))
+
+
+    def checkNameSize(self, getinfo=True):
+        if not self.info or getinfo:
+            self.logDebug("File info (BEFORE): %s" % self.info)
+            self.info.update(self.getInfo(self.pyfile.url, self.html))
+            self.logDebug("File info (AFTER): %s" % self.info)
+
+        try:
+            url = self.info['url']
+            name = self.info['name']
+            if name and name != url:
+                self.pyfile.name = name

-        self.logDebug('Package has %d links' % len(self.package_links))
+        except Exception:
+            pass
+
+        try:
+            folder = self.info['folder'] = self.pyfile.name
+
+        except Exception:
+            pass
+
+        self.logDebug("File name: %s" % self.pyfile.name,
+                      "File folder: %s" % self.pyfile.name)

-        if self.package_links:
-            self.packages = [(package_name, self.package_links, folder_name)]
-        else:
-            self.fail('Could not extract any links')


     def getLinks(self):
         """
         Returns the links extracted from self.html
         You should override this only if it's impossible to extract links using only the LINK_PATTERN.
         """
-        return re.findall(self.LINK_PATTERN, self.html)
-
-    def checkOnline(self):
-        if hasattr(self, "OFFLINE_PATTERN") and re.search(self.OFFLINE_PATTERN, self.html):
-            self.offline()
-        elif hasattr(self, "TEMP_OFFLINE_PATTERN") and re.search(self.TEMP_OFFLINE_PATTERN, self.html):
-            self.tempOffline()
-
-    def getPackageNameAndFolder(self):
-        if hasattr(self, 'TITLE_PATTERN'):
-            m = re.search(self.TITLE_PATTERN, self.html)
-            if m:
-                name = folder = html_unescape(m.group('title').strip())
-                self.logDebug("Found name [%s] and folder [%s] in package info" % (name, folder))
-                return name, folder
-
-        name = self.pyfile.package().name
-        folder = self.pyfile.package().folder
-        self.logDebug("Package info not found, defaulting to pyfile name [%s] and folder [%s]" % (name, folder))
-        return name, folder
-
-    def handleMultiPages(self):
-        pages = re.search(self.PAGES_PATTERN, self.html)
-        if pages:
-            pages = int(pages.group('pages'))
-        else:
+        url_p = urlparse(self.pyfile.url)
+        baseurl = "%s://%s" % (url_p.scheme, url_p.netloc)
+
+        return [urljoin(baseurl, link) if not urlparse(link).scheme else link \
+                for link in re.findall(self.LINK_PATTERN, self.html)]
+
+
+    def handlePages(self, pyfile):
+        try:
+            pages = int(re.search(self.PAGES_PATTERN, self.html).group(1))
+        except Exception:
             pages = 1

         for p in xrange(2, pages + 1):
             self.html = self.loadPage(p)
-            self.package_links += self.getLinks()
-
-    def parseError(self, msg):
-        raise PluginParseError(msg)
+            self.links += self.getLinks()
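
For orientation, here is a minimal sketch of what a hoster-specific crypter built on this refactored SimpleCrypter could look like. Everything about "ExampleFolder" (the hoster name, the URLs, the regexes) is invented for illustration; only the attribute names come from the docstring in the diff above.

# -*- coding: utf-8 -*-
# Illustrative only: "ExampleFolder", example.com and all regexes below are
# made-up placeholders, not a real pyLoad plugin.

from module.plugins.internal.SimpleCrypter import SimpleCrypter


class ExampleFolder(SimpleCrypter):
    __name__    = "ExampleFolder"
    __type__    = "crypter"
    __version__ = "0.01"

    __pattern__ = r'http://(?:www\.)?example\.com/folder/\w+'

    __description__ = """Example.com folder decrypter (sketch)"""
    __license__     = "GPLv3"
    __authors__     = [("example", "example@example.com")]

    #: group(1) must capture each download link on the folder page
    LINK_PATTERN = r'<div class="link"><a href="(.+?)"'

    #: group 'N' becomes the package name and folder
    NAME_PATTERN = r'<title>Files of: (?P<N>[^<]+) folder</title>'

    #: a match marks the folder as offline
    OFFLINE_PATTERN = r'Folder (deleted|not found)'

Because the new getLinks() resolves scheme-less links against the folder URL via urljoin, LINK_PATTERN may capture relative hrefs as well as absolute ones.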
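When a folder spreads its links over several pages, the docstring asks for a PAGES_PATTERN plus a loadPage method; handlePages() then walks pages 2..N and reapplies LINK_PATTERN to each page. A hedged sketch follows, again with an invented hoster and an assumed 'page' GET parameter:

# -*- coding: utf-8 -*-
# Illustrative only: the pagination markup and the 'page' GET parameter are
# assumptions about a fictional hoster.

from module.plugins.internal.SimpleCrypter import SimpleCrypter


class ExamplePagedFolder(SimpleCrypter):
    __name__    = "ExamplePagedFolder"
    __type__    = "crypter"
    __version__ = "0.01"

    __pattern__ = r'http://(?:www\.)?example\.com/folder/\w+'

    __description__ = """Example.com paged folder decrypter (sketch)"""
    __license__     = "GPLv3"
    __authors__     = [("example", "example@example.com")]

    LINK_PATTERN  = r'<a class="file" href="(.+?)"'
    PAGES_PATTERN = r'Pages: (\d+)'  #: group(1) = total number of pages

    def loadPage(self, page_n):
        #: called by handlePages() for every page after the first;
        #: must return the html of page number page_n
        return self.load(self.pyfile.url, get={'page': page_n}, decode=True)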