30 files changed, 510 insertions, 196 deletions
@@ -19,13 +19,10 @@ module/config/gui.xml
 module/config/core.xml
 module/config/plugin.xml
 links.txt
-module/links.pkl
-module/cookies.txt
 ssl.crt
 ssl.key
 cert.pem
 module/web/pyload.db
-webserver.pid
 *.svg
 *.prefs
 *.po
@@ -35,4 +32,5 @@ pyload/*
 dist/*
 build/*
 setup.py
-paver-minilib.zip
\ No newline at end of file +paver-minilib.zip +env/* diff --git a/module/Api.py b/module/Api.py index 9aa6f86bb..11072b2cd 100644 --- a/module/Api.py +++ b/module/Api.py @@ -733,8 +733,7 @@ class Api(Iface): :return: list of deleted package ids """ - deleted = self.core.files.deleteFinishedLinks() - return deleted + return self.core.files.deleteFinishedLinks() @permission(PERMS.MODIFY) def restartFailed(self): diff --git a/module/HookManager.py b/module/HookManager.py index 88ce62da6..25f84831e 100644 --- a/module/HookManager.py +++ b/module/HookManager.py @@ -208,11 +208,10 @@ class HookManager: self.dispatchEvent("coreExiting") @lock - def downloadStarts(self, pyfile): - #TODO: rename to downloadPreparing + def downloadPreparing(self, pyfile): for plugin in self.plugins: if plugin.isActivated(): - plugin.downloadStarts(pyfile) + plugin.downloadPreparing(pyfile) self.dispatchEvent("downloadPreparing", pyfile) @@ -265,10 +264,6 @@ class HookManager: self.dispatchEvent("afterReconnecting", ip) - @lock - def unrarFinished(self, folder, fname): - self.dispatchEvent("unrarFinished", folder, fname) - def startThread(self, function, *args, **kwargs): t = HookThread(self.core.threadManager, function, args, kwargs) diff --git a/module/PluginThread.py b/module/PluginThread.py index 03db68100..c4bdd59c8 100644 --- a/module/PluginThread.py +++ b/module/PluginThread.py @@ -183,7 +183,7 @@ class DownloadThread(PluginThread): self.m.log.info(_("Download starts: %s" % pyfile.name)) # start download - self.m.core.hookManager.downloadStarts(pyfile) + self.m.core.hookManager.downloadPreparing(pyfile) pyfile.plugin.preprocessing(self) self.m.log.info(_("Download finished: %s") % pyfile.name) diff --git a/module/database/FileDatabase.py b/module/database/FileDatabase.py index 91213d8a6..1df8998b0 100644 --- a/module/database/FileDatabase.py +++ b/module/database/FileDatabase.py @@ -848,7 +848,7 @@ class FileMethods(): @style.queue def deleteFinished(self): - self.c.execute("DELETE FROM links WHERE status=0") + self.c.execute("DELETE FROM links WHERE status IN (0,4)") self.c.execute("DELETE FROM packages WHERE NOT EXISTS(SELECT 1 FROM links WHERE packages.id=links.package)") diff --git a/module/network/Browser.py b/module/network/Browser.py index 822e2ed6d..23cf7666b 100644 --- a/module/network/Browser.py +++ b/module/network/Browser.py @@ -8,7 +8,6 @@ from HTTPDownload import HTTPDownload class Browser(object): - __slots__ = ("log", "options", "bucket", "cj", "_size", "http", "dl") def __init__(self, bucket=None, options={}): @@ -20,9 +19,14 @@ class Browser(object): self.cj = None # needs to be setted later self._size = 0 - self.http = HTTPRequest(self.cj, options) + self.renewHTTPRequest() self.dl = None + + def renewHTTPRequest(self): + if hasattr(self, "http"): self.http.close() + self.http = HTTPRequest(self.cj, self.options) + def setLastURL(self, val): self.http.lastURL = val @@ -80,7 +84,7 @@ class Browser(object): """ this can also download ftp """ self._size = 0 self.dl = HTTPDownload(url, filename, get, post, self.lastEffectiveURL if ref else None, - self.cj if cookies else None, self.bucket, self.options, progressNotify, disposition) + self.cj if cookies else None, self.bucket, self.options, progressNotify, disposition) name = self.dl.download(chunks, resume) self._size = self.dl.size @@ -96,6 +100,18 @@ class Browser(object): """ add a header to the request """ self.http.putHeader(name, value) + def addAuth(self, pwd): + """Adds user and pw for http auth + + :param pwd: string, user:password + 
""" + self.options["auth"] = pwd + self.renewHTTPRequest() #we need a new request + + def removeAuth(self): + if "auth" in self.options: del self.options["auth"] + self.renewHTTPRequest() + def clearHeaders(self): self.http.clearHeaders() diff --git a/module/network/HTTPChunk.py b/module/network/HTTPChunk.py index 680b982d3..69eedb19c 100644 --- a/module/network/HTTPChunk.py +++ b/module/network/HTTPChunk.py @@ -137,7 +137,7 @@ class HTTPChunk(HTTPRequest): self.fp = None #file handle self.initHandle() - self.setInterface(self.p.options["interface"], self.p.options["proxies"], self.p.options["ipv6"]) + self.setInterface(self.p.options) self.BOMChecked = False # check and remove byte order mark diff --git a/module/network/HTTPDownload.py b/module/network/HTTPDownload.py index 3edf56d98..1a2886332 100644 --- a/module/network/HTTPDownload.py +++ b/module/network/HTTPDownload.py @@ -136,8 +136,7 @@ class HTTPDownload(): #remove old handles for chunk in self.chunks: - self.m.remove_handle(chunk.c) - chunk.close() + self.closeChunk(chunk) return self._download(chunks, False) else: @@ -211,7 +210,7 @@ class HTTPDownload(): curl, errno, msg = c #test if chunk was finished, otherwise raise the exception if errno != 23 or "0 !=" not in msg: - raise + raise pycurl.error(errno, msg) #@TODO KeyBoardInterrupts are seen as finished chunks, #but normally not handled to this process, only in the testcase @@ -266,11 +265,18 @@ class HTTPDownload(): if self.progressNotify: self.progressNotify(self.percent) + def closeChunk(self, chunk): + try: + self.m.remove_handle(chunk.c) + except pycurl.error: + self.log.debug("Error removing chunk") + finally: + chunk.close() + def close(self): """ cleanup """ for chunk in self.chunks: - self.m.remove_handle(chunk.c) - chunk.close() + self.closeChunk(chunk) self.chunks = [] if hasattr(self, "m"): diff --git a/module/network/HTTPRequest.py b/module/network/HTTPRequest.py index d8d57e76f..6672a58e6 100644 --- a/module/network/HTTPRequest.py +++ b/module/network/HTTPRequest.py @@ -19,7 +19,7 @@ import pycurl -from codecs import getincrementaldecoder +from codecs import getincrementaldecoder, lookup, BOM_UTF8 from urllib import quote, urlencode from httplib import responses from logging import getLogger @@ -28,11 +28,12 @@ from cStringIO import StringIO from module.plugins.Plugin import Abort def myquote(url): - return quote(url, safe="%/:=&?~#+!$,;'@()*[]") + return quote(url, safe="%/:=&?~#+!$,;'@()*[]") + class BadHeader(Exception): def __init__(self, code, content=""): - Exception.__init__(self, "Bad server response: %s %s"% (code, responses[int(code)])) + Exception.__init__(self, "Bad server response: %s %s" % (code, responses[int(code)])) self.code = code self.content = content @@ -54,7 +55,7 @@ class HTTPRequest(): self.headers = [] #temporary request header self.initHandle() - self.setInterface(options["interface"], options["proxies"], options["ipv6"]) + self.setInterface(options) self.c.setopt(pycurl.WRITEFUNCTION, self.write) self.c.setopt(pycurl.HEADERFUNCTION, self.writeHeader) @@ -77,16 +78,21 @@ class HTTPRequest(): #self.c.setopt(pycurl.VERBOSE, 1) - self.c.setopt(pycurl.USERAGENT, "Mozilla/5.0 (Windows NT 6.1; Win64; x64;en; rv:5.0) Gecko/20110619 Firefox/5.0") + self.c.setopt(pycurl.USERAGENT, + "Mozilla/5.0 (Windows NT 6.1; Win64; x64;en; rv:5.0) Gecko/20110619 Firefox/5.0") if pycurl.version_info()[7]: self.c.setopt(pycurl.ENCODING, "gzip, deflate") self.c.setopt(pycurl.HTTPHEADER, ["Accept: */*", - "Accept-Language: en-US,en", - "Accept-Charset: 
ISO-8859-1,utf-8;q=0.7,*;q=0.7", - "Connection: keep-alive", - "Keep-Alive: 300"]) + "Accept-Language: en-US,en", + "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7", + "Connection: keep-alive", + "Keep-Alive: 300", + "Expect:"]) + + def setInterface(self, options): + + interface, proxy, ipv6 = options["interface"], options["proxies"], options["ipv6"] - def setInterface(self, interface, proxy, ipv6=False): if interface and interface.lower() != "none": self.c.setopt(pycurl.INTERFACE, str(interface)) @@ -97,7 +103,7 @@ class HTTPRequest(): self.c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5) else: self.c.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_HTTP) - + self.c.setopt(pycurl.PROXY, str(proxy["address"])) self.c.setopt(pycurl.PROXYPORT, proxy["port"]) @@ -109,6 +115,9 @@ class HTTPRequest(): else: self.c.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) + if "auth" in options: + self.c.setopt(pycurl.USERPWD, str(options["auth"])) + def addCookies(self): """ put cookies from curl handle to cj """ if self.cj: @@ -145,14 +154,14 @@ class HTTPRequest(): pass else: post = urlencode(post) - + self.c.setopt(pycurl.POSTFIELDS, post) else: - post = [(x, str(quote(y)) if type(y) in (str, unicode) else y ) for x,y in post.iteritems()] + post = [(x, str(quote(y)) if type(y) in (str, unicode) else y ) for x, y in post.iteritems()] self.c.setopt(pycurl.HTTPPOST, post) else: self.c.setopt(pycurl.POST, 0) - + if referer and self.lastURL: self.c.setopt(pycurl.REFERER, str(self.lastURL)) @@ -198,7 +207,7 @@ class HTTPRequest(): def verifyHeader(self): """ raise an exceptions on bad headers """ code = int(self.c.getinfo(pycurl.RESPONSE_CODE)) - if code in range(400,404) or code in range(405,418) or code in range(500,506): + if code in range(400, 404) or code in range(405, 418) or code in range(500, 506): #404 will NOT raise an exception raise BadHeader(code, self.getResponse()) return code @@ -218,7 +227,7 @@ class HTTPRequest(): for line in header: line = line.lower().replace(" ", "") - if not line.startswith("content-type:") or \ + if not line.startswith("content-type:") or\ ("text" not in line and "application" not in line): continue @@ -230,6 +239,9 @@ class HTTPRequest(): try: #self.log.debug("Decoded %s" % encoding ) + if lookup(encoding).name == 'utf-8' and rep.startswith(BOM_UTF8): + encoding = 'utf-8-sig' + decoder = getincrementaldecoder(encoding)("replace") rep = decoder.decode(rep, True) diff --git a/module/network/RequestFactory.py b/module/network/RequestFactory.py index 774249a70..5b1528281 100644 --- a/module/network/RequestFactory.py +++ b/module/network/RequestFactory.py @@ -54,9 +54,11 @@ class RequestFactory(): self.lock.release() return req - def getHTTPRequest(self): + def getHTTPRequest(self, **kwargs): """ returns a http request, dont forget to close it ! 
""" - return HTTPRequest(CookieJar(None), self.getOptions()) + options = self.getOptions() + options.update(kwargs) # submit kwargs as additional options + return HTTPRequest(CookieJar(None), options) def getURL(self, *args, **kwargs): """ see HTTPRequest for argument list """ diff --git a/module/plugins/Hook.py b/module/plugins/Hook.py index 51ebd1aec..5efd08bae 100644 --- a/module/plugins/Hook.py +++ b/module/plugins/Hook.py @@ -129,7 +129,7 @@ class Hook(Base): def coreExiting(self): pass - def downloadStarts(self, pyfile): + def downloadPreparing(self, pyfile): pass def downloadFinished(self, pyfile): diff --git a/module/plugins/accounts/HellshareCz.py b/module/plugins/accounts/HellshareCz.py index eb44ee47f..fc44e9307 100644 --- a/module/plugins/accounts/HellshareCz.py +++ b/module/plugins/accounts/HellshareCz.py @@ -13,7 +13,7 @@ You should have received a copy of the GNU General Public License along with this program; if not, see <http://www.gnu.org/licenses/>. - + @author: zoidberg """ @@ -22,33 +22,33 @@ import re class HellshareCz(Account): __name__ = "HellshareCz" - __version__ = "0.1" + __version__ = "0.11" __type__ = "account" __description__ = """hellshare.cz account plugin""" __author_name__ = ("zoidberg") __author_mail__ = ("zoidberg@mujmail.cz") - - CREDIT_LEFT_PATTERN = r'<a class="button-amount-1" href="/buy-credit/" title="Your current credit">([^<]+)</a>' + + CREDIT_LEFT_PATTERN = r'<div class="credit-link">\s*<table>\s*<tr>\s*<th>(\d+)</th>' def loadAccountInfo(self, user, req): self.relogin(user) html = req.load("http://www.hellshare.com/") - + found = re.search(self.CREDIT_LEFT_PATTERN, html) if found is None: credits = 0 else: credits = int(found.group(1)) * 1024 - + return {"validuntil": -1, "trafficleft": credits} - + def login(self, user, data, req): - + html = req.load('http://www.hellshare.com/login?do=loginForm-submit', post={ "login": "Log in", "password": data["password"], "username": user }) - + if "<p>You input a wrong user name or wrong password</p>" in html: - self.wrongPassword() + self.wrongPassword()
\ No newline at end of file diff --git a/module/plugins/crypter/FilesonicComFolder.py b/module/plugins/crypter/FilesonicComFolder.py new file mode 100644 index 000000000..7bf1df381 --- /dev/null +++ b/module/plugins/crypter/FilesonicComFolder.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- + +import re +from module.plugins.Crypter import Crypter + +class FilesonicComFolder(Crypter): + __name__ = "FilesonicComFolder" + __type__ = "crypter" + __pattern__ = r"http://(\w*\.)?(sharingmatrix|filesonic|wupload)\.[^/]*/folder/\d+/?" + __version__ = "0.10" + __description__ = """Filesonic.com/Wupload.com Folder Plugin""" + __author_name__ = ("zoidberg") + __author_mail__ = ("zoidberg@mujmail.cz") + + FOLDER_PATTERN = r'<table>\s*<caption>Files Folder</caption>(.*?)</table>' + LINK_PATTERN = r'<a href="([^"]+)">' + + def decrypt(self, pyfile): + html = self.load(self.pyfile.url) + + new_links = [] + + folder = re.search(self.FOLDER_PATTERN, html, re.DOTALL) + if not folder: self.fail("Parse error (FOLDER)") + + new_links.extend(re.findall(self.LINK_PATTERN, folder.group(1))) + + if new_links: + self.core.files.addLinks(new_links, self.pyfile.package().id) + else: + self.fail('Could not extract any links')
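
Note (not part of the diff): the new crypter above is driven entirely by its two regexes. Running the same FOLDER_PATTERN/LINK_PATTERN pair against a fabricated folder-page fragment shows what decrypt() would hand to addLinks():

import re

FOLDER_PATTERN = r'<table>\s*<caption>Files Folder</caption>(.*?)</table>'
LINK_PATTERN = r'<a href="([^"]+)">'

# fabricated markup, roughly shaped like a filesonic folder listing
html = ('<table> <caption>Files Folder</caption>'
        '<tr><td><a href="http://www.filesonic.com/file/123">a.rar</a></td></tr>'
        '<tr><td><a href="http://www.filesonic.com/file/456">b.rar</a></td></tr>'
        '</table>')

folder = re.search(FOLDER_PATTERN, html, re.DOTALL)
new_links = re.findall(LINK_PATTERN, folder.group(1)) if folder else []
print new_links   # both /file/... urls from the fabricated markup
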
\ No newline at end of file diff --git a/module/plugins/crypter/LinkdecrypterCom.py b/module/plugins/crypter/LinkdecrypterCom.py index befbfe4e6..e2fc55150 100644 --- a/module/plugins/crypter/LinkdecrypterCom.py +++ b/module/plugins/crypter/LinkdecrypterCom.py @@ -22,7 +22,7 @@ from module.plugins.Crypter import Crypter class LinkdecrypterCom(Crypter): __name__ = "LinkdecrypterCom" __type__ = "crypter" - __pattern__ = r"http://(\w*\.)?(10001mb\.com|123link\.it|1cl\.in|1kh\.de|1zh\.us|2joy\.de|2so\.be|3\.ly|5\.gp|6nc\.net|7li\.in|9\.bb|adf\.ly|adflav\.com|adfoc\.us|allanalpass\.com|alturl\.com|amy\.gs|any\.gs|apurl\.ru|aurl\.es|b23\.ru|baberepublic\.com|bat5\.com|bax\.li|beam\.to|bit\.ly|blu\.cc|c\.ly|capourl\.com|cc\.st|cd\.vg|cloneurl\.com|convertircodigo\.com|crypt-it\.com|crypt\.to|cryptlink\.ws|deb\.gs|digzip\.com|djurl\.com|dl-protect\.com|dl\.dropbox\.com|doiop\.com|ehe\.me|embedupload\.com|encript\.in|encurtador\.com|enlacs\.com|evg\.in|extreme-protect\.com|fa\.by|faja\.me|fapoff\.com|fdnlinks\.com|fea\.me|fff\.to|filedeck\.net|filemirrorupload\.com|fileupster\.com|flameupload\.com|freetexthost\.com|fwd4\.me|fyad\.org|goandgrab\.info|goblig\.com|goo\.gl|h-url\.in|hasurl\.co\.cc|hide-url\.net|hidemyass\.com|hides\.at|hideurl\.biz|ho\.io|hornywood\.tv|href\.hu|id2\.tryjav\.com|ilix\.in|ily\.me|ino\.me|interupload\.com|is\.gd|ivpaste\.com|j\.mp|je\.pl|jheberg\.com|just\.as|kickupload\.com|klnk\.de|knoffl\.com|kodo\.ameoto\.com|ks\.gs|latwy\.pl|link-go\.info|link-protector\.com|link-safe\.net|link4jo\.com|linkanonimo\.com|linkbabes\.com|linkbank\.eu|linkbee\.com|linkblur\.com|linkbucks\.com|linkcrypt\.com|linkcrypt\.ws|linkencrypter\.com|linkhide\.com\.ar|linkhide\.in|linkoculto\.net|linkok\.org|linkprivado\.com|linkprivate\.net|linkprotect\.in|links-protect\.com|links-protect\.info|links\.tc|linksafe\.me|linksaver\.info|linkse\.info|linkseguro\.com\.ar|linkseguro\.org|linksole\.com|linksprotegidos\.info|linkto\.net|linkweb\.dk|linkx\.in|linkzip\.net|listedfiles\.com|littleurl\.net|lixk\.me|ljv2\.com|ll11\.org|lnk\.cm|lnk\.co|longr\.us|lovelink\.in|mcaf\.ee|megaline\.co|megaupper\.com|mhz\.me|migre\.me|miniurls\.co|minu\.me|mir\.cr|mirrorcreator\.com|mo\.by|multi-uploadeur\.com|multiupload\.com|murl\.kz|musicalmente\.info|mypaqe\.com|mypl\.us|myrapidlinks\.com|myref\.de|myurl\.in|nbanews\.us|okconsolas\.com|oneddl\.canhaz\.it|ow\.ly|p4p\.com\.es|p6l\.org|paste\.frubar\.net|paste\.hotfile-bb\.com|paste\.ubuntu\.com|paste2\.org|paste21\.info|pastebin\.com|paylesssofts\.net|poontown\.net|pqueno\.com|priva\.us|protec-link\.com|protect-ddl\.com|protect-my-links\.com|protected\.socadvnet\.com|protectlinks\.com|protectlinks\.net|protectlk\.com|protege-mes-liens\.com|ptl\.li|qooy\.com|qqc\.co|qvvo\.com|rapidfolder\.com|rapidsafe\.de|rapidsafe\.org|rapidshare\.mu|realfiles\.net|redir\.ec|ref\.so|relinka\.net|rexwo\.com|rqq\.co|rs-layer\.com|rsmonkey\.com|s2l\.biz|saf\.li|safe\.mn|safelinking\.net|saferlinks\.com|sealed\.in|seclnk\.in|seriousfiles\.com|sharebee\.com|short-link\.fr|shortlink\.ca|shorturlscript\.net|simurl\.com|sinl\.es|skroc\.pl|slexy\.org|slnky\.net|smsdl\.com|sn\.im|sonofertas\.es|spedr\.com|spreadlink\.us|star-cyber\.com|subedlc\.com|subirfacil\.com|syl\.me|szort\.pl|takemyfile\.com|takemylinks\.com|textsnip\.com|thecow\.me|thesefiles\.com|tilien\.net|tiny\.cc|tiny\.lt|tinylinks\.co|tinypaste\.com|tinyurl\.com|tinyurlscript\.info|tmf\.myegy\.com|togoto\.us|tot\.to|tra\.kz|u\.to|uberpicz\.com|ulinks\.net|ultra-protect\.com|ultrafiles\.net|undeadlink\.com|uploadjockey\.com|uploadm
irrors\.com|uploadonall\.com|upmirror\.com|upsafe\.org|ur\.ly|url-go\.com|url-site\.com|url4t\.com|urla\.in|urlbeat\.net|urlcash\.net|urlcrypt\.com|urlcut\.com|urlcut\.in|urldefender\.com|urln\.tk|urlpulse\.net|urlspy\.co\.cc|urwij|uselink\.info|uucc\.cc|uze\.in|wcrypt\.in|webtooljungle\.com|weepax\.com|whackyvidz\.com|x-ls\.ru|x\.co|xa\.ly|xc\.io|xr\.com|xtreemhost\.com|xurl\.cn|xxs\.ru|ysu\.me|yyv\.co|zff\.co|zio\.in|zpag\.es)/.*" + __pattern__ = r"http://(\w*\.)?(10001mb\.com|123link\.it|1cl\.in|1kh\.de|1zh\.us|2joy\.de|2so\.be|3\.ly|5\.gp|6nc\.net|7li\.in|9\.bb|adf\.ly|adflav\.com|adfoc\.us|allanalpass\.com|alturl\.com|amy\.gs|any\.gs|apurl\.ru|aurl\.es|b23\.ru|baberepublic\.com|bat5\.com|bax\.li|beam\.to|bit\.ly|blu\.cc|c\.ly|capourl\.com|cc\.st|cd\.vg|cloneurl\.com|convertircodigo\.com|crypt-it\.com|crypt\.to|cryptlink\.ws|deb\.gs|digzip\.com|djurl\.com|dl-protect\.com|doiop\.com|ehe\.me|embedupload\.com|encript\.in|encurtador\.com|enlacs\.com|evg\.in|extreme-protect\.com|fa\.by|faja\.me|fapoff\.com|fdnlinks\.com|fea\.me|fff\.to|filedeck\.net|filemirrorupload\.com|fileupster\.com|flameupload\.com|freetexthost\.com|fwd4\.me|fyad\.org|goandgrab\.info|goblig\.com|goo\.gl|h-url\.in|hasurl\.co\.cc|hide-url\.net|hidemyass\.com|hides\.at|hideurl\.biz|ho\.io|hornywood\.tv|href\.hu|id2\.tryjav\.com|ilix\.in|ily\.me|ino\.me|interupload\.com|is\.gd|ivpaste\.com|j\.mp|je\.pl|jheberg\.com|just\.as|kickupload\.com|klnk\.de|knoffl\.com|kodo\.ameoto\.com|ks\.gs|latwy\.pl|link-go\.info|link-protector\.com|link-safe\.net|link4jo\.com|linkanonimo\.com|linkbabes\.com|linkbank\.eu|linkbee\.com|linkblur\.com|linkbucks\.com|linkcrypt\.com|linkcrypt\.ws|linkencrypter\.com|linkhide\.com\.ar|linkhide\.in|linkoculto\.net|linkok\.org|linkprivado\.com|linkprivate\.net|linkprotect\.in|links-protect\.com|links-protect\.info|links\.tc|linksafe\.me|linksaver\.info|linkse\.info|linkseguro\.com\.ar|linkseguro\.org|linksole\.com|linksprotegidos\.info|linkto\.net|linkweb\.dk|linkx\.in|linkzip\.net|listedfiles\.com|littleurl\.net|lixk\.me|ljv2\.com|ll11\.org|lnk\.cm|lnk\.co|longr\.us|lovelink\.in|mcaf\.ee|megaline\.co|megaupper\.com|mhz\.me|migre\.me|miniurls\.co|minu\.me|mir\.cr|mirrorcreator\.com|mo\.by|multi-uploadeur\.com|multiupload\.com|murl\.kz|musicalmente\.info|mypaqe\.com|mypl\.us|myrapidlinks\.com|myref\.de|myurl\.in|nbanews\.us|okconsolas\.com|oneddl\.canhaz\.it|ow\.ly|p4p\.com\.es|p6l\.org|paste\.frubar\.net|paste\.hotfile-bb\.com|paste\.ubuntu\.com|paste2\.org|paste21\.info|pastebin\.com|paylesssofts\.net|poontown\.net|pqueno\.com|priva\.us|protec-link\.com|protect-ddl\.com|protect-my-links\.com|protected\.socadvnet\.com|protectlinks\.com|protectlinks\.net|protectlk\.com|protege-mes-liens\.com|ptl\.li|qooy\.com|qqc\.co|qvvo\.com|rapidfolder\.com|rapidsafe\.de|rapidsafe\.org|rapidshare\.mu|realfiles\.net|redir\.ec|ref\.so|relinka\.net|rexwo\.com|rqq\.co|rs-layer\.com|rsmonkey\.com|s2l\.biz|saf\.li|safe\.mn|safelinking\.net|saferlinks\.com|sealed\.in|seclnk\.in|seriousfiles\.com|sharebee\.com|short-link\.fr|shortlink\.ca|shorturlscript\.net|simurl\.com|sinl\.es|skroc\.pl|slexy\.org|slnky\.net|smsdl\.com|sn\.im|sonofertas\.es|spedr\.com|spreadlink\.us|star-cyber\.com|subedlc\.com|subirfacil\.com|syl\.me|szort\.pl|takemyfile\.com|takemylinks\.com|textsnip\.com|thecow\.me|thesefiles\.com|tilien\.net|tiny\.cc|tiny\.lt|tinylinks\.co|tinypaste\.com|tinyurl\.com|tinyurlscript\.info|tmf\.myegy\.com|togoto\.us|tot\.to|tra\.kz|u\.to|uberpicz\.com|ulinks\.net|ultra-protect\.com|ultrafiles\.net|undeadlink\.com|uploadjocke
y\.com|uploadmirrors\.com|uploadonall\.com|upmirror\.com|upsafe\.org|ur\.ly|url-go\.com|url-site\.com|url4t\.com|urla\.in|urlbeat\.net|urlcash\.net|urlcrypt\.com|urlcut\.com|urlcut\.in|urldefender\.com|urln\.tk|urlpulse\.net|urlspy\.co\.cc|urwij|uselink\.info|uucc\.cc|uze\.in|wcrypt\.in|webtooljungle\.com|weepax\.com|whackyvidz\.com|x-ls\.ru|x\.co|xa\.ly|xc\.io|xr\.com|xtreemhost\.com|xurl\.cn|xxs\.ru|ysu\.me|yyv\.co|zff\.co|zio\.in|zpag\.es)/.*" __version__ = "0.2" __description__ = """linkdecrypter.com""" __author_name__ = ("zoidberg") @@ -80,4 +80,4 @@ class LinkdecrypterCom(Crypter): else: self.fail('Could not extract any links') -
\ No newline at end of file + diff --git a/module/plugins/hooks/ExternalScripts.py b/module/plugins/hooks/ExternalScripts.py index e8d929b71..2e77f1dae 100644 --- a/module/plugins/hooks/ExternalScripts.py +++ b/module/plugins/hooks/ExternalScripts.py @@ -79,7 +79,7 @@ class ExternalScripts(Hook): except Exception, e: self.logError(_("Error in %(script)s: %(error)s") % { "script" :basename(script), "error": str(e)}) - def downloadStarts(self, pyfile): + def downloadPreparing(self, pyfile): for script in self.scripts['download_preparing']: self.callScript(script, pyfile.pluginname, pyfile.url, pyfile.id) diff --git a/module/plugins/hooks/ExtractArchive.py b/module/plugins/hooks/ExtractArchive.py index d48496c14..74721d166 100644 --- a/module/plugins/hooks/ExtractArchive.py +++ b/module/plugins/hooks/ExtractArchive.py @@ -66,6 +66,9 @@ class WrongPassword(Exception): class ExtractArchive(Hook): + """ + Provides: unrarFinished (folder, filename) + """ __name__ = "ExtractArchive" __version__ = "0.1" __description__ = "Extract different kind of archives" @@ -95,6 +98,14 @@ class ExtractArchive(Hook): names.append(p) self.plugins.append(klass) + except OSError, e: + if e.errno == 2: + self.logInfo(_("No %s installed") % p) + else: + self.logWarning(_("Could not activate %s") % p, str(e)) + if self.core.debug: + print_exc() + except Exception, e: self.logWarning(_("Could not activate %s") % p, str(e)) if self.core.debug: @@ -140,6 +151,7 @@ class ExtractArchive(Hook): #iterate packages -> plugins -> targets for pid in ids: p = self.core.files.getPackage(pid) + self.logInfo(_("Check package %s") % p.name) if not p: continue # determine output folder @@ -148,7 +160,8 @@ class ExtractArchive(Hook): if self.getConfig("destination") and self.getConfig("destination").lower() != "none": if exists(self.getConfig("destination")): - out = save_join(self.getConfig("destination"), "") + out = save_join(dl, p.folder, self.getConfig("destination"), "") + #relative to package folder if destination is relative, otherwise absolute path overwrites them files_ids = [(save_join(dl, p.folder, x["name"]), x["id"]) for x in p.getChildren().itervalues()] @@ -158,7 +171,7 @@ class ExtractArchive(Hook): for plugin in self.plugins: targets = plugin.getTargets(files_ids) - self.logDebug("Targets: %s" % targets) + if targets: self.logDebug("Targets: %s" % targets) for target, fid in targets: if target in extracted: self.logDebug(basename(target), "skipped") @@ -201,12 +214,12 @@ class ExtractArchive(Hook): else: self.logInfo(basename(plugin.file), _("Password protected")) self.logDebug("Passwords: %s" % str(passwords)) - + pwlist = copy(self.getPasswords()) #remove already supplied pws from list (only local) for pw in passwords: if pw in pwlist: pwlist.remove(pw) - + for pw in passwords + pwlist: try: self.logDebug("Try password: %s" % pw) @@ -233,7 +246,7 @@ class ExtractArchive(Hook): else: self.logDebug("%s does not exists" % f) self.logInfo(basename(plugin.file), _("Extracting finished")) - self.core.hookManager.unrarFinished(plugin.out, plugin.file) + self.manager.dispatchEvent("unrarFinished", plugin.out, plugin.file) return plugin.getExtractedFiles() @@ -327,7 +340,7 @@ class AbtractExtractor: def __init__(self, m, file, out, fullpath, overwrite, renice): - """Initialize extractor for specifiy file + """Initialize extractor for specific file :param m: ExtractArchive Hook plugin :param file: Absolute filepath diff --git a/module/plugins/hoster/BasePlugin.py b/module/plugins/hoster/BasePlugin.py index 
71c61942f..15e35ce24 100644 --- a/module/plugins/hoster/BasePlugin.py +++ b/module/plugins/hoster/BasePlugin.py @@ -1,21 +1,24 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- +from urlparse import urlparse +from re import search +from urllib import unquote -import re +from module.network.HTTPRequest import BadHeader from module.plugins.Hoster import Hoster -from module.utils import html_unescape +from module.utils import html_unescape, removeChars class BasePlugin(Hoster): __name__ = "BasePlugin" __type__ = "hoster" __pattern__ = r"^unmatchable$" - __version__ = "0.11" + __version__ = "0.14" __description__ = """Base Plugin when any other didnt fit""" __author_name__ = ("RaNaN") __author_mail__ = ("RaNaN@pyload.org") def setup(self): - self.chunkLimit = 3 + self.chunkLimit = -1 self.resumeDownload = True def process(self, pyfile): @@ -39,8 +42,48 @@ class BasePlugin(Hoster): # return if pyfile.url.startswith("http"): - pyfile.name = html_unescape(re.findall("([^/=]+)", pyfile.url)[-1]) - self.download(pyfile.url, disposition=True) - + try: + self.downloadFile(pyfile) + except BadHeader, e: + if e.code in (401, 403): + self.logDebug("Auth required") + + pwd = pyfile.package().password.strip() + if ":" not in pwd: + self.fail(_("Authorization required (username:password)")) + + self.req.addAuth(pwd) + self.downloadFile(pyfile) + else: + raise + + else: + self.fail("No Plugin matched and not a downloadable url.") + + + def downloadFile(self, pyfile): + header = self.load(pyfile.url, just_header = True) + #self.logDebug(header) + + if 'location' in header: + self.logDebug("Location: " + header['location']) + url = unquote(header['location']) else: - self.fail("No Plugin matched and not a downloadable url.")
\ No newline at end of file
+            url = pyfile.url
+
+        name = html_unescape(urlparse(url).path.split("/")[-1])
+
+        if 'content-disposition' in header:
+            self.logDebug("Content-Disposition: " + header['content-disposition'])
+            m = search("filename(?P<type>=|\*=(?P<enc>.+)'')(?P<name>.*)", header['content-disposition'])
+            if m:
+                disp = m.groupdict()
+                self.logDebug(disp)
+                if not disp['enc']: disp['enc'] = 'utf-8'
+                name = removeChars(disp['name'], "\"';").strip()
+                name = unicode(unquote(name), disp['enc'])
+
+        if not name: name = url
+        pyfile.name = name
+        self.logDebug("Filename: %s" % pyfile.name)
+        self.download(url, disposition=True)
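
Note (not part of the diff): the Content-Disposition handling in downloadFile() above hinges on one regex that accepts both the plain filename= form and the RFC 2231 style filename*=charset''value form. Run against two fabricated header values, and with a simple strip() standing in for removeChars(), it behaves like this:

from re import search
from urllib import unquote

PATTERN = "filename(?P<type>=|\*=(?P<enc>.+)'')(?P<name>.*)"

for header in ('attachment; filename="report.pdf"',
               "attachment; filename*=UTF-8''n%C3%A4me.rar"):
    disp = search(PATTERN, header).groupdict()
    name = disp['name'].strip('"\';')             # rough equivalent of removeChars(...)
    if disp['enc']:                               # percent-encoded variant
        name = unicode(unquote(name), disp['enc'])
    print repr(name)                              # 'report.pdf', then u'n\xe4me.rar'
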
\ No newline at end of file diff --git a/module/plugins/hoster/DepositfilesCom.py b/module/plugins/hoster/DepositfilesCom.py index 4f2cc9fc4..81e6aa4d6 100644 --- a/module/plugins/hoster/DepositfilesCom.py +++ b/module/plugins/hoster/DepositfilesCom.py @@ -4,37 +4,64 @@ import re import urllib from module.plugins.Hoster import Hoster +from module.network.RequestFactory import getURL +from module.plugins.ReCaptcha import ReCaptcha + +def getInfo(urls): + result = [] + + for url in urls: + html = getURL(re.sub(r"\.com(/.*?)?/files", ".com/en/files", url), decode=True) + if re.search(DepositfilesCom.FILE_OFFLINE_PATTERN, html): + # File offline + result.append((url, 0, 1, url)) + else: + # Get file info + name, size = url, 0 + + found = re.search(DepositfilesCom.FILE_INFO_PATTERN, html) + if found is not None: + name, size, units = found.groups() + size = float(size) * 1024 ** {'KB': 1, 'MB': 2, 'GB': 3}[units] + result.append((name, size, 2, url)) + yield result class DepositfilesCom(Hoster): __name__ = "DepositfilesCom" __type__ = "hoster" __pattern__ = r"http://[\w\.]*?depositfiles\.com(/\w{1,3})?/files/[\w]+" - __version__ = "0.32" + __version__ = "0.34" __description__ = """Depositfiles.com Download Hoster""" - __author_name__ = ("spoob") - __author_mail__ = ("spoob@pyload.org") + __author_name__ = ("spoob", "zoidberg") + __author_mail__ = ("spoob@pyload.org", "zoidberg@mujmail.cz") + + FILE_INFO_PATTERN = r'File name: <b title="([^"]+)">.*\s*<span class="nowrap">File size: <b>([0-9.]+) (KB|MB|GB)</b>' + FILE_OFFLINE_PATTERN = r'<span class="html_download_api-not_exists"></span>' + RECAPTCHA_PATTERN = r"Recaptcha.create\('([^']+)', this\);" + DOWNLOAD_LINK_PATTERN = r'<form action="(http://.+?\.depositfiles.com/.+?)" method="get"' def setup(self): self.resumeDownload = self.multiDL = True if self.account else False - self.pyfile.url = re.sub(r"\.com(/.*?)?/files", ".com/de/files", self.pyfile.url) - + self.pyfile.url = re.sub(r"\.com(/.*?)?/files", ".com/en/files", self.pyfile.url) + def process(self, pyfile): if re.search(r"(.*)\.html", self.pyfile.url): self.pyfile.url = re.search(r"(.*)\.html", self.pyfile.url).group(1) - self.html = self.load(self.pyfile.url, cookies=True if self.account else False) - - if '<span class="html_download_api-not_exists"></span>' in self.html: + self.html = self.load(self.pyfile.url, cookies=True if self.account else False, decode = True) + + if self.FILE_OFFLINE_PATTERN in self.html: self.offline() - - - - return_url = self.req.lastEffectiveURL.split("/", 3)[3] - self.html = self.load(r'http://depositfiles.com/switch_lang.php?return_url=%s&lang=de' % return_url) - pyfile.name = re.search('(?s)Dateiname: <b title=\"(.*?)\">.*?</b>', self.html).group(1) + pyfile.name, size, units = re.search(self.FILE_INFO_PATTERN, self.html).groups() + pyfile.size = float(size) * 1024 ** {'KB': 1, 'MB': 2, 'GB': 3}[units] + self.logDebug ("FILENAME: %s" % pyfile.name) + #return_url = self.req.lastEffectiveURL.split("/", 3)[3] + #self.html = self.load(r'http://depositfiles.com/switch_lang.php?return_url=%s&lang=en' % return_url) + + #pyfile.name = re.search('(?s)Dateiname: <b title=\"(.*?)\">.*?</b>', self.html).group(1) if self.account: self.handlePremium() @@ -50,7 +77,7 @@ class DepositfilesCom(Hoster): self.setWait(61) self.wait() self.retry() - + wait = re.search(r'html_download_api-limit_interval\">(\d+)</span>', self.html) if wait: wait_time = int(wait.group(1)) @@ -70,20 +97,44 @@ class DepositfilesCom(Hoster): if wait: self.setWait(int(wait.group(1))) - 
self.wait() - - form = re.search(r"\$\('#download_container'\)\.load\('([^']+)", self.html) + found = re.search(r"var fid = '(\w+)';", self.html) + if not found: self.retry(wait_time=5) + params = {'fid' : found.group(1)} + self.logDebug ("FID: %s" % params['fid']) - self.html = self.load("http://depositfiles.com/"+ form.group(1)) + captcha_key = None + found = re.search(self.RECAPTCHA_PATTERN, self.html) + if found: captcha_key = found.group(1) + self.logDebug ("CAPTCHA_KEY: %s" % captcha_key) - link = urllib.unquote(re.search('<form action="(http://.+?\.depositfiles.com/.+?)" method="get"', self.html).group(1)) - self.download(link) + self.wait() + recaptcha = ReCaptcha(self) + + for i in range(5): + self.html = self.load("http://depositfiles.com/get_file.php", get = params) + if '<input type=button value="Continue" onclick="check_recaptcha' in self.html: + if not captcha_key: self.fail('Parse error (Captcha key)') + if 'response' in params: self.invalidCaptcha() + params['challenge'], params['response'] = recaptcha.challenge(captcha_key) + self.logDebug(params) + continue + + found = re.search(self.DOWNLOAD_LINK_PATTERN, self.html) + if found: + if 'response' in params: self.correctCaptcha() + link = urllib.unquote(found.group(1)) + self.logDebug ("LINK: %s" % link) + break + else: + self.fail('Parse error (Download link)') + else: + self.fail('No valid captcha response received') - - #wait_time = int(re.search(r'<span id="download_waiter_remain">(.*?)</span>', self.html).group(1)) - #self.setWait(wait_time) - #self.log.debug("DepositFiles.com: Waiting %d seconds." % wait_time) + try: + self.download(link) + except: + self.retry(wait_time = 60) def handlePremium(self): link = urllib.unquote(re.search('<div id="download_url">\s*<a href="(http://.+?\.depositfiles.com/.+?)"', self.html).group(1)) - self.download(link) + self.download(link)
\ No newline at end of file diff --git a/module/plugins/hoster/HellshareCz.py b/module/plugins/hoster/HellshareCz.py index 7ff5e5367..8c90e8099 100644 --- a/module/plugins/hoster/HellshareCz.py +++ b/module/plugins/hoster/HellshareCz.py @@ -31,94 +31,94 @@ def getInfo(urls): result.append((url, 0, 1, url)) else: # Get file info + found = re.search(HellshareCz.FILE_SIZE_PATTERN, html) + if found is not None: + size, units = found.groups() + size = float(size) * 1024 ** {'kB': 1, 'KB': 1, 'MB': 2, 'GB': 3}[units] + found = re.search(HellshareCz.FILE_NAME_PATTERN, html) if found is not None: name = found.group(1) + + if found or size > 0: result.append((name, 0, 2, url)) yield result - class HellshareCz(Hoster): __name__ = "HellshareCz" __type__ = "hoster" __pattern__ = r"http://(.*\.)*hellshare\.(cz|com|sk|hu)/.*" - __version__ = "0.6" + __version__ = "0.71" __description__ = """Hellshare.cz""" __author_name__ = ("zoidberg") - FREE_URL_PATTERN = r'<a id="button-download-free" href="([^"]*)"' - PREMIUM_URL_PATTERN = r'<a onclick="return launchFullDownload[^>]*href="(http://[^/]+/([^/]+)/[^"]+)" target="full-download-iframe">' - FILE_NAME_PATTERN = r'<strong id="FileName_master">([^<]+)</strong>' - FILE_OFFLINE_PATTERN = r'<h1>Soubor nenalezen</h1>' - SERVER_PATTERN = r'<form method="post" action="([^"]+)">' - CAPTCHA_PATTERN = r'<p class="text-center marg-off"><img id="captcha-img" class="va-middle" src="([^"]+)"' - FILE_CREDITS_PATTERN = r'<strong style="font-size:20px;">(\d+)\s*credits</strong>' - CREDIT_LEFT_PATTERN = r'<a class="button-amount-1" href="[^>]*/buy-credit/" title="Your current credit">([^<]+)</a>' - DOWNLOAD_AGAIN_PATTERN = r'Soubor jste ji. stahoval - opakovan. download prob.hne zdarma. Pokra.ovat' + FREE_URL_PATTERN = r'<h3>I\'ll wait.*\s*<form action="([^"]*)"' + PREMIUM_URL_PATTERN = r"launchFullDownload\('([^']*)'\);" + FILE_NAME_PATTERN = r'<h1 id="filename">([^<]+)</h1>' + FILE_SIZE_PATTERN = r'<td><span>Size</span></td>\s*<th><span>([0-9.]*) (kB|KB|MB|GB)</span></th>' + FILE_OFFLINE_PATTERN = r'<h1>File not found.</h1>' + CAPTCHA_PATTERN = r'<img class="left" id="captcha-img"src="([^"]*)" />' + FILE_CREDITS_PATTERN = r'<strong class="filesize">(\d+) MB</strong>' + CREDIT_LEFT_PATTERN = r'<p>After downloading this file you will have (\d+) MB for future downloads.' + DOWNLOAD_AGAIN_PATTERN = r'<p>This file you downloaded already and re-download is for free. 
</p>' def setup(self): self.resumeDownload = self.multiDL = True if self.account else False self.chunkLimit = 1 def process(self, pyfile): - if self.premium and self.account is not None: + if self.account: self.account.relogin(self.user) - self.getFileInfo(pyfile) + + pyfile.url = re.search(r'([^?]*)', pyfile.url).group(1) + self.html = self.load(pyfile.url, get = {"do" : "fileDownloadButton-showDownloadWindow"}, decode=True) + self.getFileInfo(pyfile) + + if self.account: self.handlePremium() else: - self.getFileInfo(pyfile) self.handleFree() def getFileInfo(self, pyfile): - self.html = self.load(pyfile.url, decode=True) - #marks the file as "offline" when the pattern was found on the html-page if re.search(self.FILE_OFFLINE_PATTERN, self.html) is not None: self.offline() - # parse the name from the site and set attribute in pyfile + # parse the name from the site and set attribute in pyfile found = re.search(self.FILE_NAME_PATTERN, self.html) if found is None: self.fail("Parse error (Filename") pyfile.name = found.group(1) - def handleFree(self): - # parse free download url - found = re.search(self.FREE_URL_PATTERN, self.html) - if found is None: - self.fail("Parse error (URL)") - - parsed_url = found.group(1) + found = re.search(self.FILE_SIZE_PATTERN, self.html) + if found is not None: + size, units = found.groups() + pyfile.size = float(size) * 1024 ** {'kB': 1, 'KB': 1, 'MB': 2, 'GB': 3}[units] - if parsed_url == "": + def handleFree(self): + # hellshare is very generous + if "You exceeded your today's limit for free download. You can download only 1 files per 24 hours." in self.html: t = datetime.datetime.today().replace(hour=1, minute=0, second=0) + datetime.timedelta( days=1) - datetime.datetime.today() self.setWait(t.seconds, True) self.wait() self.retry() - # get download ticket and parse html - self.logDebug("PARSED_URL:" + parsed_url) - self.html = self.load(parsed_url) - - found = re.search(self.SERVER_PATTERN, self.html) - if found is None: - self.fail("Parse error (Server)") - download_url = found.group(1) + # parse free download url + found = re.search(self.FREE_URL_PATTERN, self.html) + if found is None: self.fail("Parse error (URL)") + parsed_url = found.group(1) + self.logDebug("Free URL: %s" % parsed_url) + # decrypt captcha found = re.search(self.CAPTCHA_PATTERN, self.html) - if found is None: - self.fail("Parse error (Captcha)") + if found is None: self.fail("Parse error (Captcha)") captcha_url = found.group(1) - # get and decrypt captcha captcha = self.decryptCaptcha(captcha_url) self.logDebug('CAPTCHA_URL:' + captcha_url + ' CAPTCHA:' + captcha) - # download the file, destination is determined by pyLoad - self.download(download_url, post={ - "captcha": captcha - }) + self.download(parsed_url, post = {"captcha" : captcha, "submit" : "Download"}) # check download check = self.checkDownload({ @@ -130,32 +130,31 @@ class HellshareCz(Hoster): self.retry() def handlePremium(self): - found = re.search(self.FILE_CREDITS_PATTERN, self.html) - if found is None: - self.fail("Parse error (Credits)") - file_credits = int(found.group(1)) - - found = re.search(self.CREDIT_LEFT_PATTERN, self.html) - if found is None: - self.fail("Parse error (Credits left)") - credits_left = int(found.group(1)) + # get premium download url + found = re.search(self.PREMIUM_URL_PATTERN, self.html) + if found is None: self.fail("Parse error (URL)") + download_url = found.group(1) - self.logInfo("Premium download for %i credits" % file_credits) - self.logInfo("User %s has %i credits left" % 
(self.user, credits_left)) + # check credit + if self.DOWNLOAD_AGAIN_PATTERN in self.html: + self.logInfo("Downloading again for free") + else: + found = re.search(self.CREDIT_LEFT_PATTERN, self.html) + if not found: + self.fail("Not enough credit left. Trying to download as free user.") + self.resetAccount() + credits_left = int(found.group(1)) - if file_credits > credits_left and not re.search(self.DOWNLOAD_AGAIN_PATTERN, self.html): - self.resetAccount() + found = re.search(self.FILE_CREDITS_PATTERN, self.html) + if found: + self.file_credits = found.group(1) + else: + self.logError("Parse error: file credits") + self.file_credits = "???" - found = re.search(self.PREMIUM_URL_PATTERN, self.html) - if found is None: - self.fail("Parse error (URL)") - download_url = found.group(1) + self.logInfo("Downloading file for %s credits, %d credits left" % (self.file_credits, credits_left)) self.download(download_url) info = self.account.getAccountInfo(self.user, True) - self.logInfo("User %s has %i credits left" % (self.user, info["trafficleft"] / 1024)) - - - - + self.logInfo("User %s has %i credits left" % (self.user, info["trafficleft"] / 1024))
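
Note (not part of the diff): several hosters touched in this commit repeat the same size conversion when they parse FILE_SIZE_PATTERN, so it is worth seeing in isolation. A tiny helper using the unit table from HellshareCz above; the sample values are made up.

def parse_size(size, units):
    """Turn strings like ('1.5', 'MB') into a byte count."""
    return float(size) * 1024 ** {'kB': 1, 'KB': 1, 'MB': 2, 'GB': 3}[units]

print parse_size("1.5", "MB")   # 1572864.0
print parse_size("500", "kB")   # 512000.0
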
\ No newline at end of file diff --git a/module/plugins/hoster/IfolderRu.py b/module/plugins/hoster/IfolderRu.py index 063ebfbff..c0e45a87a 100644 --- a/module/plugins/hoster/IfolderRu.py +++ b/module/plugins/hoster/IfolderRu.py @@ -45,14 +45,14 @@ class IfolderRu(Hoster): __name__ = "IfolderRu" __type__ = "hoster" __pattern__ = r"http://(?:\w*\.)?ifolder.ru/(\d+).*" - __version__ = "0.3" + __version__ = "0.31" __description__ = """ifolder.ru""" __author_name__ = ("zoidberg") __author_mail__ = ("zoidberg@mujmail.cz") - FILE_NAME_PATTERN = ur'^\s*(?:<div><span>)?Название:(?:</span>)? <b>([^<]+)</b><(?:/div|br)>' - FILE_SIZE_PATTERN = ur'^\s*(?:<div><span>)?Размер:(?:</span>)? <b>([0-9.]+) ([^<]+)</b><(?:/div|br)>' + FILE_NAME_PATTERN = ur'(?:<div><span>)?Название:(?:</span>)? <b>([^<]+)</b><(?:/div|br)>' + FILE_SIZE_PATTERN = ur'(?:<div><span>)?Размер:(?:</span>)? <b>([0-9.]+) ([^<]+)</b><(?:/div|br)>' SESSION_ID_PATTERN = r'<a href=(http://ints.ifolder.ru/ints/sponsor/\?bi=\d*&session=([^&]+)&u=[^>]+)>' FORM1_PATTERN = r'<form method=post name="form1" ID="Form1" style="margin-bottom:200px">(.*?)</form>' FORM_INPUT_PATTERN = r'<input[^>]* name="?([^" ]+)"? value="?([^" ]+)"?[^>]*>' diff --git a/module/plugins/hoster/LoadTo.py b/module/plugins/hoster/LoadTo.py new file mode 100644 index 000000000..b1204cb2d --- /dev/null +++ b/module/plugins/hoster/LoadTo.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- +""" + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 3 of the License, + or (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + See the GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, see <http://www.gnu.org/licenses/>. + + @author: halfman +""" + +import re +from module.plugins.Hoster import Hoster +from module.network.RequestFactory import getURL + +def getInfo(urls): + result = [] + + for url in urls: + + html = getURL(url, decode=True) + if re.search(LoadTo.FILE_OFFLINE_PATTERN, html): + # File offline + result.append((url, 0, 1, url)) + else: + # Get file info + name = re.search(LoadTo.FILE_NAME_PATTERN, html) + size = re.search(LoadTo.SIZE_PATTERN, html) + if name is not None: + name = name.group(1) + size = size.group(1) + result.append((name, size, 2, url)) + yield result + +class LoadTo(Hoster): + __name__ = "LoadTo" + __type__ = "hoster" + __pattern__ = r"http://.*load.to/.*" + __version__ = "0.1" + __description__ = """load.to""" + __author_name__ = ("halfman") + __author_mail__ = ("Pulpan3@gmail.com") + + FILE_NAME_PATTERN = r'<div class="toolarge"><h1>([^<]+)</h1></div>' + URL_PATTERN = r'<form method="post" action="([^"]+)"' + SIZE_PATTERN = r'<div class="download_table_right">(\d+) Bytes</div>' + FILE_OFFLINE_PATTERN = r'Can\'t find file. 
Please check URL.<br />' + WAIT_PATTERN = r'type="submit" value="Download \((\d+)\)"' + + def setup(self): + self.multiDL = False + + def process(self, pyfile): + + self.html = self.load(pyfile.url, decode=True) + + if re.search(self.FILE_OFFLINE_PATTERN, self.html): + self.offline() + + timmy = re.search(self.WAIT_PATTERN, self.html) + if timmy: + self.setWait(timmy.group(1)) + self.wait() + + found = re.search(self.FILE_NAME_PATTERN, self.html) + if found is None: + self.fail("Parse error (NAME)") + pyfile.name = found.group(1) + + found = re.search(self.URL_PATTERN, self.html) + if found is None: + self.fail("Parse error (URL)") + download_url = found.group(1) + + self.download(download_url)
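
Note (not part of the diff): LoadTo above, like DepositfilesCom and HellshareCz earlier in this commit, exposes a module-level getInfo(urls) generator for the link checker. The shape it follows, yielding lists of (name, size, status, url) tuples, with status 1 used for offline and 2 for online files in this diff, looks roughly like this; the patterns are placeholders.

import re
from module.network.RequestFactory import getURL

OFFLINE_PATTERN = r'File not found'      # placeholder
NAME_PATTERN = r'<h1>([^<]+)</h1>'       # placeholder

def getInfo(urls):
    result = []
    for url in urls:
        html = getURL(url, decode=True)
        if re.search(OFFLINE_PATTERN, html):
            result.append((url, 0, 1, url))       # offline
        else:
            found = re.search(NAME_PATTERN, html)
            name = found.group(1) if found else url
            result.append((name, 0, 2, url))      # online, size unknown
    yield result
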
\ No newline at end of file diff --git a/module/plugins/hoster/RapidshareCom.py b/module/plugins/hoster/RapidshareCom.py index c9aba7898..0d927c525 100644 --- a/module/plugins/hoster/RapidshareCom.py +++ b/module/plugins/hoster/RapidshareCom.py @@ -50,7 +50,7 @@ class RapidshareCom(Hoster): __name__ = "RapidshareCom" __type__ = "hoster" __pattern__ = r"https?://[\w\.]*?rapidshare.com/(?:files/(?P<id>\d*?)/(?P<name>[^?]+)|#!download\|(?:\w+)\|(?P<id_new>\d+)\|(?P<name_new>[^|]+))" - __version__ = "1.36" + __version__ = "1.37" __description__ = """Rapidshare.com Download Hoster""" __config__ = [["server", "Cogent;Deutsche Telekom;Level(3);Level(3) #2;GlobalCrossing;Level(3) #3;Teleglobe;GlobalCrossing #2;TeliaSonera #2;Teleglobe #2;TeliaSonera #3;TeliaSonera", "Preferred Server", "None"]] __author_name__ = ("spoob", "RaNaN", "mkaay") @@ -103,6 +103,8 @@ class RapidshareCom(Hoster): elif self.api_data["status"] in ("0","4","5"): self.offline() + elif self.api_data["status"] == "3": + self.tempOffline() else: self.fail("Unknown response code.") diff --git a/module/plugins/hoster/ShareRapidCom.py b/module/plugins/hoster/ShareRapidCom.py index ce1912d38..46818a84c 100644 --- a/module/plugins/hoster/ShareRapidCom.py +++ b/module/plugins/hoster/ShareRapidCom.py @@ -4,6 +4,7 @@ import re from pycurl import HTTPHEADER from module.network.RequestFactory import getRequest +from module.network.HTTPRequest import BadHeader from module.plugins.Hoster import Hoster def getInfo(urls): @@ -41,8 +42,8 @@ def getInfo(urls): class ShareRapidCom(Hoster): __name__ = "ShareRapidCom" __type__ = "hoster" - __pattern__ = r"http://(?:www\.)?((share(-?rapid\.(biz|com|cz|info|eu|net|org|pl|sk)|-(central|credit|free|net)\.cz|-ms\.net)|(s-?rapid|rapids)\.(cz|sk))|(e-stahuj|mediatack|premium-rapidshare|rapidshare-premium|qiuck)\.cz|kadzet\.com|stahuj-zdarma\.eu|strelci\.net|universal-share\.com)/.*" - __version__ = "0.4" + __pattern__ = r"http://(?:www\.)?((share(-?rapid\.(biz|com|cz|info|eu|net|org|pl|sk)|-(central|credit|free|net)\.cz|-ms\.net)|(s-?rapid|rapids)\.(cz|sk))|(e-stahuj|mediatack|premium-rapidshare|rapidshare-premium|qiuck)\.cz|kadzet\.com|stahuj-zdarma\.eu|strelci\.net|universal-share\.com)/(stahuj/.+)" + __version__ = "0.42" __description__ = """Share-rapid.com plugin - premium only""" __author_name__ = ("MikyWoW", "zoidberg") __author_mail__ = ("MikyWoW@seznam.cz", "zoidberg@mujmail.cz") @@ -61,14 +62,20 @@ class ShareRapidCom(Hoster): def process(self, pyfile): if not self.account: self.fail("User not logged in") + url = "http://share-rapid.com/" + re.search(self.__pattern__, pyfile.url).groups()[-1] + self.logDebug("URL: " + url) - self.html = self.load(pyfile.url, decode=True) + try: + self.html = self.load(url, decode=True) + except BadHeader, e: + self.account.relogin(self.user) + self.retry(3, 0, str(e)) + size, units = re.search(self.FILE_SIZE_PATTERN, self.html).groups() pyfile.size = float(size) * 1024 ** {'kB': 1, 'MB': 2, 'GB': 3}[units] found = re.search(self.DOWNLOAD_URL_PATTERN, self.html) if found is not None: - self.logDebug(found) link, pyfile.name = found.groups() self.logInfo("Downloading file: %s (%s %s)" % (pyfile.name, size, units)) self.logInfo("Premium link: %s" % link) diff --git a/module/plugins/internal/UnRar.py b/module/plugins/internal/UnRar.py index 3f27c2e9e..1943f69e0 100644 --- a/module/plugins/internal/UnRar.py +++ b/module/plugins/internal/UnRar.py @@ -18,16 +18,14 @@ """ import os +import re from os.path import join from glob import glob from subprocess import Popen, 
PIPE - from module.plugins.hooks.ExtractArchive import AbtractExtractor from module.utils import save_join, decode -import re - class UnRar(AbtractExtractor): __name__ = "UnRar" __version__ = "0.1" @@ -36,16 +34,25 @@ class UnRar(AbtractExtractor): re_splitfile = re.compile(r"(.*)\.part(\d+)\.rar$") re_filelist = re.compile(r"(.+)\s+(\d+)\s+(\d+)\s+") re_wrongpwd = re.compile("(Corrupt file or wrong password|password incorrect)") + CMD = "unrar" @staticmethod def checkDeps(): if os.name == "nt": - cmd = join(pypath, "UnRAR.exe") + UnRar.CMD = join(pypath, "UnRAR.exe") + p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE) + p.communicate() else: - cmd = "unrar" + try: + p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE) + p.communicate() + except OSError: + + #fallback to rar + UnRar.CMD = "rar" + p = Popen([UnRar.CMD], stdout=PIPE, stderr=PIPE) + p.communicate() - p = Popen([cmd], stdout=PIPE, stderr=PIPE) - p.communicate() return True @staticmethod @@ -129,7 +136,7 @@ class UnRar(AbtractExtractor): def getDeleteFiles(self): if ".part" in self.file: - return glob(self.file.replace("0", "*").replace("1", "*")) + return glob(re.sub("(?<=\.part)([01]+)", "*", self.file, re.IGNORECASE)) return [self.file] def listContent(self): @@ -153,13 +160,7 @@ class UnRar(AbtractExtractor): def call_unrar(self, command, *xargs, **kwargs): - if os.name == "nt": - cmd = join(pypath, "UnRAR.exe") - else: - cmd = "unrar" - args = [] - #overwrite flag args.append("-o+") if self.overwrite else args.append("-o-") @@ -174,7 +175,7 @@ class UnRar(AbtractExtractor): #NOTE: return codes are not reliable, some kind of threading, cleanup whatever issue - call = [cmd, command] + args + list(xargs) + call = [self.CMD, command] + args + list(xargs) self.m.logDebug(" ".join(call)) p = Popen(call, stdout=PIPE, stderr=PIPE) diff --git a/module/remote/thriftbackend/pyload.thrift b/module/remote/thriftbackend/pyload.thrift index 03eb1d4eb..5d828854c 100644 --- a/module/remote/thriftbackend/pyload.thrift +++ b/module/remote/thriftbackend/pyload.thrift @@ -254,7 +254,7 @@ service Pyload { void orderPackage(1: PackageID pid, 2: i16 position), void orderFile(1: FileID fid, 2: i16 position), void setPackageData(1: PackageID pid, 2: map<string, string> data) throws (1: PackageDoesNotExists e), - void deleteFinished(), + list<PackageID> deleteFinished(), void restartFailed(), diff --git a/module/remote/thriftbackend/thriftgen/pyload/Pyload-remote b/module/remote/thriftbackend/thriftgen/pyload/Pyload-remote index e2e56d5ca..854b1589e 100755 --- a/module/remote/thriftbackend/thriftgen/pyload/Pyload-remote +++ b/module/remote/thriftbackend/thriftgen/pyload/Pyload-remote @@ -74,7 +74,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print ' void orderPackage(PackageID pid, i16 position)' print ' void orderFile(FileID fid, i16 position)' print ' void setPackageData(PackageID pid, data)' - print ' void deleteFinished()' + print ' deleteFinished()' print ' void restartFailed()' print ' bool isCaptchaWaiting()' print ' CaptchaTask getCaptchaTask(bool exclusive)' diff --git a/module/remote/thriftbackend/thriftgen/pyload/Pyload.py b/module/remote/thriftbackend/thriftgen/pyload/Pyload.py index 677e4afe2..a1bc63f75 100644 --- a/module/remote/thriftbackend/thriftgen/pyload/Pyload.py +++ b/module/remote/thriftbackend/thriftgen/pyload/Pyload.py @@ -1873,7 +1873,7 @@ class Client(Iface): def deleteFinished(self, ): self.send_deleteFinished() - self.recv_deleteFinished() + return self.recv_deleteFinished() def send_deleteFinished(self, ): 
self._oprot.writeMessageBegin('deleteFinished', TMessageType.CALL, self._seqid) @@ -1892,7 +1892,9 @@ class Client(Iface): result = deleteFinished_result() result.read(self._iprot) self._iprot.readMessageEnd() - return + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "deleteFinished failed: unknown result"); def restartFailed(self, ): self.send_restartFailed() @@ -3085,7 +3087,7 @@ class Processor(Iface, TProcessor): args.read(iprot) iprot.readMessageEnd() result = deleteFinished_result() - self._handler.deleteFinished() + result.success = self._handler.deleteFinished() oprot.writeMessageBegin("deleteFinished", TMessageType.REPLY, seqid) result.write(oprot) oprot.writeMessageEnd() @@ -4904,13 +4906,22 @@ class deleteFinished_args(TBase): class deleteFinished_result(TBase): + """ + Attributes: + - success + """ __slots__ = [ + 'success', ] thrift_spec = ( + (0, TType.LIST, 'success', (TType.I32,None), None, ), # 0 ) + def __init__(self, success=None,): + self.success = success + class restartFailed_args(TBase): diff --git a/module/web/media/js/package_ui.js b/module/web/media/js/package_ui.js index 17f6bcfdd..3ea965649 100644 --- a/module/web/media/js/package_ui.js +++ b/module/web/media/js/package_ui.js @@ -74,7 +74,7 @@ var PackageUI = new Class({ method: 'get', url: '/api/deleteFinished', onSuccess: function(data) { - if (data.del.length > 0) { + if (data.length > 0) { window.location.reload() } else { this.packages.each(function(pack) { diff --git a/pavement.py b/pavement.py index b7ece2bd4..92fbef573 100644 --- a/pavement.py +++ b/pavement.py @@ -22,8 +22,8 @@ if sys.version_info <= (2, 5): setup( name="pyload", version="0.4.9", - description='description', - long_description='', + description='Fast, lightweight and full featured download manager.', + long_description=open(PROJECT_DIR / "README").read(), keywords='', url="http://pyload.org", download_url='http://pyload.org/download', @@ -35,7 +35,7 @@ setup( #package_data=find_package_data(), #data_files=[], include_package_data=True, - exclude_package_data={'pyload': ['docs*', 'scripts*']}, + exclude_package_data={'pyload': ['docs*', 'scripts*']}, #exluced from build but not from sdist #leaving out thrift 0.8.0 since its not statisfiable install_requires=['BeautifulSoup>=3.2, <3.3', 'jinja2', 'pycurl', 'Beaker', 'bottle >= 0.9.0'] + extradeps, extras_require={ @@ -47,12 +47,20 @@ setup( #setup_requires=["setuptools_hg"], entry_points={ 'console_scripts': [ - 'pyLoadCore = pyLoadCore:main_func', - 'pyLoadCli = pyLoadCli:some_func', ], - 'gui_scripts': [ - 'pyLoadGui = my_package_gui.start_func', - ]}, - zip_safe=False + 'pyLoadCore = pyLoadCore:main', + 'pyLoadCli = pyLoadCli:main' + ]}, + zip_safe=False, + classifiers=[ + "Development Status :: 5 - Production/Stable", + "Topic :: Internet :: WWW/HTTP", + "Environment :: Console", + "Environment :: Web Environment", + "Intended Audience :: End Users/Desktop", + "License :: OSI Approved :: GNU General Public License (GPL)", + "Operating System :: OS Independent", + "Programming Language :: Python :: 2" + ] ) options( @@ -68,6 +76,11 @@ options( thrift=Bunch( path="../thrift/trunk/compiler/cpp/thrift", gen="" + ), + virtualenv=Bunch( + dir="env", + python="python2", + virtual="virtualenv2", ) ) @@ -215,6 +228,32 @@ def generate_locale(): @task +def virtualenv(options): + """Setup virtual environment""" + if path(options.dir).exists(): + return + + call([options.virtual, "--no-site-packages", "--python", 
options.python, options.dir])
+    print "$ source %s/bin/activate" % options.dir
+
+
+@task
+def clean_env():
+    """Deletes the virtual environment"""
+    env = path(options.virtualenv.dir)
+    if env.exists():
+        env.rmtree()
+
+
+@task
+@needs('generate_setup', 'minilib', 'get_source', 'virtualenv')
+def env_install():
+    """Install pyLoad into the virtualenv"""
+    venv = options.virtualenv
+    call([path(venv.dir) / "bin" / "easy_install", "."])
+
+
+@task
 def clean():
     """Cleans build directories"""
     path("build").rmtree()
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 000000000..51c054a75
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,7 @@
+[build_sphinx]
+source-dir = doc
+build-dir = doc/_build
+all_files = 1
+
+[upload_sphinx]
+upload-dir = doc/_build/html
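
Note (not part of the diff): the Api, thrift and web-UI changes above all follow from deleteFinished() now returning the ids of the removed packages instead of nothing. Assuming the usual core.api handle on a running pyLoad core (an assumption, the attribute name is not shown in this diff), the observable difference is simply:

deleted = core.api.deleteFinished()     # list of PackageID, may be empty
if deleted:
    print "removed packages:", deleted  # e.g. [12, 15], ids depend on the queue
else:
    print "nothing to clean up"         # mirrors the data.length check in package_ui.js
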