author      Walter Purcaro <vuolter@gmail.com>    2014-11-22 20:04:29 +0100
committer   Walter Purcaro <vuolter@gmail.com>    2014-11-22 20:04:29 +0100
commit      0de6e675bb0c5a4adb79d16df55ada3071825ad5 (patch)
tree        9ff782ba4c41873c8c7d06b239166d8a0806b335 /module/network
parent      Revert plugins to stable (diff)
download    pyload-0de6e675bb0c5a4adb79d16df55ada3071825ad5.tar.xz
Revert remaining modules
Diffstat (limited to 'module/network')
-rw-r--r--   module/network/HTTPChunk.py        4
-rw-r--r--   module/network/HTTPDownload.py     9
-rw-r--r--   module/network/HTTPRequest.py     60
-rw-r--r--   module/network/RequestFactory.py  15
-rw-r--r--   module/network/XDCCRequest.py      2
5 files changed, 35 insertions, 55 deletions
diff --git a/module/network/HTTPChunk.py b/module/network/HTTPChunk.py
index d42744cf4..b637aef32 100644
--- a/module/network/HTTPChunk.py
+++ b/module/network/HTTPChunk.py
@@ -208,7 +208,7 @@ class HTTPChunk(HTTPRequest):
         # as first chunk, we will parse the headers
         if not self.range and self.header.endswith("\r\n\r\n"):
             self.parseHeader()
-        elif not self.range and buf.startswith("150") and "data connection" in buf.lower(): #: ftp file size parsing
+        elif not self.range and buf.startswith("150") and "data connection" in buf: #ftp file size parsing
             size = search(r"(\d+) bytes", buf)
             if size:
                 self.p.size = int(size.group(1))
@@ -290,4 +290,4 @@ class HTTPChunk(HTTPRequest):
""" closes everything, unusable after this """
if self.fp: self.fp.close()
self.c.close()
- if hasattr(self, "p"): del self.p
+ if hasattr(self, "p"): del self.p \ No newline at end of file
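The reverted comparison above drops the .lower() normalization: stable pyLoad matches the FTP "150" reply verbatim and pulls the transfer size out of it with a regex. A minimal standalone sketch of that parsing, with a hypothetical server reply for illustration:

from re import search

def parse_ftp_size(buf):
    # FTP servers announce the upcoming transfer in a "150" reply that
    # opens the data connection and usually embeds the size in bytes
    if buf.startswith("150") and "data connection" in buf:
        m = search(r"(\d+) bytes", buf)
        if m:
            return int(m.group(1))
    return None  # not a 150 reply, or no size advertised

# hypothetical reply string, for illustration only
print parse_ftp_size("150 Opening BINARY mode data connection for file.bin (1048576 bytes).")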
diff --git a/module/network/HTTPDownload.py b/module/network/HTTPDownload.py
index 3f32295b4..fe8075539 100644
--- a/module/network/HTTPDownload.py
+++ b/module/network/HTTPDownload.py
@@ -63,7 +63,7 @@ class HTTPDownload():
         except IOError:
             self.info = ChunkInfo(filename)
 
-        self.chunkSupport = True
+        self.chunkSupport = None
         self.m = pycurl.CurlMulti()
 
         #needed for speed calculation
@@ -130,7 +130,7 @@ class HTTPDownload():
         except pycurl.error, e:
             #code 33 - no resume
             code = e.args[0]
-            if resume is True and code == 33:
+            if code == 33:
                 # try again without resume
                 self.log.debug("Errno 33 -> Restart without resume")
@@ -151,7 +151,6 @@ class HTTPDownload():
         if not resume:
             self.info.clear()
             self.info.addChunk("%s.chunk0" % self.filename, (0, 0)) #create an initial entry
-            self.info.save()
 
         self.chunks = []
@@ -165,8 +164,8 @@ class HTTPDownload():
         chunksDone = set() # list of curl handles that are finished
         chunksCreated = False
         done = False
-        if self.info.getCount() is 0: # This is a resume, if we were chunked originally assume still can
-            self.chunkSupport = False
+        if self.info.getCount() > 1: # This is a resume, if we were chunked originally assume still can
+            self.chunkSupport = True
 
         while 1:
             #need to create chunks
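The two HTTPDownload.py hunks above restore the three-state chunkSupport flag: it starts as None (unknown), and a resume with more than one recorded chunk is taken as proof that the server honored range requests before. A sketch of that decision, using a hypothetical stub in place of module.network.HTTPChunk.ChunkInfo:

class ChunkInfoStub:
    # hypothetical stand-in for ChunkInfo, kept to the one method used here
    def __init__(self, count):
        self.count = count

    def getCount(self):
        return self.count

def resume_chunk_support(info):
    chunkSupport = None  # unknown until the server answers
    if info.getCount() > 1:
        # a previous run already split the file, so ranged requests
        # worked once and are assumed to still work on resume
        chunkSupport = True
    return chunkSupport

print resume_chunk_support(ChunkInfoStub(4))  # True - was chunked before
print resume_chunk_support(ChunkInfoStub(1))  # None - still unknown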
diff --git a/module/network/HTTPRequest.py b/module/network/HTTPRequest.py
index 67ede2854..4747d937f 100644
--- a/module/network/HTTPRequest.py
+++ b/module/network/HTTPRequest.py
@@ -13,7 +13,7 @@
     You should have received a copy of the GNU General Public License
     along with this program; if not, see <http://www.gnu.org/licenses/>.
-
+    
     @author: RaNaN
 """
@@ -25,11 +25,11 @@ from httplib import responses
 from logging import getLogger
 from cStringIO import StringIO
 
-from module.plugins.Plugin import Abort, Fail
+from module.plugins.Plugin import Abort
 
 def myquote(url):
     return quote(url.encode('utf_8') if isinstance(url, unicode) else url, safe="%/:=&?~#+!$,;'@()*[]")
-
+    
 def myurlencode(data):
     data = dict(data)
     return urlencode(dict((x.encode('utf_8') if isinstance(x, unicode) else x, \
@@ -79,7 +79,7 @@ class HTTPRequest():
         if hasattr(pycurl, "AUTOREFERER"):
             self.c.setopt(pycurl.AUTOREFERER, 1)
         self.c.setopt(pycurl.SSL_VERIFYPEER, 0)
-        self.c.setopt(pycurl.LOW_SPEED_TIME, 60)
+        self.c.setopt(pycurl.LOW_SPEED_TIME, 30)
         self.c.setopt(pycurl.LOW_SPEED_LIMIT, 5)
 
         #self.c.setopt(pycurl.VERBOSE, 1)
@@ -181,7 +181,7 @@ class HTTPRequest():
         self.getCookies()
 
-    def load(self, url, get={}, post={}, referer=True, cookies=True, just_header=False, multipart=False, decode=False, follow_location=True, save_cookies=True):
+    def load(self, url, get={}, post={}, referer=True, cookies=True, just_header=False, multipart=False, decode=False):
         """ load and returns a given page """
 
         self.setRequestContext(url, get, post, referer, cookies, multipart)
@@ -190,27 +190,24 @@ class HTTPRequest():
         self.c.setopt(pycurl.HTTPHEADER, self.headers)
 
-        if not follow_location:
-            self.c.setopt(pycurl.FOLLOWLOCATION, 0)
-
         if just_header:
+            self.c.setopt(pycurl.FOLLOWLOCATION, 0)
             self.c.setopt(pycurl.NOBODY, 1)
+            self.c.perform()
+            rep = self.header
 
-        self.c.perform()
-        rep = self.header if just_header else self.getResponse()
-
-        if not follow_location:
             self.c.setopt(pycurl.FOLLOWLOCATION, 1)
-
-        if just_header:
             self.c.setopt(pycurl.NOBODY, 0)
+
+        else:
+            self.c.perform()
+            rep = self.getResponse()
+
         self.c.setopt(pycurl.POSTFIELDS, "")
         self.lastEffectiveURL = self.c.getinfo(pycurl.EFFECTIVE_URL)
         self.code = self.verifyHeader()
 
-        if save_cookies:
-            self.addCookies()
+        self.addCookies()
 
         if decode:
             rep = self.decodeResponse(rep)
@@ -231,13 +228,11 @@ class HTTPRequest():
     def getResponse(self):
         """ retrieve response from string io """
-        if self.rep is None:
-            return ""
-        else:
-            value = self.rep.getvalue()
-            self.rep.close()
-            self.rep = StringIO()
-            return value
+        if self.rep is None: return ""
+        value = self.rep.getvalue()
+        self.rep.close()
+        self.rep = StringIO()
+        return value
 
     def decodeResponse(self, rep):
         """ decode with correct encoding, relies on header """
@@ -260,7 +255,7 @@ class HTTPRequest():
             #self.log.debug("Decoded %s" % encoding )
             if lookup(encoding).name == 'utf-8' and rep.startswith(BOM_UTF8):
                 encoding = 'utf-8-sig'
-
+    
             decoder = getincrementaldecoder(encoding)("replace")
             rep = decoder.decode(rep, True)
@@ -268,7 +263,6 @@ class HTTPRequest():
         except LookupError:
             self.log.debug("No Decoder foung for %s" % encoding)
-
         except Exception:
             self.log.debug("Error when decoding string from %s." % encoding)
@@ -278,15 +272,13 @@ class HTTPRequest():
""" writes response """
if self.rep.tell() > 1000000 or self.abort:
rep = self.getResponse()
+ if self.abort: raise Abort()
+ f = open("response.dump", "wb")
+ f.write(rep)
+ f.close()
+ raise Exception("Loaded Url exceeded limit")
- if self.abort:
- raise Abort()
-
- with open("response.dump", "wb") as f:
- f.write(rep)
- raise Fail("Loaded url exceeded size limit")
- else:
- self.rep.write(buf)
+ self.rep.write(buf)
def writeHeader(self, buf):
""" writes header """
@@ -311,4 +303,4 @@ if __name__ == "__main__":
url = "http://pyload.org"
c = HTTPRequest()
print c.load(url)
-
+
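The load() hunk above folds the header-only path into a single branch: redirects and the body transfer are disabled for one perform(), then both options are restored so the same curl handle stays reusable for normal loads. A hedged sketch of that pattern with a bare pycurl handle (the URL is just a placeholder; only standard pycurl options are used):

import pycurl
from cStringIO import StringIO

c = pycurl.Curl()
header = StringIO()
c.setopt(pycurl.URL, "http://pyload.org")
c.setopt(pycurl.HEADERFUNCTION, header.write)

c.setopt(pycurl.FOLLOWLOCATION, 0)  # do not chase redirects for a header probe
c.setopt(pycurl.NOBODY, 1)          # HEAD-style request, skip the body
c.perform()
rep = header.getvalue()             # the headers are the whole response here

c.setopt(pycurl.FOLLOWLOCATION, 1)  # restore defaults so the handle
c.setopt(pycurl.NOBODY, 0)          # can serve full loads afterwards

print rep
c.close()

The same hunk group also halves LOW_SPEED_TIME from 60 to 30 seconds, meaning transfers slower than LOW_SPEED_LIMIT (5 bytes/s) are aborted twice as quickly.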
diff --git a/module/network/RequestFactory.py b/module/network/RequestFactory.py
index 750f37dc9..5b1528281 100644
--- a/module/network/RequestFactory.py
+++ b/module/network/RequestFactory.py
@@ -62,23 +62,12 @@ class RequestFactory():
     def getURL(self, *args, **kwargs):
         """ see HTTPRequest for argument list """
-        cj = None
-
-        if 'cookies' in kwargs:
-            if isinstance(kwargs['cookies'], CookieJar):
-                cj = kwargs['cookies']
-            elif isinstance(kwargs['cookies'], list):
-                cj = CookieJar(None)
-                for cookie in kwargs['cookies']:
-                    if isinstance(cookie, tuple) and len(cookie) == 3:
-                        cj.setCookie(*cookie)
-
-        h = HTTPRequest(cj, self.getOptions())
+        h = HTTPRequest(None, self.getOptions())
         try:
             rep = h.load(*args, **kwargs)
         finally:
             h.close()
-
+    
         return rep
 
     def getCookieJar(self, pluginName, account=None):
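The revert strips the ad-hoc cookie handling back out of getURL(): stable builds a throwaway, cookieless HTTPRequest and guarantees the curl handle is freed with try/finally. A sketch of that flow, assuming module.network.HTTPRequest is importable; getURL_sketch is a hypothetical free-function rendering of the method above:

from module.network.HTTPRequest import HTTPRequest

def getURL_sketch(factory, *args, **kwargs):
    # no cookie jar: one-shot requests share nothing between calls
    h = HTTPRequest(None, factory.getOptions())
    try:
        rep = h.load(*args, **kwargs)
    finally:
        h.close()  # free the pycurl handle even if load() raised
    return rep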
diff --git a/module/network/XDCCRequest.py b/module/network/XDCCRequest.py
index e395e01f0..f03798c17 100644
--- a/module/network/XDCCRequest.py
+++ b/module/network/XDCCRequest.py
@@ -127,7 +127,7 @@ class XDCCRequest():
         return filename
 
-    def _keepAlive(self, sock, *readbuffer):
+    def _keepAlive(self, sock, readbuffer):
         fdset = select([sock], [], [], 0)
         if sock not in fdset[0]:
             return
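The signature fix above passes readbuffer as a plain positional argument again instead of collecting it into a varargs tuple. The helper's core is the non-blocking select() poll shown in the context lines: with a zero timeout it only reports sockets that already have data, so the keep-alive check never stalls the transfer. A small self-contained sketch (the socketpair is a hypothetical stand-in for the IRC connection; Unix only):

import socket
from select import select

a, b = socket.socketpair()  # stand-in for the IRC socket
b.send("PING :server\r\n")

fdset = select([a], [], [], 0)  # timeout 0: poll without blocking
if a in fdset[0]:
    print a.recv(4096)  # only read when data is actually waiting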