Diffstat (limited to 'pyload')
-rw-r--r--  pyload/network/Bucket.py               | 29
-rw-r--r--  pyload/plugins/network/CurlChunk.py    | 14
-rw-r--r--  pyload/plugins/network/CurlRequest.py  |  1
3 files changed, 11 insertions(+), 33 deletions(-)
diff --git a/pyload/network/Bucket.py b/pyload/network/Bucket.py
index db67faa4a..40d8c8071 100644
--- a/pyload/network/Bucket.py
+++ b/pyload/network/Bucket.py
@@ -1,24 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-"""
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 3 of the License,
- or (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- See the GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, see <http://www.gnu.org/licenses/>.
-
- @author: RaNaN
-"""
-
from time import time
-from threading import Lock
# 10kb minimum rate
MIN_RATE = 10240
@@ -28,31 +10,24 @@ class Bucket:
self.rate = 0 # bytes per second, maximum targeted throughput
self.tokens = 0
self.timestamp = time()
- self.lock = Lock()
def __nonzero__(self):
return False if self.rate < MIN_RATE else True
def setRate(self, rate):
- self.lock.acquire()
self.rate = int(rate)
- self.lock.release()
def consumed(self, amount):
""" return the time the process has to sleep, after it consumed a specified amount """
if self.rate < MIN_RATE: return 0 #May become unresponsive otherwise
- self.lock.acquire()
self.calc_tokens()
self.tokens -= amount
if self.tokens < 0:
- time = -self.tokens/float(self.rate)
+ return -self.tokens/float(self.rate)
else:
- time = 0
-
- self.lock.release()
- return time
+ return 0
def calc_tokens(self):
if self.tokens < self.rate:
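Note: the Bucket.py hunk drops the per-call Lock and makes consumed() return the sleep time directly instead of staging it in a local variable. A minimal standalone sketch of the resulting token-bucket logic follows; the body of calc_tokens() is truncated by the hunk, so its refill step here is an assumption inferred from how consumed() uses self.tokens, and the SketchBucket name is purely illustrative.

from time import time

MIN_RATE = 10240  # 10 KiB/s minimum, matching the constant above


class SketchBucket:
    def __init__(self):
        self.rate = 0           # targeted throughput in bytes per second
        self.tokens = 0         # bytes that may be consumed without sleeping
        self.timestamp = time()

    def consumed(self, amount):
        """Return how long the caller should sleep after consuming `amount` bytes."""
        if self.rate < MIN_RATE:
            return 0  # limiter effectively disabled; sleeping here could stall downloads
        self.calc_tokens()
        self.tokens -= amount
        if self.tokens < 0:
            # the bucket was overdrawn; sleep until the deficit refills at `rate`
            return -self.tokens / float(self.rate)
        return 0

    def calc_tokens(self):
        # assumed refill step: add rate * elapsed seconds, capped at one second's worth
        if self.tokens < self.rate:
            now = time()
            self.tokens = min(self.rate, self.tokens + self.rate * (now - self.timestamp))
            self.timestamp = now

With the Lock gone the token arithmetic is unguarded; at worst, interleaved callers skew a sleep interval slightly rather than corrupt the transfer itself.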
diff --git a/pyload/plugins/network/CurlChunk.py b/pyload/plugins/network/CurlChunk.py
index 871cf7f39..75be9ce6c 100644
--- a/pyload/plugins/network/CurlChunk.py
+++ b/pyload/plugins/network/CurlChunk.py
@@ -25,7 +25,7 @@ import codecs
import pycurl
from pyload.utils import remove_chars
-from pyload.utils.fs import fs_encode
+from pyload.utils.fs import fs_encode, fs_decode
from CurlRequest import CurlRequest
@@ -35,7 +35,7 @@ class WrongFormat(Exception):
class ChunkInfo():
def __init__(self, name):
- self.name = unicode(name)
+ self.name = fs_decode(name)
self.size = 0
self.resume = False
self.chunks = []
@@ -153,6 +153,8 @@ class CurlChunk(CurlRequest):
self.sleep = 0.000
self.lastSize = 0
+ # next to last size
+ self.nLastSize = 0
def __repr__(self):
return "<CurlChunk id=%d, size=%d, arrived=%d>" % (self.id, self.size, self.arrived)
@@ -228,6 +230,8 @@ class CurlChunk(CurlRequest):
self.BOMChecked = True
size = len(buf)
+ self.nLastSize = self.lastSize
+ self.lastSize = size
self.arrived += size
@@ -235,7 +239,9 @@ class CurlChunk(CurlRequest):
if self.p.bucket:
sleep(self.p.bucket.consumed(size))
- else:
+
+ # if the buffer sizes are stable no sleep will be made
+ elif size != self.lastSize or size != self.nLastSize:
# Avoid small buffers, increasing sleep time slowly if buffer size gets smaller
# otherwise reduce sleep time percentile (values are based on tests)
# So in general cpu time is saved without reducing bandwidth too much
@@ -245,8 +251,6 @@ class CurlChunk(CurlRequest):
else:
self.sleep *= 0.7
- self.lastSize = size
-
sleep(self.sleep)
if self.range and self.arrived > self.size:
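Note: the CurlChunk.py hunks keep the last two buffer sizes (lastSize plus the new nLastSize) so the heuristic sleep is only tuned while buffer sizes are still fluctuating; once consecutive buffers arrive with the same size, writeBody stops sleeping entirely. A rough sketch of that control flow follows. It reorders the size bookkeeping for readability, the small-buffer threshold and increment are placeholders because that branch lies outside the visible hunk, and the SleepTuner name is illustrative rather than pyload's API.

from time import sleep


class SleepTuner:
    def __init__(self, bucket=None):
        self.bucket = bucket    # optional Bucket-style limiter providing consumed()
        self.sleep_time = 0.0
        self.last_size = 0
        self.n_last_size = 0    # next-to-last buffer size

    def on_buffer(self, buf):
        size = len(buf)

        if self.bucket:
            # an explicit speed limit always wins over the CPU-saving heuristic
            sleep(self.bucket.consumed(size))
        elif size != self.last_size or size != self.n_last_size:
            # buffer sizes are still fluctuating: keep tuning the sleep interval
            if size < 4096:               # assumed threshold for a "small" buffer
                self.sleep_time += 0.002  # assumed increment; grow the sleep slowly
            else:
                self.sleep_time *= 0.7    # healthy buffers: back off the sleep
            sleep(self.sleep_time)
        # when recent buffer sizes all match, the transfer is left alone

        # remember the last two sizes for the next call
        self.n_last_size, self.last_size = self.last_size, size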
diff --git a/pyload/plugins/network/CurlRequest.py b/pyload/plugins/network/CurlRequest.py
index 8d1f22450..717590ac5 100644
--- a/pyload/plugins/network/CurlRequest.py
+++ b/pyload/plugins/network/CurlRequest.py
@@ -187,7 +187,6 @@ class CurlRequest(Request):
if "auth" in self.options:
self.c.setopt(pycurl.USERPWD, str(self.options["auth"]))
-
def load(self, url, get={}, post={}, referer=True, cookies=True, just_header=False, multipart=False, decode=False):
""" load and returns a given page """