summaryrefslogtreecommitdiffstats
path: root/module/lib
diff options
context:
space:
mode:
Diffstat (limited to 'module/lib')
-rw-r--r--module/lib/BeautifulSoup.py57
-rw-r--r--module/lib/Getch.py2
-rw-r--r--module/lib/MultipartPostHandler.py10
-rw-r--r--module/lib/Unzip.py6
-rw-r--r--module/lib/beaker/crypto/pbkdf2.py2
-rw-r--r--module/lib/beaker/ext/memcached.py2
-rw-r--r--module/lib/beaker/session.py2
-rw-r--r--module/lib/beaker/util.py2
-rw-r--r--module/lib/bottle.py24
-rw-r--r--module/lib/feedparser.py94
-rw-r--r--module/lib/jinja2/filters.py2
-rw-r--r--module/lib/jinja2/nodes.py2
-rw-r--r--module/lib/jinja2/utils.py2
-rw-r--r--module/lib/simplejson/__init__.py4
-rw-r--r--module/lib/thrift/Thrift.py4
-rw-r--r--module/lib/thrift/protocol/TBinaryProtocol.py2
-rw-r--r--module/lib/thrift/protocol/TProtocol.py4
-rw-r--r--module/lib/thrift/transport/TTransport.py4
-rw-r--r--module/lib/thrift/transport/TTwisted.py2
-rw-r--r--module/lib/thrift/transport/TZlibTransport.py2
20 files changed, 115 insertions, 114 deletions
diff --git a/module/lib/BeautifulSoup.py b/module/lib/BeautifulSoup.py
index 55567f588..9f61c3ca0 100644
--- a/module/lib/BeautifulSoup.py
+++ b/module/lib/BeautifulSoup.py
@@ -483,15 +483,15 @@ class Tag(PageElement):
def _invert(h):
"Cheap function to invert a hash."
i = {}
- for k,v in h.items():
+ for k, v in h.items():
i[v] = k
return i
- XML_ENTITIES_TO_SPECIAL_CHARS = { "apos" : "'",
- "quot" : '"',
- "amp" : "&",
- "lt" : "<",
- "gt" : ">" }
+ XML_ENTITIES_TO_SPECIAL_CHARS = {"apos": "'",
+ "quot": '"',
+ "amp": "&",
+ "lt": "<",
+ "gt": ">"}
XML_SPECIAL_CHARS_TO_ENTITIES = _invert(XML_ENTITIES_TO_SPECIAL_CHARS)
@@ -929,7 +929,7 @@ class SoupStrainer:
markupAttrMap = markupAttrs
else:
markupAttrMap = {}
- for k,v in markupAttrs:
+ for k, v in markupAttrs:
markupAttrMap[k] = v
attrValue = markupAttrMap.get(attr)
if not self._matches(attrValue, matchAgainst):
@@ -1018,7 +1018,7 @@ def buildTagMap(default, *args):
for portion in args:
if hasattr(portion, 'items'):
#It's a map. Merge it.
- for k,v in portion.items():
+ for k, v in portion.items():
built[k] = v
elif hasattr(portion, '__iter__'): # is a list
#It's a list. Map each item to the default.
@@ -1150,7 +1150,7 @@ class BeautifulStoneSoup(Tag, SGMLParser):
n = int(name)
except ValueError:
return
- if not 0 <= n <= 127 : # ASCII ends at 127, not 255
+ if not 0 <= n <= 127: # ASCII ends at 127, not 255
return
return self.convert_codepoint(n)
@@ -1758,7 +1758,7 @@ class UnicodeDammit:
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
- CHARSET_ALIASES = { "macintosh" : "mac-roman",
+ CHARSET_ALIASES = {"macintosh": "mac-roman",
"x-sjis" : "shift-jis" }
def __init__(self, markup, overrideEncodings=[],
@@ -1947,23 +1947,23 @@ class UnicodeDammit:
def _ebcdic_to_ascii(self, s):
c = self.__class__
if not c.EBCDIC_TO_ASCII_MAP:
- emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
- 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
- 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
- 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
- 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
- 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
- 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
- 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
- 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,
- 201,202,106,107,108,109,110,111,112,113,114,203,204,205,
- 206,207,208,209,126,115,116,117,118,119,120,121,122,210,
- 211,212,213,214,215,216,217,218,219,220,221,222,223,224,
- 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72,
- 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81,
- 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89,
- 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57,
- 250,251,252,253,254,255)
+ emap = (0, 1, 2, 3, 156, 9, 134, 127, 151, 141, 142, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 157, 133, 8, 135, 24, 25, 146, 143, 28, 29, 30, 31,
+ 128, 129, 130, 131, 132, 10, 23, 27, 136, 137, 138, 139, 140, 5, 6, 7,
+ 144, 145, 22, 147, 148, 149, 150, 4, 152, 153, 154, 155, 20, 21, 158, 26,
+ 32, 160, 161, 162, 163, 164, 165, 166, 167, 168, 91, 46, 60, 40, 43, 33,
+ 38, 169, 170, 171, 172, 173, 174, 175, 176, 177, 93, 36, 42, 41, 59, 94,
+ 45, 47, 178, 179, 180, 181, 182, 183, 184, 185, 124, 44, 37, 95, 62, 63,
+ 186, 187, 188, 189, 190, 191, 192, 193, 194, 96, 58, 35, 64, 39, 61, 34,
+ 195, 97, 98, 99, 100, 101, 102, 103, 104, 105, 196, 197, 198, 199, 200,
+ 201, 202, 106, 107, 108, 109, 110, 111, 112, 113, 114, 203, 204, 205,
+ 206, 207, 208, 209, 126, 115, 116, 117, 118, 119, 120, 121, 122, 210,
+ 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224,
+ 225, 226, 227, 228, 229, 230, 231, 123, 65, 66, 67, 68, 69, 70, 71, 72,
+ 73, 232, 233, 234, 235, 236, 237, 125, 74, 75, 76, 77, 78, 79, 80, 81,
+ 82, 238, 239, 240, 241, 242, 243, 92, 159, 83, 84, 85, 86, 87, 88, 89,
+ 90, 244, 245, 246, 247, 248, 249, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 250, 251, 252, 253, 254, 255)
import string
c.EBCDIC_TO_ASCII_MAP = string.maketrans( \
''.join(map(chr, range(256))), ''.join(map(chr, emap)))
@@ -2000,7 +2000,8 @@ class UnicodeDammit:
'\x9c' : ('oelig', '153'),
'\x9d' : '?',
'\x9e' : ('#x17E', '17E'),
- '\x9f' : ('Yuml', ''),}
+ '\x9f' : ('Yuml', '')
+ }
#######################################################################
diff --git a/module/lib/Getch.py b/module/lib/Getch.py
index c88e65089..a052f619e 100644
--- a/module/lib/Getch.py
+++ b/module/lib/Getch.py
@@ -65,7 +65,7 @@ class _GetchMacCarbon:
else:
#
# The event contains the following info:
- # (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
+ # (what, msg, when, where, mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
diff --git a/module/lib/MultipartPostHandler.py b/module/lib/MultipartPostHandler.py
index 94aee0193..4c0271c25 100644
--- a/module/lib/MultipartPostHandler.py
+++ b/module/lib/MultipartPostHandler.py
@@ -32,8 +32,8 @@ Example:
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),
MultipartPostHandler.MultipartPostHandler)
- params = { "username" : "bob", "password" : "riviera",
- "file" : open("filename", "rb") }
+ params = {"username": "bob", "password": "riviera",
+ "file": open("filename", "rb") }
opener.open("http://wwww.bobsite.com/upload/", params)
Further Example:
@@ -123,9 +123,9 @@ def main():
def validateFile(url):
temp = tempfile.mkstemp(suffix=".html")
write(temp[0], opener.open(url).read())
- params = { "ss" : "0", # show source
- "doctype" : "Inline",
- "uploaded_file" : open(temp[1], "rb") }
+ params = {"ss": "0", # show source
+ "doctype": "Inline",
+ "uploaded_file": open(temp[1], "rb") }
print opener.open(validatorURL, params).read()
remove(temp[1])
diff --git a/module/lib/Unzip.py b/module/lib/Unzip.py
index 6d2ada8f2..c04b9abe8 100644
--- a/module/lib/Unzip.py
+++ b/module/lib/Unzip.py
@@ -18,8 +18,8 @@ class Unzip:
for i, name in enumerate(zf.namelist()):
if not name.endswith('/') and not name.endswith("config"):
- print "extracting", name.replace("pyload/","")
- outfile = open(os.path.join(dir, name.replace("pyload/","")), 'wb')
+ print "extracting", name.replace("pyload/", "")
+ outfile = open(os.path.join(dir, name.replace("pyload/", "")), 'wb')
outfile.write(zf.read(name))
outfile.flush()
outfile.close()
@@ -44,7 +44,7 @@ class Unzip:
for name in zf.namelist():
if name.endswith('/'):
- dirs.append(name.replace("pyload/",""))
+ dirs.append(name.replace("pyload/", ""))
dirs.sort()
return dirs
diff --git a/module/lib/beaker/crypto/pbkdf2.py b/module/lib/beaker/crypto/pbkdf2.py
index 6c683ef30..5f40a5c78 100644
--- a/module/lib/beaker/crypto/pbkdf2.py
+++ b/module/lib/beaker/crypto/pbkdf2.py
@@ -78,7 +78,7 @@ def strxor(a, b):
return "".join([chr(ord(x) ^ ord(y)) for (x, y) in zip(a, b)])
class PBKDF2(object):
- """PBKDF2.py : PKCS#5 v2.0 Password-Based Key Derivation
+ """PBKDF2.py: PKCS#5 v2.0 Password-Based Key Derivation
This implementation takes a passphrase and a salt (and optionally an
iteration count, a digest module, and a MAC module) and provides a
diff --git a/module/lib/beaker/ext/memcached.py b/module/lib/beaker/ext/memcached.py
index 2f367c36d..60360ae0a 100644
--- a/module/lib/beaker/ext/memcached.py
+++ b/module/lib/beaker/ext/memcached.py
@@ -45,7 +45,7 @@ class MemcachedNamespaceManager(NamespaceManager):
def get_creation_lock(self, key):
return file_synchronizer(
- identifier="memcachedcontainer/funclock/%s" % self.namespace,lock_dir = self.lock_dir)
+ identifier="memcachedcontainer/funclock/%s" % self.namespace, lock_dir=self.lock_dir)
def _format_key(self, key):
return self.namespace + '_' + key.replace(' ', '\302\267')
diff --git a/module/lib/beaker/session.py b/module/lib/beaker/session.py
index 7957be66b..a1461ebdd 100644
--- a/module/lib/beaker/session.py
+++ b/module/lib/beaker/session.py
@@ -15,7 +15,7 @@ from base64 import b64encode, b64decode
__all__ = ['SignedCookie', 'Session']
-getpid = hasattr(os, 'getpid') and os.getpid or (lambda : '')
+getpid = hasattr(os, 'getpid') and os.getpid or (lambda: '')
class SignedCookie(Cookie.BaseCookie):
"""Extends python cookie to give digital signature support"""
diff --git a/module/lib/beaker/util.py b/module/lib/beaker/util.py
index 21a467e42..0bd82bb85 100644
--- a/module/lib/beaker/util.py
+++ b/module/lib/beaker/util.py
@@ -16,7 +16,7 @@ import warnings
import sys
py3k = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0)
-py24 = sys.version_info < (2,5)
+py24 = sys.version_info < (2, 5)
jython = sys.platform.startswith('java')
if py3k or jython:
diff --git a/module/lib/bottle.py b/module/lib/bottle.py
index f4e93de9d..b0111e870 100644
--- a/module/lib/bottle.py
+++ b/module/lib/bottle.py
@@ -88,7 +88,7 @@ except ImportError: # pragma: no cover
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
-py3k = sys.version_info >= (3,0,0)
+py3k = sys.version_info >= (3, 0, 0)
NCTextIOWrapper = None
if py3k: # pragma: no cover
@@ -98,7 +98,7 @@ if py3k: # pragma: no cover
def touni(x, enc='utf8', err='strict'):
""" Convert anything to unicode """
return str(x, enc, err) if isinstance(x, bytes) else str(x)
- if sys.version_info < (3,2,0):
+ if sys.version_info < (3, 2, 0):
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
''' Garbage collecting an io.TextIOWrapper(buffer) instance closes
@@ -394,7 +394,7 @@ class Router(object):
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
- url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
+ url = ''.join([f(query.pop(n)) if n else f for (n, f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError, e:
raise RouteBuildError('Missing URL argument: %r' % e.args[0])
@@ -425,7 +425,7 @@ class Router(object):
if 'GET' in allowed and 'HEAD' not in allowed:
allowed.append('HEAD')
raise HTTPError(405, "Method not allowed.",
- header=[('Allow',",".join(allowed))])
+ header=[('Allow', ",".join(allowed))])
@@ -1876,12 +1876,12 @@ def parse_date(ims):
def parse_auth(header):
- """ Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
+ """ Parse rfc2617 HTTP authentication header string (basic) and return (user, pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
#TODO: Add 2to3 save base64[encode/decode] functions.
- user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
+ user, pwd = touni(base64.b64decode(tob(data))).split(':', 1)
return user, pwd
except (KeyError, ValueError):
return None
@@ -2049,7 +2049,7 @@ class ServerAdapter(object):
pass
def __repr__(self):
- args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
+ args = ', '.join(['%s=%s'%(k, repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
@@ -2598,9 +2598,9 @@ class SimpleTALTemplate(BaseTemplate):
for dictarg in args: kwargs.update(dictarg)
# TODO: maybe reuse a context instead of always creating one
context = simpleTALES.Context()
- for k,v in self.defaults.items():
+ for k, v in self.defaults.items():
context.addGlobal(k, v)
- for k,v in kwargs.items():
+ for k, v in kwargs.items():
context.addGlobal(k, v)
output = StringIO()
self.tpl.expand(context, output)
@@ -2691,7 +2691,7 @@ class SimpleTemplate(BaseTemplate):
if m: self.encoding = m.group(1)
if m: line = line.replace('coding','coding (removed)')
if line.strip()[:2].count('%') == 1:
- line = line.split('%',1)[1].lstrip() # Full line following the %
+ line = line.split('%', 1)[1].lstrip() # Full line following the %
cline = self.split_comment(line).strip()
cmd = re.split(r'[^a-zA-Z0-9_]', cline)[0]
flush() # You are actually reading this? Good luck, it's a mess :)
@@ -2749,7 +2749,7 @@ class SimpleTemplate(BaseTemplate):
subtpl, rargs = env['_rebase']
rargs['_base'] = _stdout[:] #copy stdout
del _stdout[:] # clear stdout
- return self.subtemplate(subtpl,_stdout,rargs)
+ return self.subtemplate(subtpl, _stdout, rargs)
return env
def render(self, *args, **kwargs):
@@ -2839,7 +2839,7 @@ HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
-_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.iteritems())
+_HTTP_STATUS_LINES = dict((k, '%d %s'%(k, v)) for (k, v) in HTTP_CODES.iteritems())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
diff --git a/module/lib/feedparser.py b/module/lib/feedparser.py
index 32f9d2dd7..8d3fec678 100644
--- a/module/lib/feedparser.py
+++ b/module/lib/feedparser.py
@@ -52,7 +52,7 @@ USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__
# HTTP "Accept" header to send to servers when downloading feeds. If you don't
# want to send an Accept header, set this to None.
-ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
+ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1"
# List of preferred XML parsers, by SAX driver name. These will be tried first,
# but if they're not installed, Python will keep searching through its own list
@@ -167,7 +167,7 @@ try:
_XML_AVAILABLE = 1
except:
_XML_AVAILABLE = 0
- def _xmlescape(data,entities={}):
+ def _xmlescape(data, entities={}):
data = data.replace('&', '&amp;')
data = data.replace('>', '&gt;')
data = data.replace('<', '&lt;')
@@ -203,7 +203,7 @@ except:
import htmlentitydefs
name2codepoint={}
codepoint2name={}
- for (name,codepoint) in htmlentitydefs.entitydefs.iteritems():
+ for (name, codepoint) in htmlentitydefs.entitydefs.iteritems():
if codepoint.startswith('&#'): codepoint=unichr(int(codepoint[2:-1]))
name2codepoint[name]=ord(codepoint)
codepoint2name[ord(codepoint)]=name
@@ -235,8 +235,8 @@ if sgmllib.endbracket.search(' <').start(0):
# Overriding the built-in sgmllib.endbracket regex allows the
# parser to find angle brackets embedded in element attributes.
self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''')
- def search(self,string,index=0):
- match = self.endbracket.match(string,index)
+ def search(self, string, index=0):
+ match = self.endbracket.match(string, index)
if match is not None:
# Returning a new object in the calling thread's context
# resolves a thread-safety.
@@ -299,7 +299,7 @@ class FeedParserDict(UserDict):
if key == 'category':
return UserDict.__getitem__(self, 'tags')[0]['term']
if key == 'enclosures':
- norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel'])
+ norel = lambda link: FeedParserDict([(name, value) for (name, value) in link.items() if name!='rel'])
return [norel(link) for link in UserDict.__getitem__(self, 'links') if link['rel']=='enclosure']
if key == 'license':
for link in UserDict.__getitem__(self, 'links'):
@@ -378,22 +378,22 @@ def _ebcdic_to_ascii(s):
global _ebcdic_to_ascii_map
if not _ebcdic_to_ascii_map:
emap = (
- 0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15,
- 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31,
- 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7,
- 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26,
- 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33,
- 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94,
- 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63,
- 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34,
- 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201,
- 202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208,
- 209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215,
- 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,
- 123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237,
- 125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243,
- 92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249,
- 48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255
+ 0, 1, 2, 3, 156, 9, 134, 127, 151, 141, 142, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 157, 133, 8, 135, 24, 25, 146, 143, 28, 29, 30, 31,
+ 128, 129, 130, 131, 132, 10, 23, 27, 136, 137, 138, 139, 140, 5, 6, 7,
+ 144, 145, 22, 147, 148, 149, 150, 4, 152, 153, 154, 155, 20, 21, 158, 26,
+ 32, 160, 161, 162, 163, 164, 165, 166, 167, 168, 91, 46, 60, 40, 43, 33,
+ 38, 169, 170, 171, 172, 173, 174, 175, 176, 177, 93, 36, 42, 41, 59, 94,
+ 45, 47, 178, 179, 180, 181, 182, 183, 184, 185, 124, 44, 37, 95, 62, 63,
+ 186, 187, 188, 189, 190, 191, 192, 193, 194, 96, 58, 35, 64, 39, 61, 34,
+ 195, 97, 98, 99, 100, 101, 102, 103, 104, 105, 196, 197, 198, 199, 200, 201,
+ 202, 106, 107, 108, 109, 110, 111, 112, 113, 114, 203, 204, 205, 206, 207, 208,
+ 209, 126, 115, 116, 117, 118, 119, 120, 121, 122, 210, 211, 212, 213, 214, 215,
+ 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231,
+ 123, 65, 66, 67, 68, 69, 70, 71, 72, 73, 232, 233, 234, 235, 236, 237,
+ 125, 74, 75, 76, 77, 78, 79, 80, 81, 82, 238, 239, 240, 241, 242, 243,
+ 92, 159, 83, 84, 85, 86, 87, 88, 89, 90, 244, 245, 246, 247, 248, 249,
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 250, 251, 252, 253, 254, 255
)
_ebcdic_to_ascii_map = _maketrans( \
_l2bytes(range(256)), _l2bytes(emap))
@@ -780,7 +780,7 @@ class _FeedParserMixin:
return data
def strattrs(self, attrs):
- return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
+ return ''.join([' %s="%s"' % (t[0], _xmlescape(t[1], {'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
@@ -796,9 +796,9 @@ class _FeedParserMixin:
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
- while pieces and len(pieces)>1 and not pieces[-1].strip():
+ while pieces and len(pieces) > 1 and not pieces[-1].strip():
del pieces[-1]
- while pieces and len(pieces)>1 and not pieces[0].strip():
+ while pieces and len(pieces) > 1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
@@ -1271,7 +1271,7 @@ class _FeedParserMixin:
else:
author, email = context.get(key), None
if not author: return
- emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
+ emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
@@ -1756,7 +1756,7 @@ if _XML_AVAILABLE:
if prefix:
localname = prefix.lower() + ':' + localname
elif namespace and not qname: #Expat
- for name,value in self.namespacesInUse.items():
+ for name, value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
@@ -1786,7 +1786,7 @@ if _XML_AVAILABLE:
if prefix:
localname = prefix + ':' + localname
elif namespace and not qname: #Expat
- for name,value in self.namespacesInUse.items():
+ for name, value in self.namespacesInUse.items():
if name and value == namespace:
localname = name + ':' + localname
break
@@ -1827,10 +1827,10 @@ class _BaseHTMLProcessor(sgmllib.SGMLParser):
else:
return '<' + tag + '></' + tag + '>'
- def parse_starttag(self,i):
+ def parse_starttag(self, i):
j=sgmllib.SGMLParser.parse_starttag(self, i)
if self._type == 'application/xhtml+xml':
- if j>2 and self.rawdata[j-2:j]=='/>':
+ if j > 2 and self.rawdata[j-2:j]=='/>':
self.unknown_endtag(self.lasttag)
return j
@@ -1902,7 +1902,7 @@ class _BaseHTMLProcessor(sgmllib.SGMLParser):
# called for each character reference, e.g. for '&#160;', ref will be '160'
# Reconstruct the original character reference.
if ref.startswith('x'):
- value = unichr(int(ref[1:],16))
+ value = unichr(int(ref[1:], 16))
else:
value = unichr(int(ref))
@@ -2000,7 +2000,7 @@ class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor):
return data
def strattrs(self, attrs):
- return ''.join([' %s="%s"' % (n,v.replace('"','&quot;')) for n,v in attrs])
+ return ''.join([' %s="%s"' % (n, v.replace('"','&quot;')) for n, v in attrs])
class _MicroformatsParser:
STRING = 1
@@ -2010,7 +2010,7 @@ class _MicroformatsParser:
EMAIL = 5
known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me']
- known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
+ known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv']
def __init__(self, data, baseuri, encoding):
self.document = BeautifulSoup.BeautifulSoup(data)
@@ -2545,7 +2545,7 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
'transparent', 'underline', 'white', 'yellow']
valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' +
- '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
+ '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$')
mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math',
'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
@@ -2651,25 +2651,25 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
self.svg_elements = lower
self.svg_elem_map = dict([(a.lower(),a) for a in mix])
acceptable_attributes = self.svg_attributes
- tag = self.svg_elem_map.get(tag,tag)
+ tag = self.svg_elem_map.get(tag, tag)
keymap = self.svg_attr_map
elif not tag in self.acceptable_elements:
return
# declare xlink namespace, if needed
if self.mathmlOK or self.svgOK:
- if filter(lambda (n,v): n.startswith('xlink:'),attrs):
+ if filter(lambda (n, v): n.startswith('xlink:'), attrs):
if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs:
attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink'))
clean_attrs = []
for key, value in self.normalize_attrs(attrs):
if key in acceptable_attributes:
- key=keymap.get(key,key)
- clean_attrs.append((key,value))
+ key=keymap.get(key, key)
+ clean_attrs.append((key, value))
elif key=='style':
clean_value = self.sanitize_style(value)
- if clean_value: clean_attrs.append((key,clean_value))
+ if clean_value: clean_attrs.append((key, clean_value))
_BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs)
def unknown_endtag(self, tag):
@@ -2679,7 +2679,7 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
if self.mathmlOK and tag in self.mathml_elements:
if tag == 'math' and self.mathmlOK: self.mathmlOK -= 1
elif self.svgOK and tag in self.svg_elements:
- tag = self.svg_elem_map.get(tag,tag)
+ tag = self.svg_elem_map.get(tag, tag)
if tag == 'svg' and self.svgOK: self.svgOK -= 1
else:
return
@@ -2705,7 +2705,7 @@ class _HTMLSanitizer(_BaseHTMLProcessor):
if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): return ''
clean = []
- for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
+ for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style):
if not value: continue
if prop.lower() in self.acceptable_css_properties:
clean.append(prop + ': ' + value + ';')
@@ -2875,7 +2875,7 @@ def _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, h
# iri support
try:
- if isinstance(url_file_stream_or_string,unicode):
+ if isinstance(url_file_stream_or_string, unicode):
url_file_stream_or_string = url_file_stream_or_string.encode('idna').decode('utf-8')
else:
url_file_stream_or_string = url_file_stream_or_string.decode('utf-8').encode('idna').decode('utf-8')
@@ -2932,7 +2932,7 @@ def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_h
if ACCEPT_HEADER:
request.add_header('Accept', ACCEPT_HEADER)
# use this for whatever -- cookies, special headers, etc
- # [('Cookie','Something'),('x-special-header','Another Value')]
+ # [('Cookie','Something'), ('x-special-header','Another Value')]
for header_name, header_value in request_headers.items():
request.add_header(header_name, header_value)
request.add_header('A-IM', 'feed') # RFC 3229 support
@@ -3391,8 +3391,8 @@ def _getCharacterEncoding(http_headers, xml_data):
Furthermore, discussion on the atom-syntax mailing list with the
author of RFC 3023 leads me to the conclusion that any document
served with a Content-Type of text/* and no charset parameter
- must be treated as us-ascii. (We now do this.) And also that it
- must always be flagged as non-well-formed. (We now do this too.)
+ must be treated as us-ascii. (We now do this.) And also that it
+ must always be flagged as non-well-formed. (We now do this too.)
If Content-Type is unspecified (input was local file or non-HTTP source)
or unrecognized (server just got it totally wrong), then go by the
@@ -3570,7 +3570,7 @@ def _stripDoctype(data):
'''
start = re.search(_s2bytes('<\w'), data)
start = start and start.start() or -1
- head,data = data[:start+1], data[start+1:]
+ head, data = data[:start+1], data[start+1:]
entity_pattern = re.compile(_s2bytes(r'^\s*<!ENTITY([^>]*?)>'), re.MULTILINE)
entity_results=entity_pattern.findall(head)
@@ -3587,7 +3587,7 @@ def _stripDoctype(data):
replacement=_s2bytes('')
if len(doctype_results)==1 and entity_results:
safe_pattern=re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"'))
- safe_entities=filter(lambda e: safe_pattern.match(e),entity_results)
+ safe_entities=filter(lambda e: safe_pattern.match(e), entity_results)
if safe_entities:
replacement=_s2bytes('<!DOCTYPE feed [\n <!ENTITY') + _s2bytes('>\n <!ENTITY ').join(safe_entities) + _s2bytes('>\n]>')
data = doctype_pattern.sub(replacement, head) + data
diff --git a/module/lib/jinja2/filters.py b/module/lib/jinja2/filters.py
index d1848e434..97f06ed7d 100644
--- a/module/lib/jinja2/filters.py
+++ b/module/lib/jinja2/filters.py
@@ -101,7 +101,7 @@ def do_xmlattr(_eval_ctx, d, autospace=True):
.. sourcecode:: html+jinja
- <ul{{ {'class': 'my_list', 'missing': none,
+ <ul{{ {'class': 'my_list', 'missing': none,
'id': 'list-%d'|format(variable)}|xmlattr }}>
...
</ul>
diff --git a/module/lib/jinja2/nodes.py b/module/lib/jinja2/nodes.py
index 6446c70ea..f6cdc0d89 100644
--- a/module/lib/jinja2/nodes.py
+++ b/module/lib/jinja2/nodes.py
@@ -507,7 +507,7 @@ class Keyword(Helper):
class CondExpr(Expr):
- """A conditional expression (inline if expression). (``{{
+ """A conditional expression (inline if expression). (``{{
foo if bar else baz }}``)
"""
fields = ('test', 'expr1', 'expr2')
diff --git a/module/lib/jinja2/utils.py b/module/lib/jinja2/utils.py
index 7b77b8eb7..186fc6c44 100644
--- a/module/lib/jinja2/utils.py
+++ b/module/lib/jinja2/utils.py
@@ -269,7 +269,7 @@ def urlize(text, trim_url_limit=None, nofollow=False):
attribute.
"""
trim_url = lambda x, limit=trim_url_limit: limit is not None \
- and (x[:limit] + (len(x) >=limit and '...'
+ and (x[:limit] + (len(x) >= limit and '...'
or '')) or x
words = _word_split_re.split(unicode(escape(text)))
nofollow_attr = nofollow and ' rel="nofollow"' or ''
diff --git a/module/lib/simplejson/__init__.py b/module/lib/simplejson/__init__.py
index 03748041d..368d4202b 100644
--- a/module/lib/simplejson/__init__.py
+++ b/module/lib/simplejson/__init__.py
@@ -31,8 +31,8 @@ Encoding basic Python object hierarchies::
Compact encoding::
>>> import simplejson as json
- >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
- '[1,2,3,{"4":5,"6":7}]'
+ >>> json.dumps([1, 2, 3, {'4': 5, '6': 7}], separators=(',',':'))
+ '[1,2,3,{"4":5,"6":7}]'
Pretty printing::
diff --git a/module/lib/thrift/Thrift.py b/module/lib/thrift/Thrift.py
index 1d271fcff..8c270ea28 100644
--- a/module/lib/thrift/Thrift.py
+++ b/module/lib/thrift/Thrift.py
@@ -74,8 +74,8 @@ class TException(Exception):
"""Base class for all thrift exceptions."""
- # BaseException.message is deprecated in Python v[2.6,3.0)
- if (2,6,0) <= sys.version_info < (3,0):
+ # BaseException.message is deprecated in Python v[2.6, 3.0)
+ if (2, 6, 0) <= sys.version_info < (3, 0):
def _get_message(self):
return self._message
def _set_message(self, message):
diff --git a/module/lib/thrift/protocol/TBinaryProtocol.py b/module/lib/thrift/protocol/TBinaryProtocol.py
index 50c6aa896..0cc44178c 100644
--- a/module/lib/thrift/protocol/TBinaryProtocol.py
+++ b/module/lib/thrift/protocol/TBinaryProtocol.py
@@ -239,7 +239,7 @@ class TBinaryProtocolAccelerated(TBinaryProtocol):
our C module to do the encoding, bypassing this object entirely.
We inherit from TBinaryProtocol so that the normal TBinaryProtocol
encoding can happen if the fastbinary module doesn't work for some
- reason. (TODO(dreiss): Make this happen sanely in more cases.)
+ reason. (TODO(dreiss): Make this happen sanely in more cases.)
In order to take advantage of the C module, just use
TBinaryProtocolAccelerated instead of TBinaryProtocol.
diff --git a/module/lib/thrift/protocol/TProtocol.py b/module/lib/thrift/protocol/TProtocol.py
index e23774784..595ca5ae6 100644
--- a/module/lib/thrift/protocol/TProtocol.py
+++ b/module/lib/thrift/protocol/TProtocol.py
@@ -202,7 +202,7 @@ class TProtocolBase:
# tuple of: ( 'reader method' name, is_container boolean, 'writer_method' name )
_TTYPE_HANDLERS = (
- (None, None, False), # 0 == TType,STOP
+ (None, None, False), # 0 == TType.STOP
(None, None, False), # 1 == TType.VOID # TODO: handle void?
('readBool', 'writeBool', False), # 2 == TType.BOOL
('readByte', 'writeByte', False), # 3 == TType.BYTE and I08
@@ -298,7 +298,7 @@ class TProtocolBase:
v_val = val_reader()
else:
v_val = self.readFieldByTType(val_ttype, val_spec)
- # this raises a TypeError with unhashable keys types. i.e. d=dict(); d[[0,1]] = 2 fails
+ # this raises a TypeError with unhashable keys types. i.e. d=dict(); d[[0, 1]] = 2 fails
results[k_val] = v_val
self.readMapEnd()
return results
diff --git a/module/lib/thrift/transport/TTransport.py b/module/lib/thrift/transport/TTransport.py
index 12e51a9bf..2b2e99eac 100644
--- a/module/lib/thrift/transport/TTransport.py
+++ b/module/lib/thrift/transport/TTransport.py
@@ -18,7 +18,7 @@
#
from cStringIO import StringIO
-from struct import pack,unpack
+from struct import pack, unpack
from thrift.Thrift import TException
class TTransportException(TException):
@@ -127,7 +127,7 @@ class TBufferedTransportFactory:
return buffered
-class TBufferedTransport(TTransportBase,CReadableTransport):
+class TBufferedTransport(TTransportBase, CReadableTransport):
"""Class that wraps another transport and buffers its I/O.
diff --git a/module/lib/thrift/transport/TTwisted.py b/module/lib/thrift/transport/TTwisted.py
index b6dcb4e0b..141e299ca 100644
--- a/module/lib/thrift/transport/TTwisted.py
+++ b/module/lib/thrift/transport/TTwisted.py
@@ -79,7 +79,7 @@ class ThriftClientProtocol(basic.Int32StringReceiver):
self.started.callback(self.client)
def connectionLost(self, reason=connectionDone):
- for k,v in self.client._reqs.iteritems():
+ for k, v in self.client._reqs.iteritems():
tex = TTransport.TTransportException(
type=TTransport.TTransportException.END_OF_FILE,
message='Connection closed')
diff --git a/module/lib/thrift/transport/TZlibTransport.py b/module/lib/thrift/transport/TZlibTransport.py
index 4356e4933..89fa96f61 100644
--- a/module/lib/thrift/transport/TZlibTransport.py
+++ b/module/lib/thrift/transport/TZlibTransport.py
@@ -125,7 +125,7 @@ class TZlibTransport(TTransportBase, CReadableTransport):
def getCompRatio(self):
'''
- Get the current measured compression ratios (in,out) from
+ Get the current measured compression ratios (in, out) from
this transport.
Returns a tuple of: